import odml class Event (object): def __init__(self, name): self.handlers = set() self.name = name def add_handler(self, handler): self.handlers.add(handler) return self def remove_handler(self, handler): try: self.handlers.remove(handler) except: raise ValueError("No such handler: %s" % repr(handler)) return self def fire(self, *args, **kargs): for handler in set(self.handlers): if not handler in self.handlers: continue # don't fire to unsubscribed handlers handler(*args, **kargs) self.finish(*args, **kargs) def finish(self, *args, **kargs): """ a final handler for this event (typically passing the event on to higher layers) """ pass def __len__(self): return len(self.handlers) def __repr__(self): return "<%sEvent>" % (self.name) __iadd__ = add_handler __isub__ = remove_handler __call__ = fire class ChangeContext(object): """ A ChangeContext holds information about a change event the context object stays the same within preChange and postChange events, thus you can store information in a preChange event and use it later in the postChange event where the information might not be available any more. Attributes: * action: the action that caused the event * obj: the object upon which the action is executed * val: a value assotiated with a action * preChange: True if the change has not yet occurred * postChange: True if the change has already occured Actions defined so far: * "set": val = (attribute_name, new_value) * "insert", "append": val = object to be inserted * "remove": val = object to be remove Events may be passed on in the hierarchy, thus the context also holds state for this. *cur* holds the current node receiving the event and is a direct or indirect parent of obj. The hidden attribute *_obj* holds the complete pass stack, where obj is obj[0] and cur is _obj[-1] """ def __init__(self, val): self._obj = [] self.val = val self.action = None self.when = None @property def preChange(self): return self.when is True @preChange.setter def preChange(self, val): self.when = True if val else None @property def postChange(self): return self.when is False @postChange.setter def postChange(self, val): self.when = False if val else None @property def obj(self): return self._obj[0] def getStack(self, count): """ helper function used to obtain a event-pass-stack for a certain hierarchy i.e. 
for a property-change event caught at the parent section, _obj will be [property, section] with getStack you can now obtain: >>> value, property, section = getStack(3) without checking the depth of the level, this will also hold true for longer stacks such as [val, prop, sec, doc] and will still work as expected """ if len(self._obj) < count: return ([None] * count + self._obj)[-count:] return self._obj[:count] @property def cur(self): return self._obj[-1] def passOn(self, obj): """ pass the event to obj appends obj to the event-pass-stack and calls obj._Changed after handling obj is removed from the stack again """ self._obj.append(obj) if obj._change_handler is not None: #hasattr(obj, "_change_handler"): obj._change_handler(self) obj._Changed(self) self._obj.remove(obj) def reset(self): self._obj = [] def __repr__(self): v = "" if self.preChange: v = "Pre" if self.postChange: v = "Post" return "<%sChange %s.%s(%s)>" % (v, repr(self.obj), self.action, repr(self.val)) def dump(self): return repr(self) + "\nObject stack:\n\t" + "\n\t".join(map(repr, self._obj)) class ChangedEvent(object): def __init__(self): pass class EventHandler(object): def __init__(self, func): self._func = func def __call__(self, *args, **kargs): return self._func(*args, **kargs) class ChangeHandlable(object): """ For objects that support the add_change_handler and remove_change_handler functions. """ _change_handler = None def add_change_handler(self, func): if self._change_handler is None: self._change_handler = Event(self.__class__.__name__) self._change_handler += func def remove_change_handler(self, func): self._change_handler -= func if len(self._change_handler) == 0: del self._change_handler class ModificationNotifier(ChangeHandlable): """ Override some methods, to get notification on their calls """ def __setattr__(self, name, value): fire = not name.startswith('_') and hasattr(self, name) func = lambda: super(ModificationNotifier, self).__setattr__(name, value) if fire: self.__fireChange("set", (name, value), func) else: func() def __fireChange(self, action, obj, func): """ create a ChangeContext and * fire a preChange-event * call func * fire a postChange-event """ c = ChangeContext(obj) c.action = action c.preChange = True c.passOn(self) res = func() c.reset() c.postChange = True c.passOn(self) return res def append(self, obj, *args, **kwargs): func = lambda: super(ModificationNotifier, self).append(obj, *args, **kwargs) self.__fireChange("append", obj, func) def remove(self, obj): func = lambda: super(ModificationNotifier, self).remove(obj) self.__fireChange("remove", obj, func) def insert(self, position, obj): func = lambda: super(ModificationNotifier, self).insert(position, obj) self.__fireChange("insert", obj, func) def _reorder(self, childlist, new_index): func = lambda: super(ModificationNotifier, self)._reorder(childlist, new_index) return self.__fireChange("reorder", (childlist, new_index), func) # create a seperate global Event listeners for each class # and provide ModificationNotifier Capabilities name = "event" provides = odml.getImplementation().provides + ["event"] class Value(ModificationNotifier, odml.getImplementation().Value): _Changed = Event("value") class Property(ModificationNotifier, odml.getImplementation().Property): _Changed = Event("prop") class Section(ModificationNotifier, odml.getImplementation().Section): _Changed = Event("sec") class Document(ModificationNotifier, odml.getImplementation().Document): _Changed = Event("doc") def pass_on_change(context): """ pass the change 
event to the parent node """ parent = context.cur.parent if parent is not None: context.passOn(parent) def pass_on_change_section(context): """ pass the change event directly to the document in question don't go through all parents """ document = context.cur.document if document is not None: context.passOn(document) Value._Changed.finish = pass_on_change Property._Changed.finish = pass_on_change Section._Changed.finish = pass_on_change_section import sys odml.addImplementation(sys.modules[__name__])
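# --- Usage sketch (illustration only, not part of the module above) -----------
# A minimal, self-contained demonstration of the Event / ModificationNotifier
# machinery defined above.  NotifyingList and log_change are hypothetical names
# introduced here for the example; the real odml classes (Value, Property,
# Section, Document) are wired up in the same way at the end of the module.

class NotifyingList(ModificationNotifier, list):
    # every notifying class carries its own class-level Event,
    # just like Value/Property/Section/Document above
    _Changed = Event("demo")

def log_change(context):
    # called twice per modification: once with preChange=True before the
    # change is applied, and once with postChange=True afterwards;
    # context.val holds the object passed to append()/remove()
    when = "pre" if context.preChange else "post"
    print("%s-change: action=%s val=%s" % (when, context.action, repr(context.val)))

if __name__ == "__main__":
    items = NotifyingList()
    items.add_change_handler(log_change)
    items.append(42)     # fires "append" pre- and post-change events
    items.remove(42)     # fires "remove" pre- and post-change events
    items.remove_change_handler(log_change)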
# -*- coding: utf-8 -*- import os import sys import shutil import subprocess import time import platform # if debug_info=True, Debugging Print Information will be turned on debug_info=False # if make_fal=True, Partition tables are put into firmware make_fal=False # Setting firmware output directory out_path='./Bin' # Setting the bin file path bin_file='./rtthread.bin' # Setting winnermicro libraries path wmlib_path='./packages/wm_libraries-' # Setting the 1M flash layout file layout_1M_file='.' # Setting the 2M flash layout file layout_2M_file='.' # Setting the makeimg by adding rtt flash original fls makeimg_new_fls='.' def execute_command(cmdstring, cwd=None, shell=True): """Execute the system command at the specified address.""" if shell: cmdstring_list = cmdstring sub = subprocess.Popen(cmdstring_list, cwd=cwd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=shell, bufsize=8192) stdout_str = "" while sub.poll() is None: stdout_str += str(sub.stdout.read()) time.sleep(0.1) return stdout_str def copy_file(name, path): res = True if os.path.exists(path): shutil.copy(path, out_path) else: print('makeimg err! No ' + name + ' file found: ' + path) res = False return res def is_exists(name, path): res = True if not os.path.exists(path): print('makeimg err! No ' + name + ' file found: ' + path) res = False return res def get_exec_path(path): (file_path, file_name) = os.path.split(path) (name, extend) = os.path.splitext(file_name) exec_path = '' if (platform.system() == "Windows"): exec_path = os.path.abspath(file_path + '/' + name + '.exe') elif (platform.system() == "Linux"): exec_path = os.path.abspath(file_path + '/' + name) if debug_info: print('file_path: ' + file_path) print('file_name: ' + file_name) print('name: ' + name) print('extend: ' + extend) return exec_path def do_makeimg(tool_path, param): str = "\"" + tool_path + "\"" + ' ' + param if debug_info: print('exec cmd: ' + str); execute_command(str) def get_wmlib_path_full(path): (_wmlib_path,_wmlib_name) = os.path.split(path) files = os.listdir(_wmlib_path) for f in files: if _wmlib_name in f: return _wmlib_path + '/' + f return path if __name__=='__main__': # find winnermicro libraries full path wmlib_path_full = get_wmlib_path_full(wmlib_path) # Setting the version.txt file path version_file=wmlib_path_full + '/Tools/version.txt' # Setting the secboot.img file path secboot_file=wmlib_path_full + '/Tools/secboot.img' # Setting the wm_gzip.exe file path wm_gzip_file=wmlib_path_full + '/Tools/wm_gzip.exe' # Setting the makeimg.exe file path makeimg_file=wmlib_path_full + '/Tools/makeimg.exe' # Setting the makeimg_all.exe file path makeimg_all_file=wmlib_path_full + '/Tools/makeimg_all.exe' if (platform.system() == "Linux"): wm_gzip_file=wmlib_path_full + '/Tools/wm_gzip.py' makeimg_file=wmlib_path_full + '/Tools/makeimg' makeimg_all_file=wmlib_path_full + '/Tools/makeimg_all' # Get absolute path out_path = os.path.abspath(out_path).replace('\\', '/'); bin_file = os.path.abspath(bin_file).replace('\\', '/'); version_file = os.path.abspath(version_file).replace('\\', '/'); secboot_file = os.path.abspath(secboot_file).replace('\\', '/'); wm_gzip_file = os.path.abspath(wm_gzip_file).replace('\\', '/'); makeimg_file = os.path.abspath(makeimg_file).replace('\\', '/'); makeimg_all_file = os.path.abspath(makeimg_all_file).replace('\\', '/'); # Create the output directory if not os.path.exists(out_path): os.mkdir(out_path) # Copy file if not copy_file('bin', bin_file): exit(0) if not copy_file('version', version_file): exit(0) if not 
copy_file('secboot', secboot_file): exit(0) # Check the existence of packaging tools if not is_exists('wm_gzip', wm_gzip_file): exit(0) if not is_exists('makeimg', makeimg_file): exit(0) if not is_exists('makeimg_all', makeimg_all_file): exit(0) # Get File Names and File Extensions (bin_file_path,bin_file_name) = os.path.split(bin_file) (bin_name,bin_extend) = os.path.splitext(bin_file_name) (version_file_path,version_file_name) = os.path.split(version_file) (secboot_file_path,secboot_file_name) = os.path.split(secboot_file) # print debug Information if debug_info: print('bin_file_name:' + bin_file_name + 'bin_name:' + bin_name + 'bin_extend:' + bin_extend + 'version_file_name:' + version_file_name + 'secboot_file_name:' + secboot_file_name) print('makeimg 1M Flash...') file_pos_1M='_1M' gzip_param = "\"" + out_path + '/' + bin_file_name + "\"" make_img_param = "\"" + out_path + '/' + bin_file_name + "\"" + ' ' + "\"" + out_path + '/' + bin_name + file_pos_1M + '.img' + "\"" + ' 0' + ' 0' + ' ' + "\"" + out_path + '/' + version_file_name + "\"" + ' 90000' + ' 10100' make_GZ_param = "\"" + out_path + '/' + bin_file_name + '.gz' + "\"" + ' ' + "\"" + out_path + '/' + bin_name + '_GZ' + file_pos_1M + '.img' +"\"" + ' 0' + ' 1' + ' ' + "\"" + out_path + '/' + version_file_name + "\"" + ' 90000' + ' 10100' + ' ' + "\"" + out_path + '/' + bin_file_name + "\"" make_SEC_param = "\"" + out_path + '/' + bin_file_name + "\"" + ' ' + "\"" + out_path + '/' + bin_name + '_SEC' + file_pos_1M + '.img' + "\"" + ' 0' + ' 0' + ' ' + "\"" + out_path + '/' + version_file_name + "\"" + ' 90000' + ' 10100' make_FLS_param = "\"" + out_path + '/' + secboot_file_name + "\"" + ' ' + "\"" + out_path + '/' + bin_name + file_pos_1M + '.img' + "\"" + ' ' + "\"" + out_path + '/' + bin_name + file_pos_1M + '.FLS' + "\"" if debug_info: print('gzip_param' + gzip_param) print('make_img_param' + make_img_param) print('make_GZ_param' + make_GZ_param) print('make_SEC_param' + make_SEC_param) print('make_FLS_param' + make_FLS_param) if (platform.system() == "Linux"): do_makeimg("python",wm_gzip_file + " " + gzip_param) else: do_makeimg(wm_gzip_file, gzip_param) do_makeimg(makeimg_file, make_img_param) do_makeimg(makeimg_file, make_GZ_param) do_makeimg(makeimg_file, make_SEC_param) do_makeimg(makeimg_all_file, make_FLS_param) rm_file = out_path + '/' + bin_name + file_pos_1M + '.img' if os.path.exists(rm_file): os.remove(rm_file) rm_file = out_path + '/' + bin_file_name + '.gz' if os.path.exists(rm_file): os.remove(rm_file) print('makeimg 2M Flash...') file_pos_2M='_2M' gzip_param = "\"" + out_path + '/' + bin_file_name + "\"" make_img_param = "\"" + out_path + '/' + bin_file_name + "\"" + ' ' + "\"" + out_path + '/' + bin_name + file_pos_2M + '.img' + "\"" + ' 3' + ' 0' + ' ' + "\"" + out_path + '/' + version_file_name + "\"" + ' 100000' + ' 10100' make_GZ_param = "\"" + out_path + '/' + bin_file_name + '.gz' + "\"" + ' ' + "\"" + out_path + '/' + bin_name + '_GZ' + file_pos_2M + '.img' +"\"" + ' 3' + ' 1' + ' ' + "\"" + out_path + '/' + version_file_name + "\"" + ' 100000' + ' 10100' + ' ' + "\"" + out_path + '/' + bin_file_name + "\"" make_SEC_param = "\"" + out_path + '/' + bin_file_name + "\"" + ' ' + "\"" + out_path + '/' + bin_name + '_SEC' + file_pos_2M + '.img' + "\"" + ' 3' + ' 0' + ' ' + "\"" + out_path + '/' + version_file_name + "\"" + ' 100000' + ' 10100' make_FLS_param = "\"" + out_path + '/' + secboot_file_name + "\"" + ' ' + "\"" + out_path + '/' + bin_name + file_pos_2M + '.img' + "\"" + ' ' + "\"" + out_path + 
'/' + bin_name + file_pos_2M + '.FLS' + "\"" if debug_info: print('gzip_param' + gzip_param) print('make_img_param' + make_img_param) print('make_GZ_param' + make_GZ_param) print('make_SEC_param' + make_SEC_param) print('make_FLS_param' + make_FLS_param) if (platform.system() == "Linux"): do_makeimg("python",wm_gzip_file + " " + gzip_param) else: do_makeimg(wm_gzip_file, gzip_param) do_makeimg(makeimg_file, make_img_param) do_makeimg(makeimg_file, make_GZ_param) do_makeimg(makeimg_file, make_SEC_param) do_makeimg(makeimg_all_file, make_FLS_param) rm_file = out_path + '/' + bin_name + file_pos_2M + '.img' if os.path.exists(rm_file): os.remove(rm_file) rm_file = out_path + '/' + bin_file_name + '.gz' if os.path.exists(rm_file): os.remove(rm_file) if make_fal: # Get absolute path layout_1M_file = os.path.abspath(layout_1M_file).replace('\\', '/'); layout_2M_file = os.path.abspath(layout_2M_file).replace('\\', '/'); makeimg_new_fls = os.path.abspath(makeimg_new_fls).replace('\\', '/'); # Create command parameters to new fls makeimg_new_cmd="\"" + out_path + '/' + bin_name + file_pos_1M + '.FLS' + "\"" + ' ' + "\"" + layout_1M_file + "\"" + ' ' + "\"" + out_path + '/'+ bin_name + '_layout' + file_pos_1M+'.FLS' +"\"" do_makeimg(makeimg_new_fls, makeimg_new_cmd) makeimg_new_cmd="\"" + out_path + '/' + bin_name + file_pos_2M + '.FLS' + "\"" + ' ' + "\"" + layout_2M_file + "\"" + ' ' + "\"" + out_path + '/'+ bin_name + '_layout' + file_pos_2M+'.FLS' +"\"" do_makeimg(makeimg_new_fls, makeimg_new_cmd) # Delete temporary files rm_file = out_path + '/' + bin_name + file_pos_1M + '.FLS' if os.path.exists(rm_file): os.remove(rm_file) rm_file = out_path + '/' + bin_name + file_pos_2M + '.FLS' if os.path.exists(rm_file): os.remove(rm_file) print('end')
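# --- Alternative sketch (hypothetical, not used by the script above) ----------
# execute_command() busy-polls the child process and concatenates str() of raw
# byte chunks, which is fragile on Python 3.  Assuming Python 3 and that the
# packaging tools only need their output captured, an equivalent helper could
# look like this (run_tool is a name introduced here for illustration):

def run_tool(tool_path, param, cwd=None):
    # build the same '"<tool>" <param>' command line that do_makeimg() uses
    cmd = '"' + tool_path + '"' + ' ' + param
    if debug_info:
        print('exec cmd: ' + cmd)
    result = subprocess.run(cmd, cwd=cwd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return result.stdout.decode(errors='replace')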
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains a BigQuery Hook, as well as a very basic PEP 249 implementation for BigQuery. """ import logging import time from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook from airflow.hooks.dbapi_hook import DbApiHook from apiclient.discovery import build from pandas.io.gbq import GbqConnector, \ _parse_data as gbq_parse_data, \ _check_google_client_version as gbq_check_google_client_version, \ _test_google_api_imports as gbq_test_google_api_imports from pandas.tools.merge import concat logging.getLogger("bigquery").setLevel(logging.INFO) class BigQueryHook(GoogleCloudBaseHook, DbApiHook): """ Interact with BigQuery. This hook uses the Google Cloud Platform connection. """ conn_name_attr = 'bigquery_conn_id' def __init__(self, bigquery_conn_id='bigquery_default', delegate_to=None): super(BigQueryHook, self).__init__( conn_id=bigquery_conn_id, delegate_to=delegate_to) def get_conn(self): """ Returns a BigQuery PEP 249 connection object. """ service = self.get_service() project = self._get_field('project') return BigQueryConnection(service=service, project_id=project) def get_service(self): """ Returns a BigQuery service object. """ http_authorized = self._authorize() return build('bigquery', 'v2', http=http_authorized) def insert_rows(self, table, rows, target_fields=None, commit_every=1000): """ Insertion is currently unsupported. Theoretically, you could use BigQuery's streaming API to insert rows into a table, but this hasn't been implemented. """ raise NotImplementedError() def get_pandas_df(self, bql, parameters=None): """ Returns a Pandas DataFrame for the results produced by a BigQuery query. The DbApiHook method must be overridden because Pandas doesn't support PEP 249 connections, except for SQLite. See: https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447 https://github.com/pydata/pandas/issues/6900 :param bql: The BigQuery SQL to execute. :type bql: string """ service = self.get_service() project = self._get_field('project') connector = BigQueryPandasConnector(project, service) schema, pages = connector.run_query(bql) dataframe_list = [] while len(pages) > 0: page = pages.pop() dataframe_list.append(gbq_parse_data(schema, page)) if len(dataframe_list) > 0: return concat(dataframe_list, ignore_index=True) else: return gbq_parse_data(schema, []) class BigQueryPandasConnector(GbqConnector): """ This connector behaves identically to GbqConnector (from Pandas), except that it allows the service to be injected, and disables a call to self.get_credentials(). This allows Airflow to use BigQuery with Pandas without forcing a three legged OAuth connection. Instead, we can inject service account credentials into the binding. 
""" def __init__(self, project_id, service, reauth=False, verbose=False): gbq_check_google_client_version() gbq_test_google_api_imports() self.project_id = project_id self.reauth = reauth self.service = service self.verbose = verbose class BigQueryConnection(object): """ BigQuery does not have a notion of a persistent connection. Thus, these objects are small stateless factories for cursors, which do all the real work. """ def __init__(self, *args, **kwargs): self._args = args self._kwargs = kwargs def close(self): """ BigQueryConnection does not have anything to close. """ pass def commit(self): """ BigQueryConnection does not support transactions. """ pass def cursor(self): """ Return a new :py:class:`Cursor` object using the connection. """ return BigQueryCursor(*self._args, **self._kwargs) def rollback(self): raise NotImplementedError( "BigQueryConnection does not have transactions") class BigQueryBaseCursor(object): """ The BigQuery base cursor contains helper methods to execute queries against BigQuery. The methods can be used directly by operators, in cases where a PEP 249 cursor isn't needed. """ def __init__(self, service, project_id): self.service = service self.project_id = project_id def run_query( self, bql, destination_dataset_table = False, write_disposition = 'WRITE_EMPTY', allow_large_results=False, udf_config = False): """ Executes a BigQuery SQL query. Optionally persists results in a BigQuery table. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param bql: The BigQuery SQL to execute. :type bql: string :param destination_dataset_table: The dotted <dataset>.<table> BigQuery table to save the query results. :param write_disposition: What to do if the table already exists in BigQuery. :param allow_large_results: Whether to allow large results. :type allow_large_results: boolean :param udf_config: The User Defined Function configuration for the query. See https://cloud.google.com/bigquery/user-defined-functions for details. :type udf_config: list """ configuration = { 'query': { 'query': bql, } } if destination_dataset_table: assert '.' in destination_dataset_table, ( 'Expected destination_dataset_table in the format of ' '<dataset>.<table>. Got: {}').format(destination_dataset_table) destination_dataset, destination_table = \ destination_dataset_table.split('.', 1) configuration['query'].update({ 'allowLargeResults': allow_large_results, 'writeDisposition': write_disposition, 'destinationTable': { 'projectId': self.project_id, 'datasetId': destination_dataset, 'tableId': destination_table, } }) if udf_config: assert isinstance(udf_config, list) configuration['query'].update({ 'userDefinedFunctionResources': udf_config }) return self.run_with_configuration(configuration) def run_extract( # noqa self, source_project_dataset_table, destination_cloud_storage_uris, compression='NONE', export_format='CSV', field_delimiter=',', print_header=True): """ Executes a BigQuery extract command to copy data from BigQuery to Google Cloud Storage. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param source_project_dataset_table: The dotted <dataset>.<table> BigQuery table to use as the source data. :type source_project_dataset_table: string :param destination_cloud_storage_uris: The destination Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). 
Follows convention defined here: https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple :type destination_cloud_storage_uris: list :param compression: Type of compression to use. :type compression: string :param export_format: File format to export. :type export_format: string :param field_delimiter: The delimiter to use when extracting to a CSV. :type field_delimiter: string :param print_header: Whether to print a header for a CSV file extract. :type print_header: boolean """ source_project, source_dataset, source_table = \ self._split_project_dataset_table_input( 'source_project_dataset_table', source_project_dataset_table) configuration = { 'extract': { 'sourceTable': { 'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table, }, 'compression': compression, 'destinationUris': destination_cloud_storage_uris, 'destinationFormat': export_format, } } if export_format == 'CSV': # Only set fieldDelimiter and printHeader fields if using CSV. # Google does not like it if you set these fields for other export # formats. configuration['extract']['fieldDelimiter'] = field_delimiter configuration['extract']['printHeader'] = print_header return self.run_with_configuration(configuration) def run_copy(self, source_project_dataset_tables, destination_project_dataset_table, write_disposition='WRITE_EMPTY', create_disposition='CREATE_IF_NEEDED'): """ Executes a BigQuery copy command to copy data from one BigQuery table to another. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy For more details about these parameters. :param source_project_dataset_tables: One or more dotted (<project>.)<dataset>.<table> BigQuery tables to use as the source data. Use a list if there are multiple source tables. If <project> is not included, project will be the project defined in the connection json. :type source_project_dataset_tables: list|string :param destination_project_dataset_table: The destination BigQuery table. Format is: <project>.<dataset>.<table> :type destination_project_dataset_table: string :param write_disposition: The write disposition if the table already exists. :type write_disposition: string :param create_disposition: The create disposition if the table doesn't exist. :type create_disposition: string """ source_project_dataset_tables = ( [source_project_dataset_tables] if not isinstance(source_project_dataset_tables, list) else source_project_dataset_tables) source_project_dataset_tables_fixup = [] for source_project_dataset_table in source_project_dataset_tables: source_project, source_dataset, source_table = \ self._split_project_dataset_table_input( 'source_project_dataset_table', source_project_dataset_table) source_project_dataset_tables_fixup.append({ 'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table }) assert 3 == len(destination_project_dataset_table.split('.')), ( 'Expected destination_project_dataset_table in the format of ' '<project>.<dataset>.<table>. 
' 'Got: {}').format(destination_project_dataset_table) destination_project, destination_dataset, destination_table = \ destination_project_dataset_table.split('.', 2) configuration = { 'copy': { 'createDisposition': create_disposition, 'writeDisposition': write_disposition, 'sourceTables': source_project_dataset_tables_fixup, 'destinationTable': { 'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table } } } return self.run_with_configuration(configuration) def run_load(self, destination_project_dataset_table, schema_fields, source_uris, source_format='CSV', create_disposition='CREATE_IF_NEEDED', skip_leading_rows=0, write_disposition='WRITE_EMPTY', field_delimiter=','): """ Executes a BigQuery load command to load data from Google Cloud Storage to BigQuery. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table> BigQuery table to load data into. If <project> is not included, project will be the project defined in the connection json. :type destination_project_dataset_table: string :param schema_fields: The schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load :type schema_fields: list :param source_uris: The source Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild per-object name can be used. :type source_uris: list :param source_format: File format to export. :type source_format: string :param create_disposition: The create disposition if the table doesn't exist. :type create_disposition: string :param skip_leading_rows: Number of rows to skip when loading from a CSV. :type skip_leading_rows: int :param write_disposition: The write disposition if the table already exists. :type write_disposition: string :param field_delimiter: The delimiter to use when loading from a CSV. :type field_delimiter: string """ destination_project, destination_dataset, destination_table = \ self._split_project_dataset_table_input( 'destination_project_dataset_table', destination_project_dataset_table) configuration = { 'load': { 'createDisposition': create_disposition, 'destinationTable': { 'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table, }, 'schema': { 'fields': schema_fields }, 'sourceFormat': source_format, 'sourceUris': source_uris, 'writeDisposition': write_disposition, } } if source_format == 'CSV': configuration['load']['skipLeadingRows'] = skip_leading_rows configuration['load']['fieldDelimiter'] = field_delimiter return self.run_with_configuration(configuration) def _split_project_dataset_table_input(self, var_name, project_dataset_table): """ :param var_name: the name of the variable input, for logging and erroring purposes. :type var_name: str :param project_dataset_table: input string in (<project>.)<dataset>.<project> format. if project is not included in the string, self.project_id will be returned in the tuple. 
:type project_dataset_table: str :return: (project, dataset, table) tuple """ table_split = project_dataset_table.split('.') assert len(table_split) == 2 or len(table_split) == 3, ( 'Expected {var} in the format of (<project.)<dataset>.<table>, ' 'got {input}').format(var=var_name, input=project_dataset_table) if len(table_split) == 2: logging.info('project not included in {var}: {input}; using project "{project}"'.format(var=var_name, input=project_dataset_table, project=self.project_id)) dataset, table = table_split return self.project_id, dataset, table else: project, dataset, table = table_split return project, dataset, table def run_with_configuration(self, configuration): """ Executes a BigQuery SQL query. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about the configuration parameter. :param configuration: The configuration parameter maps directly to BigQuery's configuration field in the job object. See https://cloud.google.com/bigquery/docs/reference/v2/jobs for details. """ jobs = self.service.jobs() job_data = { 'configuration': configuration } # Send query and wait for reply. query_reply = jobs \ .insert(projectId=self.project_id, body=job_data) \ .execute() job_id = query_reply['jobReference']['jobId'] job = jobs.get(projectId=self.project_id, jobId=job_id).execute() # Wait for query to finish. while not job['status']['state'] == 'DONE': logging.info('Waiting for job to complete: %s, %s', self.project_id, job_id) time.sleep(5) job = jobs.get(projectId=self.project_id, jobId=job_id).execute() # Check if job had errors. if 'errorResult' in job['status']: raise Exception( 'BigQuery job failed. Final error was: %s', job['status']['errorResult']) return job_id def get_schema(self, dataset_id, table_id): """ Get the schema for a given datset.table. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :param dataset_id: the dataset ID of the requested table :param table_id: the table ID of the requested table :return: a table schema """ tables_resource = self.service.tables() \ .get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \ .execute() return tables_resource['schema'] def get_tabledata(self, dataset_id, table_id, max_results=None, page_token=None, start_index=None): """ Get the data of a given dataset.table. see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list :param dataset_id: the dataset ID of the requested table. :param table_id: the table ID of the requested table. :param max_results: the maximum results to return. :param page_token: page token, returned from a previous call, identifying the result set. :param start_index: zero based index of the starting row to read. :return: map containing the requested rows. """ optional_params = {} if max_results: optional_params['maxResults'] = max_results if page_token: optional_params['pageToken'] = page_token if start_index: optional_params['startIndex'] = start_index return ( self.service.tabledata() .list( projectId=self.project_id, datasetId=dataset_id, tableId=table_id, **optional_params) .execute() ) def run_table_upsert(self, dataset_id, table_resource, project_id=None): """ creates a new, empty table in the dataset; If the table already exists, update the existing table. Since BigQuery does not natively allow table upserts, this is not an atomic operation. :param dataset_id: the dataset to upsert the table into. :type dataset_id: str :param table_resource: a table resource. 
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :type table_resource: dict :param project_id: the project to upsert the table into. If None, project will be self.project_id. :return: """ # check to see if the table exists table_id = table_resource['tableReference']['tableId'] project_id = project_id if project_id is not None else self.project_id tables_list_resp = self.service.tables().list(projectId=project_id, datasetId=dataset_id).execute() while True: for table in tables_list_resp.get('tables', []): if table['tableReference']['tableId'] == table_id: # found the table, do update logging.info('table %s:%s.%s exists, updating.', project_id, dataset_id, table_id) return self.service.tables().update(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute() # If there is a next page, we need to check the next page. if 'nextPageToken' in tables_list_resp: tables_list_resp = self.service.tables()\ .list(projectId=project_id, datasetId=dataset_id, pageToken=tables_list_resp['nextPageToken'])\ .execute() # If there is no next page, then the table doesn't exist. else: # do insert logging.info('table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id) return self.service.tables().insert(projectId=project_id, datasetId=dataset_id, body=table_resource).execute() def run_grant_dataset_view_access(self, source_dataset, view_dataset, view_table, source_project = None, view_project = None): """ Grant authorized view access of a dataset to a view table. If this view has already been granted access to the dataset, do nothing. This method is not atomic. Running it may clobber a simultaneous update. :param source_dataset: the source dataset :type source_dataset: str :param view_dataset: the dataset that the view is in :type view_dataset: str :param view_table: the table of the view :type view_table: str :param source_project: the project of the source dataset. If None, self.project_id will be used. :type source_project: str :param view_project: the project that the view is in. If None, self.project_id will be used. :type view_project: str :return: the datasets resource of the source dataset. """ # Apply default values to projects source_project = source_project if source_project else self.project_id view_project = view_project if view_project else self.project_id # we don't want to clobber any existing accesses, so we have to get # info on the dataset before we can add view access source_dataset_resource = self.service.datasets().get(projectId=source_project, datasetId=source_dataset).execute() access = source_dataset_resource['access'] if 'access' in source_dataset_resource else [] view_access = {'view': {'projectId': view_project, 'datasetId': view_dataset, 'tableId': view_table}} # check to see if the view we want to add already exists. if view_access not in access: logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.', view_project, view_dataset, view_table, source_project, source_dataset) access.append(view_access) return self.service.datasets().patch(projectId=source_project, datasetId=source_dataset, body={'access': access}).execute() else: # if view is already in access, do nothing. logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.', view_project, view_dataset, view_table, source_project, source_dataset) return source_dataset_resource class BigQueryCursor(BigQueryBaseCursor): """ A very basic BigQuery PEP 249 cursor implementation. 
The PyHive PEP 249 implementation was used as a reference: https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py https://github.com/dropbox/PyHive/blob/master/pyhive/common.py """ def __init__(self, service, project_id): super(BigQueryCursor, self).__init__(service=service, project_id=project_id) self.buffersize = None self.page_token = None self.job_id = None self.buffer = [] self.all_pages_loaded = False @property def description(self): """ The schema description method is not currently implemented. """ raise NotImplementedError def close(self): """ By default, do nothing """ pass @property def rowcount(self): """ By default, return -1 to indicate that this is not supported. """ return -1 def execute(self, operation, parameters=None): """ Executes a BigQuery query, and returns the job ID. :param operation: The query to execute. :type operation: string :param parameters: Parameters to substitute into the query. :type parameters: dict """ bql = _bind_parameters(operation, parameters) if parameters else operation self.job_id = self.run_query(bql) def executemany(self, operation, seq_of_parameters): """ Execute a BigQuery query multiple times with different parameters. :param operation: The query to execute. :type operation: string :param parameters: List of dictionary parameters to substitute into the query. :type parameters: list """ for parameters in seq_of_parameters: self.execute(operation, parameters) def fetchone(self): """ Fetch the next row of a query result set. """ return self.next() def next(self): """ Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer. """ if not self.job_id: return None if len(self.buffer) == 0: if self.all_pages_loaded: return None query_results = ( self.service.jobs() .getQueryResults( projectId=self.project_id, jobId=self.job_id, pageToken=self.page_token) .execute() ) if 'rows' in query_results and query_results['rows']: self.page_token = query_results.get('pageToken') fields = query_results['schema']['fields'] col_types = [field['type'] for field in fields] rows = query_results['rows'] for dict_row in rows: typed_row = ([ _bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f']) ]) self.buffer.append(typed_row) if not self.page_token: self.all_pages_loaded = True else: # Reset all state since we've exhausted the results. self.page_token = None self.job_id = None self.page_token = None return None return self.buffer.pop(0) def fetchmany(self, size=None): """ Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to :py:meth:`execute` did not produce any result set or no call was issued yet. """ if size is None: size = self.arraysize result = [] for _ in xrange(size): one = self.fetchone() if one is None: break else: result.append(one) return result def fetchall(self): """ Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. 
a list of tuples). """ result = [] while True: one = self.fetchone() if one is None: break else: result.append(one) return result def get_arraysize(self): """ Specifies the number of rows to fetch at a time with .fetchmany() """ return self._buffersize if self.buffersize else 1 def set_arraysize(self, arraysize): """ Specifies the number of rows to fetch at a time with .fetchmany() """ self.buffersize = arraysize arraysize = property(get_arraysize, set_arraysize) def setinputsizes(self, sizes): """ Does nothing by default """ pass def setoutputsize(self, size, column=None): """ Does nothing by default """ pass def _bind_parameters(operation, parameters): """ Helper method that binds parameters to a SQL query. """ # inspired by MySQL Python Connector (conversion.py) string_parameters = {} for (name, value) in parameters.iteritems(): if value is None: string_parameters[name] = 'NULL' elif isinstance(value, basestring): string_parameters[name] = "'" + _escape(value) + "'" else: string_parameters[name] = str(value) return operation % string_parameters def _escape(s): """ Helper method that escapes parameters to a SQL query. """ e = s e = e.replace('\\', '\\\\') e = e.replace('\n', '\\n') e = e.replace('\r', '\\r') e = e.replace("'", "\\'") e = e.replace('"', '\\"') return e def _bq_cast(string_field, bq_type): """ Helper method that casts a BigQuery row to the appropriate data types. This is useful because BigQuery returns all fields as strings. """ if string_field is None: return None elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP': return int(string_field) elif bq_type == 'FLOAT': return float(string_field) elif bq_type == 'BOOLEAN': assert string_field in set(['true', 'false']) return string_field == 'true' else: return string_field
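# --- Usage sketch (assumptions noted in the comments) --------------------------
# Minimal example of driving the PEP 249 layer defined above.  It assumes an
# Airflow connection named 'bigquery_default' exists with a 'project' field in
# its extras; the table referenced in the queries is only a placeholder.

def example_bigquery_usage():
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')

    # PEP 249 path: connection -> cursor -> execute/fetch
    conn = hook.get_conn()
    cursor = conn.cursor()
    cursor.execute('SELECT word, corpus FROM [publicdata:samples.shakespeare] LIMIT 5')
    for row in cursor.fetchall():
        logging.info('row: %s', row)

    # pandas path: returns a DataFrame assembled from the paginated results
    df = hook.get_pandas_df('SELECT COUNT(*) FROM [publicdata:samples.shakespeare]')
    logging.info('row count: %s', df.iloc[0, 0])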
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for V2 Collective Operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import threading import time from absl.testing import parameterized from tensorflow.python.compat import v2_compat from tensorflow.python.data.experimental.ops import testing as dataset_testing from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import test_util from tensorflow.python.eager import cancellation from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import collective_ops as _collective_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import test class CollectiveOpsV1(object): all_reduce = _collective_ops.all_reduce all_gather = _collective_ops.all_gather broadcast_send = _collective_ops.broadcast_send broadcast_recv = _collective_ops.broadcast_recv class CollectiveOpsV2(object): @staticmethod def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs): group_size = array_ops.identity(group_size) group_key = array_ops.identity(group_key) instance_key = array_ops.identity(instance_key) return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key, *args, **kwargs) @staticmethod def all_gather(t, group_size, group_key, instance_key, *args, **kwargs): group_size = array_ops.identity(group_size) group_key = array_ops.identity(group_key) instance_key = array_ops.identity(instance_key) return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key, *args, **kwargs) @staticmethod def broadcast_send(t, shape, dtype, group_size, group_key, instance_key, *args, **kwargs): group_size = array_ops.identity(group_size) group_key = array_ops.identity(group_key) instance_key = array_ops.identity(instance_key) return _collective_ops.broadcast_send_v2(t, group_size, group_key, instance_key, *args, **kwargs) @staticmethod def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args, **kwargs): group_size = array_ops.identity(group_size) group_key = array_ops.identity(group_key) instance_key = array_ops.identity(instance_key) shape = array_ops.identity(shape) return _collective_ops.broadcast_recv_v2( shape, dtype, group_size, group_key, instance_key, *args, **kwargs) device_combination = ( combinations.combine(device='CPU', communication='RING', required_gpus=0) + combinations.combine( device='GPU', communication=['RING', 'NCCL'], required_gpus=2)) 
collective_op_combinations = combinations.times( combinations.combine( collective_op=[ combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce), combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce), combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather), combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather), ], mode='eager'), device_combination) @combinations.generate( combinations.times( combinations.combine( collective_ops=[ combinations.NamedObject('v1', CollectiveOpsV1), combinations.NamedObject('v2', CollectiveOpsV2) ], mode='eager'), device_combination)) class CollectiveOpsTest(test.TestCase, parameterized.TestCase): def setUp(self): _setup_context() super().setUp() def testReduce(self, collective_ops, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device @def_function.function def run_all_reduce_1device(): with ops.device(dev0): in_value = constant_op.constant([1.]) group_size = 1 group_key = 1 instance_key = 1 return collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication) @def_function.function def run_all_reduce_2devices(): in_value = constant_op.constant([1.]) group_size = 2 group_key = 2 instance_key = 2 collectives = [] with ops.device(dev0): collectives.append( collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication)) with ops.device(dev1): collectives.append( collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication)) return collectives self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5) for result in run_all_reduce_2devices(): self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5) def testGather(self, collective_ops, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device @def_function.function def run_all_gather_1device(): with ops.device(dev0): in_value = constant_op.constant([1.]) group_size = 1 group_key = 1 instance_key = 1 return collective_ops.all_gather( in_value, group_size, group_key, instance_key, communication_hint=communication) @def_function.function def run_all_gather_2devices(): in_value = constant_op.constant([1.]) group_size = 2 group_key = 2 instance_key = 2 collectives = [] with ops.device(dev0): collectives.append( collective_ops.all_gather( in_value, group_size, group_key, instance_key, communication_hint=communication)) with ops.device(dev1): collectives.append( collective_ops.all_gather( in_value, group_size, group_key, instance_key, communication_hint=communication)) return collectives self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5) for result in run_all_gather_2devices(): self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5) def testBroadcast(self, collective_ops, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device @def_function.function def run_broadcast_2devices(): shape = [3] in_value = constant_op.constant([1., 2., 3.], shape=shape) group_size = 2 group_key = 2 instance_key = 2 collectives = [] with ops.device(dev0): collectives.append( collective_ops.broadcast_send( in_value, shape, in_value.dtype, group_size, group_key, instance_key, communication_hint=communication)) with ops.device(dev1): collectives.append( collective_ops.broadcast_recv( shape, in_value.dtype, group_size, group_key, instance_key, communication_hint=communication)) return collectives for result in run_broadcast_2devices(): 
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5) def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device, communication): if device == 'GPU' and context.num_gpus() < 4: self.skipTest('not enough GPU') dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device dev2 = '/device:%s:2' % device dev3 = '/device:%s:3' % device @def_function.function def run_all_reduce_4devices_same_instance_key(): # Use a common instance key for both groups. instance_key = 0 # We will create 2 groups each with 2 devices. group_size = 2 # Group 0 comprises dev0 and dev1. group0_key = 0 # Group 1 comprises dev2 and dev3. group1_key = 1 collectives = [] with ops.device(dev0): collectives.append( collective_ops.all_reduce( constant_op.constant(1.), group_size, group0_key, instance_key)) with ops.device(dev1): collectives.append( collective_ops.all_reduce( constant_op.constant(2.), group_size, group0_key, instance_key)) with ops.device(dev2): collectives.append( collective_ops.all_reduce( constant_op.constant(3.), group_size, group1_key, instance_key)) with ops.device(dev3): collectives.append( collective_ops.all_reduce( constant_op.constant(4.), group_size, group1_key, instance_key)) return collectives results = run_all_reduce_4devices_same_instance_key() self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5) self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5) self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5) self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5) def testCollectiveGroupSizeOne(self, collective_ops, device, communication): dev0 = '/device:%s:0' % device group_size = 1 group_key = 100 in_value = [1., 2., 3., 4.] in_tensor = constant_op.constant(in_value) with ops.device(dev0): reduced_tensor = collective_ops.all_reduce( in_tensor, group_size, group_key, instance_key=100, communication_hint=communication) self.assertAllEqual(in_value, reduced_tensor.numpy()) with ops.device(dev0): gathered_tensor = collective_ops.all_gather( in_tensor, group_size, group_key, instance_key=200, communication_hint=communication) self.assertAllEqual(in_value, gathered_tensor.numpy()) def testCollectiveInvalidKey(self, collective_ops, device, communication): dev0 = '/device:%s:0' % device group_size = 1 group_key = 100 instance_key = 100 in_value = [1., 2., 3., 4.] 
in_tensor = constant_op.constant(in_value) with ops.device(dev0): reduced_tensor = collective_ops.all_reduce( in_tensor, group_size, group_key, instance_key, communication_hint=communication) self.assertAllEqual(in_value, reduced_tensor.numpy()) with self.assertRaisesRegex( errors.InternalError, 'instance 100 expected type 0 and data_type 1 but' ' got type 2 and data_type 1'): with ops.device(dev0): collective_ops.all_gather( in_tensor, group_size, group_key, instance_key, communication_hint=communication) def testMultipleGroups(self, collective_ops, device, communication): if device == 'GPU' and context.num_gpus() < 4: self.skipTest('not enough GPU') num_elements = 4 @def_function.function def run_all_reduce(group_size, group_key): instance_key = group_key input_value = [float(group_key) for i in range(num_elements)] collectives = [] for device_idx in range(group_size): with ops.device('/{}:{}'.format(device, device_idx)): input_tensor = constant_op.constant(input_value) collectives.append( collective_ops.all_reduce( input_tensor, group_size, group_key, instance_key, communication_hint=communication)) return collectives def run_and_assert(group_size, group_key): for reduced_tensor in run_all_reduce(group_size, group_key): self.assertAllEqual( [float(group_key) * group_size for i in range(num_elements)], reduced_tensor.numpy()) run_and_assert(group_size=2, group_key=1) run_and_assert(group_size=3, group_key=2) @combinations.generate( combinations.times( combinations.combine( collective_ops=[ combinations.NamedObject('v2', CollectiveOpsV2) ], mode='eager', max_subdivs_per_device=[-1, 0, 16]), device_combination)) class AllReduceWithSubdivisionsTest(test.TestCase, parameterized.TestCase): def setUp(self): _setup_context() super().setUp() def testReduce(self, collective_ops, device, communication, max_subdivs_per_device): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device @def_function.function def run_all_reduce_1device(): with ops.device(dev0): in_value = constant_op.constant([1.]) group_size = 1 group_key = 1 instance_key = 1 if max_subdivs_per_device == -1: return collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication) else: return collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication, max_subdivs_per_device=max_subdivs_per_device) @def_function.function def run_all_reduce_2devices(): in_value = constant_op.constant([1.]) group_size = 2 group_key = 2 instance_key = 2 collectives = [] with ops.device(dev0): collectives.append( collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication)) with ops.device(dev1): collectives.append( collective_ops.all_reduce( in_value, group_size, group_key, instance_key, communication_hint=communication)) return collectives self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5) for result in run_all_reduce_2devices(): self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5) @combinations.generate( combinations.combine(required_physical_gpus=2, mode='eager')) class XlaTest(test.TestCase, parameterized.TestCase): def testReduce(self): device0 = '/device:GPU:0' device1 = '/device:GPU:1' group_size = 2 group_key = 100 instance_key = 100 results = [] def all_reduce(device): @def_function.function(jit_compile=True) def f(): return _collective_ops.all_reduce_v2([1.], group_size, group_key, instance_key) with ops.device(device): results.append(f()) t0 = threading.Thread(target=all_reduce, 
args=(device0,)) t1 = threading.Thread(target=all_reduce, args=(device1,)) t0.start() t1.start() t0.join() t1.join() self.assertAllEqual(results, [[2.], [2.]]) @combinations.generate(collective_op_combinations) class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase): def setUp(self): _setup_context() super().setUp() def testAbortGroupParamsResolution(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) def abort_fn(): time.sleep(2) context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down') t = threading.Thread(target=abort_fn) t.start() with self.assertRaisesRegex(errors.UnavailableError, 'peer down'): # This hangs on params resolution since we're only launching one # collective for a group size of 2. with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) # After abortion, subsequent collectives should fail immediately. with self.assertRaisesRegex(errors.UnavailableError, 'peer down'): with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) t.join() # Reset the context in order to reset the collective executor. _setup_context() # After reset non-NCCL collectives should work. def collective_fn(): for device in [dev0, dev1]: with ops.device(device): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) def_function.function(collective_fn)() def testAbortInstanceParamsResolution(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) def collective_fn(): for device in [dev0, dev1]: with ops.device(device): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) # First perform a normal all-reduce to complete the group resolution. def_function.function(collective_fn)() def abort_fn(): time.sleep(2) context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down') t = threading.Thread(target=abort_fn) t.start() # Use a different instance key to trigger another instance resolution. instance_key = 101 with self.assertRaisesRegex(errors.UnavailableError, 'peer down'): # This hangs on params resolution since we're only launching one # collective for a group size of 2. with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) # After abortion, subsequent collectives should fail immediately. with self.assertRaisesRegex(errors.UnavailableError, 'peer down'): with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) context._reset_context() # pylint: disable=protected-access t.join() # Reset the context in order to reset the collective executor. _setup_context() # After reset non-NCCL collectives should work. def_function.function(collective_fn)() def testAbortCommunication(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) # First perform a normal collective to finish resolution. 
def collective_fn(): for device in [dev0, dev1]: with ops.device(device): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) def_function.function(collective_fn)() # Launch a collective that hangs, and abort the collective executor after # the launch. def abort_fn(): time.sleep(2) context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down') t = threading.Thread(target=abort_fn) t.start() with self.assertRaisesRegex(errors.UnavailableError, 'peer down'): with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) # After abortion, subsequent collectives should fail immediately. with self.assertRaisesRegex(errors.UnavailableError, 'peer down'): with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) # Reset the context in order to reset the collective executor. t.join() _setup_context() def_function.function(collective_fn)() class OpCancellationTest(test.TestCase, parameterized.TestCase): def setUp(self): _setup_context() super().setUp() @combinations.generate( combinations.times( combinations.combine( collective_op=[ combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce), combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce), combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather), combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather), ], mode='eager'), device_combination)) def testOpErrorNotAbortIfNoCollective(self, collective_op, device, communication): # Do not abort if there's no active collective ops. There could be # exceptions like EOF which we expect users to catch, aborting collective # ops on all op errors intervenes with this workflow. dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 dataset = dataset_ops.Dataset.from_tensors([1.]) @def_function.function def collective_fn(in_tensor): for device in [dev0, dev1]: with ops.device(device): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) @def_function.function def f(): iterator = iter(dataset) collective_fn(next(iterator)) # This next(iterator) should raise EOF. collective_fn(next(iterator)) with self.assertRaises(errors.OutOfRangeError): f() collective_fn(constant_op.constant([1.])) @combinations.generate( combinations.times( combinations.combine( collective_op=[ combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce), combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather), ], mode='eager'), device_combination)) def testOpErrorAbortWithCollective(self, collective_op, device, communication): # Abort v1 collective ops if there're active collective ops at the time of # an op error. This is due to the inability to cancel collective ops, and op # errors may cause running collective ops to hang. dev0 = '/device:%s:0' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) # Make the dataset sleep a while so that the collective is being executed # when the EOF happens. dataset = dataset_ops.Dataset.from_tensors([1.]).apply( dataset_testing.sleep(sleep_microseconds=200)) @def_function.function def f(): # Launch a collective op that won't be able to finish to test abortion # when other ops error. 
with ops.device(dev0): ret = collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) iterator = iter(dataset) next(iterator) # This should raise EOF. next(iterator) return ret with self.assertRaises(errors.OutOfRangeError): f() # Now collective ops is aborted, subsequent collective ops should fail with # the previous error. with self.assertRaises(errors.CancelledError): with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) @combinations.generate( combinations.times( combinations.combine( collective_op=[ combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce), combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather), ], mode='eager'), device_combination)) def testOpErrorNotAbortWithCollective(self, collective_op, device, communication): # Do not abort v2 collective ops even if there're active collective ops at # the time of an op error. We rely cancellation to terminate active # collective ops. dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) @def_function.function def collective_fn(): for device in [dev0, dev1]: with ops.device(device): collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) # Local params resolution cannot be cancelled yet, so we perform a normal # collective so that the group is resolved. collective_fn() # Make the dataset sleep a while so that the collective is being executed # when the EOF happens. dataset = dataset_ops.Dataset.from_tensors([1.]).apply( dataset_testing.sleep(sleep_microseconds=200)) @def_function.function def f(): # Launch a collective op that won't be able to finish to test cancellation # when other ops error. with ops.device(dev0): ret = collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) iterator = iter(dataset) next(iterator) # This should raise EOF. next(iterator) return ret with self.assertRaises(errors.OutOfRangeError): f() # Collective ops shouldn't be aborted and new collectives should be able to # proceed. collective_fn() @combinations.generate( combinations.times( combinations.combine( collective_op=[ combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce), combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather), ], mode='eager'), device_combination)) def testCancelDuringParamResolution(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) t1_cancellation_manager = cancellation.CancellationManager() t2_cancellation_manager = cancellation.CancellationManager() @def_function.function def _collective_fn(x): # Run an assertion to crash one of the two function executions running # collectives. We explicitly cancel the other in response. 
assert_op = check_ops.assert_equal(x, in_tensor) with ops.control_dependencies([assert_op]): return collective_op( in_tensor, group_size, group_key, instance_key, communication_hint=communication) collective_concrete = _collective_fn.get_concrete_function(in_tensor) finish_mu = threading.Lock() finishes = 0 def _placement_wrapper(device, x, my_cancellation, other_cancellation): try: with ops.device(device): cancelable_collective = my_cancellation.get_cancelable_function( collective_concrete) return cancelable_collective(x) except errors.InvalidArgumentError: # `assert_equal` failed for this execution of the function. The other # function would deadlock without cancellation. other_cancellation.start_cancel() except errors.CancelledError: pass nonlocal finishes with finish_mu: finishes += 1 t1 = threading.Thread( target=_placement_wrapper, args=(dev0, constant_op.constant([1.]), t1_cancellation_manager, t2_cancellation_manager)) t2 = threading.Thread( target=_placement_wrapper, # Will cause the assertion to fail args=(dev1, constant_op.constant([2.]), t2_cancellation_manager, t1_cancellation_manager)) t1.start() t2.start() t1.join() t2.join() self.assertEqual(finishes, 2) @combinations.generate(collective_op_combinations) class TimeoutTest(test.TestCase, parameterized.TestCase): def setUp(self): _setup_context() super().setUp() def testTimeout(self, collective_op, device, communication): timeout = 1.5 @def_function.function def run(group_size, reported_group_size=None): group_key = 20 instance_key = 30 tensor = [1., 2., 3., 4.] results = [] if reported_group_size is None: reported_group_size = group_size for i in range(group_size): with ops.device('/{}:{}'.format(device, i)): input_data = constant_op.constant(tensor) result = collective_op( input_data, group_size=reported_group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication, timeout=timeout) results.append(result) return results run(2, 2) start_time = time.time() with self.assertRaisesRegex(errors.DeadlineExceededError, 'Collective has timed out during execution'): run(1, 2) elapsed = time.time() - start_time self.assertAllGreaterEqual(elapsed, timeout) def testParamResolutionAfterTimeout(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device timeout = 1.5 group_key = 20 instance_key = 30 input_data = constant_op.constant([1., 2., 3., 4.]) # This timeout comes from param solution. with self.assertRaisesRegex( errors.DeadlineExceededError, 'Collective has timed out waiting for other workers'): with ops.device(dev0): collective_op( input_data, group_size=2, group_key=group_key, instance_key=instance_key, communication_hint=communication, timeout=timeout) # We launch the second device after the first device times out. This is to # simulate the situation when other workers are slow and the timeout is # short. It should error immediately. 
with self.assertRaisesRegex( errors.DeadlineExceededError, 'Collective has timed out waiting for other workers'): with ops.device(dev1): collective_op( input_data, group_size=2, group_key=group_key, instance_key=instance_key, communication_hint=communication) def testExecutionAfterTimeout(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device timeout = 1.5 group_key = 20 instance_key = 30 input_data = constant_op.constant([1., 2., 3., 4.]) @def_function.function def run(): for device in [dev0, dev1]: with ops.device(device): collective_op( input_data, group_size=2, group_key=group_key, instance_key=instance_key, communication_hint=communication, timeout=timeout) # Run a normal all-reduce to complete param resolution. run() with self.assertRaisesRegex(errors.DeadlineExceededError, 'Collective has timed out during execution'): with ops.device(dev0): collective_op( input_data, group_size=2, group_key=group_key, instance_key=instance_key, communication_hint=communication, timeout=timeout) # We launch the second device after the first device times out. This is to # simulate the situation when other workers are slow and the timeout is # short. It should error immediately. with self.assertRaisesRegex(errors.DeadlineExceededError, 'Collective has timed out during execution'): with ops.device(dev1): # No timeout. collective_op( input_data, group_size=2, group_key=group_key, instance_key=instance_key, communication_hint=communication) @combinations.generate( combinations.times( combinations.combine( collective_op=[ combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce), combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather), ], mode='eager'), device_combination)) class OrderingTest(test.TestCase, parameterized.TestCase): def setUp(self): _setup_context() super().setUp() def testOrdering(self, collective_op, device, communication): dev0 = '/device:%s:0' % device dev1 = '/device:%s:1' % device group_size = 2 group_key = 100 instance_key = 100 in_tensor = constant_op.constant([1.]) with ops.device(dev0): token0 = resource_variable_ops.ResourceVariable(0.) with ops.device(dev1): token1 = resource_variable_ops.ResourceVariable(0.) @def_function.function def f(): # Launch the first collective with token. with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, ordering_token=token0.handle) with ops.device(dev1): collective_op( in_tensor, group_size, group_key, instance_key, ordering_token=token1.handle) # Launch the second collective without token. with ops.device(dev0): collective_op(in_tensor, group_size, group_key, instance_key) with ops.device(dev1): collective_op(in_tensor, group_size, group_key, instance_key) # Launch the third collective with token. with ops.device(dev0): collective_op( in_tensor, group_size, group_key, instance_key, ordering_token=token0.handle) with ops.device(dev1): collective_op( in_tensor, group_size, group_key, instance_key, ordering_token=token1.handle) graph = f.get_concrete_function().graph for device in [dev0, dev1]: # Try to find the third collective, which should have the first collective # as a control input. third = None for op in graph.get_operations(): if (op.type.startswith('Collective') and op.device.endswith(device) and op.control_inputs and op.control_inputs[0].type.startswith('Collective')): self.assertIsNone(third) third = op self.assertIsNotNone(third) # Verify it's not the second collective by looking at the inputs. 
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs)) first = third.control_inputs[0] self.assertEqual(third.device, first.device) # Verify it's not the second collective by looking at the inputs. self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs)) self.assertEmpty(first.control_inputs) class InputPipelineTest(test.TestCase): def setUp(self): super().setUp() _setup_context() def testMap(self): group_size = 2 group_key = 100 instance_key = 100 def create_dataset_and_fetch_one(t): dataset = dataset_ops.Dataset.from_tensor_slices([t]) def reduce_fn(t): return CollectiveOpsV2.all_reduce( t, group_size=group_size, group_key=group_key, instance_key=instance_key) dataset = dataset.map(reduce_fn) return next(iter(dataset)) @def_function.function def f(): with ops.device('CPU:0'): value0 = create_dataset_and_fetch_one([1.]) with ops.device('CPU:1'): value1 = create_dataset_and_fetch_one([2.]) return value0, value1 self.assertAllEqual(self.evaluate(f()), [[3.], [3.]]) def _setup_context(): context._reset_context() test_util.set_logical_devices_to_at_least('CPU', 4) context.ensure_initialized() if __name__ == '__main__': os.environ['NCCL_DEBUG'] = 'INFO' v2_compat.enable_v2_behavior() test.main()
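# --- A minimal sketch (not part of the test suite above), assuming the same
# imports and the _setup_context() helper defined in this file. It distills
# the abort-and-recover pattern used by AbortCollectiveOpsTest: abort the
# collective executor from a side thread, swallow the resulting
# UnavailableError, then reset the context so later collectives can run.
def abort_then_recover(collective_fn, abort_delay=2):
  """Runs collective_fn, aborting collective ops after abort_delay seconds."""
  def abort_fn():
    time.sleep(abort_delay)
    context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

  t = threading.Thread(target=abort_fn)
  t.start()
  try:
    collective_fn()  # Expected to hang until the abort fires.
  except errors.UnavailableError:
    pass  # The abort surfaces as UnavailableError, as asserted in the tests.
  finally:
    t.join()
  _setup_context()  # Reset the collective executor before new collectives.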
from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six import os import numpy from matplotlib._pylab_helpers import Gcf from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\ FigureManagerBase, FigureCanvasBase, NavigationToolbar2, TimerBase from matplotlib.backend_bases import ShowBase from matplotlib.cbook import maxdict from matplotlib.figure import Figure from matplotlib.path import Path from matplotlib.mathtext import MathTextParser from matplotlib.colors import colorConverter from matplotlib import rcParams from matplotlib.widgets import SubplotTool import matplotlib from matplotlib.backends import _macosx class Show(ShowBase): def mainloop(self): _macosx.show() show = Show() class RendererMac(RendererBase): """ The renderer handles drawing/rendering operations. Most of the renderer's methods forward the command to the renderer's graphics context. The renderer does not wrap a C object and is written in pure Python. """ texd = maxdict(50) # a cache of tex image rasters def __init__(self, dpi, width, height): RendererBase.__init__(self) self.dpi = dpi self.width = width self.height = height self.gc = GraphicsContextMac() self.gc.set_dpi(self.dpi) self.mathtext_parser = MathTextParser('MacOSX') def set_width_height (self, width, height): self.width, self.height = width, height def draw_path(self, gc, path, transform, rgbFace=None): if rgbFace is not None: rgbFace = tuple(rgbFace) linewidth = gc.get_linewidth() gc.draw_path(path, transform, linewidth, rgbFace) def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None): if rgbFace is not None: rgbFace = tuple(rgbFace) linewidth = gc.get_linewidth() gc.draw_markers(marker_path, marker_trans, path, trans, linewidth, rgbFace) def draw_path_collection(self, gc, master_transform, paths, all_transforms, offsets, offsetTrans, facecolors, edgecolors, linewidths, linestyles, antialiaseds, urls, offset_position): if offset_position=='data': offset_position = True else: offset_position = False path_ids = [] for path, transform in self._iter_collection_raw_paths( master_transform, paths, all_transforms): path_ids.append((path, transform)) master_transform = master_transform.get_matrix() offsetTrans = offsetTrans.get_matrix() gc.draw_path_collection(master_transform, path_ids, all_transforms, offsets, offsetTrans, facecolors, edgecolors, linewidths, linestyles, antialiaseds, offset_position) def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight, coordinates, offsets, offsetTrans, facecolors, antialiased, edgecolors): gc.draw_quad_mesh(master_transform.get_matrix(), meshWidth, meshHeight, coordinates, offsets, offsetTrans.get_matrix(), facecolors, antialiased, edgecolors) def new_gc(self): self.gc.save() self.gc.set_hatch(None) self.gc._alpha = 1.0 self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA return self.gc def draw_gouraud_triangle(self, gc, points, colors, transform): points = transform.transform(points) gc.draw_gouraud_triangle(points, colors) def get_image_magnification(self): return self.gc.get_image_magnification() def draw_image(self, gc, x, y, im): nrows, ncols, data = im.as_rgba_str() gc.draw_image(x, y, nrows, ncols, data) def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None): # todo, handle props, angle, origins scale = self.gc.get_image_magnification() size = prop.get_size_in_points() texmanager = self.get_texmanager() key = s, size, self.dpi, angle, texmanager.get_font_config() 
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py if im is None: Z = texmanager.get_grey(s, size, self.dpi*scale) Z = numpy.array(255.0 - Z * 255.0, numpy.uint8) gc.draw_mathtext(x, y, angle, Z) def _draw_mathtext(self, gc, x, y, s, prop, angle): scale = self.gc.get_image_magnification() ox, oy, width, height, descent, image, used_characters = \ self.mathtext_parser.parse(s, self.dpi*scale, prop) descent /= scale xd = descent * numpy.sin(numpy.deg2rad(angle)) yd = descent * numpy.cos(numpy.deg2rad(angle)) x = numpy.round(x + ox + xd) y = numpy.round(y + oy - yd) gc.draw_mathtext(x, y, angle, 255 - image.as_array()) def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None): if ismath: self._draw_mathtext(gc, x, y, s, prop, angle) else: family = prop.get_family() weight = prop.get_weight() # transform weight into string for the native backend if weight >= 700: weight = 'bold' else: weight = 'normal' style = prop.get_style() points = prop.get_size_in_points() size = self.points_to_pixels(points) gc.draw_text(x, y, six.text_type(s), family, size, weight, style, angle) def get_text_width_height_descent(self, s, prop, ismath): if ismath=='TeX': # todo: handle props texmanager = self.get_texmanager() fontsize = prop.get_size_in_points() w, h, d = texmanager.get_text_width_height_descent(s, fontsize, renderer=self) return w, h, d if ismath: ox, oy, width, height, descent, fonts, used_characters = \ self.mathtext_parser.parse(s, self.dpi, prop) return width, height, descent family = prop.get_family() weight = prop.get_weight() # transform weight into string for the native backend if weight >= 700: weight = 'bold' else: weight = 'normal' style = prop.get_style() points = prop.get_size_in_points() size = self.points_to_pixels(points) width, height, descent = self.gc.get_text_width_height_descent( six.text_type(s), family, size, weight, style) return width, height, descent def flipy(self): return False def points_to_pixels(self, points): return points/72.0 * self.dpi def option_image_nocomposite(self): return True class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase): """ The GraphicsContext wraps a Quartz graphics context. All methods are implemented at the C-level in macosx.GraphicsContext. These methods set drawing properties such as the line style, fill color, etc. The actual drawing is done by the Renderer, which draws into the GraphicsContext. 
""" def __init__(self): GraphicsContextBase.__init__(self) _macosx.GraphicsContext.__init__(self) def set_alpha(self, alpha): GraphicsContextBase.set_alpha(self, alpha) _alpha = self.get_alpha() _macosx.GraphicsContext.set_alpha(self, _alpha, self.get_forced_alpha()) rgb = self.get_rgb() _macosx.GraphicsContext.set_foreground(self, rgb) def set_foreground(self, fg, isRGBA=False): GraphicsContextBase.set_foreground(self, fg, isRGBA) rgb = self.get_rgb() _macosx.GraphicsContext.set_foreground(self, rgb) def set_graylevel(self, fg): GraphicsContextBase.set_graylevel(self, fg) _macosx.GraphicsContext.set_graylevel(self, fg) def set_clip_rectangle(self, box): GraphicsContextBase.set_clip_rectangle(self, box) if not box: return _macosx.GraphicsContext.set_clip_rectangle(self, box.bounds) def set_clip_path(self, path): GraphicsContextBase.set_clip_path(self, path) if not path: return path = path.get_fully_transformed_path() _macosx.GraphicsContext.set_clip_path(self, path) ######################################################################## # # The following functions and classes are for pylab and implement # window/figure managers, etc... # ######################################################################## def draw_if_interactive(): """ For performance reasons, we don't want to redraw the figure after each draw command. Instead, we mark the figure as invalid, so that it will be redrawn as soon as the event loop resumes via PyOS_InputHook. This function should be called after each draw event, even if matplotlib is not running interactively. """ if matplotlib.is_interactive(): figManager = Gcf.get_active() if figManager is not None: figManager.canvas.invalidate() def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ FigureClass = kwargs.pop('FigureClass', Figure) figure = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, figure) def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. """ canvas = FigureCanvasMac(figure) manager = FigureManagerMac(canvas, num) return manager class TimerMac(_macosx.Timer, TimerBase): ''' Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation run loops for timer events. Attributes: * interval: The time between timer events in milliseconds. Default is 1000 ms. * single_shot: Boolean flag indicating whether this timer should operate as single shot (run once and then stop). Defaults to False. * callbacks: Stores list of (func, args) tuples that will be called upon timer events. This list can be manipulated directly, or the functions add_callback and remove_callback can be used. ''' # completely implemented at the C-level (in _macosx.Timer) class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase): """ The canvas the figure renders into. Calls the draw and print fig methods, creates the renderers, etc... Public attribute figure - A Figure instance Events such as button presses, mouse movements, and key presses are handled in the C code and the base class methods button_press_event, button_release_event, motion_notify_event, key_press_event, and key_release_event are called from there. 
""" filetypes = FigureCanvasBase.filetypes.copy() filetypes['bmp'] = 'Windows bitmap' filetypes['jpeg'] = 'JPEG' filetypes['jpg'] = 'JPEG' filetypes['gif'] = 'Graphics Interchange Format' filetypes['tif'] = 'Tagged Image Format File' filetypes['tiff'] = 'Tagged Image Format File' def __init__(self, figure): FigureCanvasBase.__init__(self, figure) width, height = self.get_width_height() self.renderer = RendererMac(figure.dpi, width, height) _macosx.FigureCanvas.__init__(self, width, height) def draw_idle(self, *args, **kwargs): self.invalidate() def resize(self, width, height): self.renderer.set_width_height(width, height) dpi = self.figure.dpi width /= dpi height /= dpi self.figure.set_size_inches(width, height) FigureCanvasBase.resize_event(self) def _print_bitmap(self, filename, *args, **kwargs): # In backend_bases.py, print_figure changes the dpi of the figure. # But since we are essentially redrawing the picture, we need the # original dpi. Pick it up from the renderer. dpi = kwargs['dpi'] old_dpi = self.figure.dpi self.figure.dpi = self.renderer.dpi width, height = self.figure.get_size_inches() width, height = width*dpi, height*dpi filename = six.text_type(filename) self.write_bitmap(filename, width, height, dpi) self.figure.dpi = old_dpi def print_bmp(self, filename, *args, **kwargs): self._print_bitmap(filename, *args, **kwargs) def print_jpg(self, filename, *args, **kwargs): self._print_bitmap(filename, *args, **kwargs) def print_jpeg(self, filename, *args, **kwargs): self._print_bitmap(filename, *args, **kwargs) def print_tif(self, filename, *args, **kwargs): self._print_bitmap(filename, *args, **kwargs) def print_tiff(self, filename, *args, **kwargs): self._print_bitmap(filename, *args, **kwargs) def print_gif(self, filename, *args, **kwargs): self._print_bitmap(filename, *args, **kwargs) def new_timer(self, *args, **kwargs): """ Creates a new backend-specific subclass of :class:`backend_bases.Timer`. This is useful for getting periodic events through the backend's native event loop. Implemented only for backends with GUIs. optional arguments: *interval* Timer interval in milliseconds *callbacks* Sequence of (func, args, kwargs) where func(*args, **kwargs) will be executed by the timer every *interval*. 
""" return TimerMac(*args, **kwargs) class FigureManagerMac(_macosx.FigureManager, FigureManagerBase): """ Wrap everything up into a window for the pylab interface """ def __init__(self, canvas, num): FigureManagerBase.__init__(self, canvas, num) title = "Figure %d" % num _macosx.FigureManager.__init__(self, canvas, title) if rcParams['toolbar']=='toolbar2': self.toolbar = NavigationToolbar2Mac(canvas) else: self.toolbar = None if self.toolbar is not None: self.toolbar.update() def notify_axes_change(fig): 'this will be called whenever the current axes is changed' if self.toolbar != None: self.toolbar.update() self.canvas.figure.add_axobserver(notify_axes_change) if matplotlib.is_interactive(): self.show() self.canvas.draw_idle() def close(self): Gcf.destroy(self.num) class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2): def __init__(self, canvas): NavigationToolbar2.__init__(self, canvas) def _init_toolbar(self): basedir = os.path.join(rcParams['datapath'], "images") _macosx.NavigationToolbar2.__init__(self, basedir) def draw_rubberband(self, event, x0, y0, x1, y1): self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1)) def release(self, event): self.canvas.remove_rubberband() def set_cursor(self, cursor): _macosx.set_cursor(cursor) def save_figure(self, *args): filename = _macosx.choose_save_file('Save the figure', self.canvas.get_default_filename()) if filename is None: # Cancel return self.canvas.print_figure(filename) def prepare_configure_subplots(self): toolfig = Figure(figsize=(6,3)) canvas = FigureCanvasMac(toolfig) toolfig.subplots_adjust(top=0.9) tool = SubplotTool(self.canvas.figure, toolfig) return canvas def set_message(self, message): _macosx.NavigationToolbar2.set_message(self, message.encode('utf-8')) def dynamic_update(self): self.canvas.draw_idle() ######################################################################## # # Now just provide the standard names that backend.__init__ is expecting # ######################################################################## FigureCanvas = FigureCanvasMac FigureManager = FigureManagerMac
# coding=utf-8 # Copyright 2022 The Deeplab2 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This file contains utility functions for the model code.""" from typing import Any, List, MutableMapping, MutableSequence, Optional, Set import tensorflow as tf from deeplab2 import common from deeplab2 import config_pb2 layers = tf.keras.layers _PREDICTION_WITH_NEAREST_UPSAMPLING = ( common.PRED_INSTANCE_KEY, common.PRED_INSTANCE_CENTER_KEY, common.PRED_INSTANCE_SCORES_KEY, common.PRED_PANOPTIC_KEY, common.PRED_SEMANTIC_KEY, common.PRED_CENTER_HEATMAP_KEY, common.PRED_NEXT_PANOPTIC_KEY, common.PRED_CONCAT_NEXT_PANOPTIC_KEY, ) _PREDICTION_WITH_BILINEAR_UPSAMPLING = ( common.PRED_SEMANTIC_PROBS_KEY, common.PRED_DEPTH_KEY, common.PRED_OFFSET_MAP_KEY, ) _INPUT_WITH_NEAREST_UPSAMPLING = ( common.GT_INSTANCE_CENTER_KEY, ) _INPUT_WITH_BILINEAR_UPSAMPLING = ( common.IMAGE, common.GT_INSTANCE_REGRESSION_KEY ) def _scale_helper(value, scale): if isinstance(value, tf.Tensor): return tf.cast( (tf.cast(value, dtype=tf.float32) - 1.0) * scale + 1.0, dtype=tf.int32) else: return int((float(value) - 1.0) * scale + 1.0) def scale_mutable_sequence(input_sequence: MutableSequence[int], scale: float) -> MutableSequence[int]: return [_scale_helper(x, scale) for x in input_sequence] def scale_int_list(int_list, scale): return [int(x * scale) for x in int_list] def undo_image_preprocessing(image_in: tf.Tensor, method: str, perform_crop: bool, regions_to_crop: List[int], output_shape: List[int]) -> tf.Tensor: """Undoes the image preprocessing. In particular, this function slices out the valid regions (determined by `regions_to_crop`) in the input when perform_crop is True. After that, we resize the results to the desired `output_shape`. Args: image_in: Input image Tensor with shape [batch, height, width, n_channels]. method: Image resize method. perform_crop: Boolean, performing crop or not. regions_to_crop: The regions to crop [height, width]. Will only apply cropping at the bottom right. output_shape: Desired shape after resizing [height, width]. Returns: Outputs after cropping (if perform_crop = True) and resizing. """ if perform_crop: image_out = image_in[ :, :regions_to_crop[0], :regions_to_crop[1], :] else: image_out = image_in return resize_align_corners(image_out, output_shape, method=method) def undo_preprocessing(input_or_prediction_dict: MutableMapping[str, Any], regions_to_crop: List[int], output_shape: List[int]) -> MutableMapping[str, Any]: """Undoes preprocessing for predictions. Args: input_or_prediction_dict: A dictionary storing different types of inputs or predictions. regions_to_crop: The regions to crop [height, width]. Will only apply cropping at the bottom right. output_shape: Desired shape after resizing [height, width]. Returns: inputs or predictions after cropping (if perform_crop = True) and resizing. 
""" for key in input_or_prediction_dict.keys(): if key in _PREDICTION_WITH_NEAREST_UPSAMPLING or key in _INPUT_WITH_NEAREST_UPSAMPLING: input_or_prediction_dict[key] = tf.squeeze( undo_image_preprocessing( tf.expand_dims(input_or_prediction_dict[key], 3), 'nearest', perform_crop=True, regions_to_crop=regions_to_crop, output_shape=output_shape), axis=3) elif key in _PREDICTION_WITH_BILINEAR_UPSAMPLING or key in _INPUT_WITH_BILINEAR_UPSAMPLING: input_or_prediction_dict[key] = undo_image_preprocessing( input_or_prediction_dict[key], 'bilinear', perform_crop=True, regions_to_crop=regions_to_crop, output_shape=output_shape) else: # We only undo preprocessing for those defined in # _{PREDICTION,INPUT}_WITH_{NEAREST,BILINEAR}_UPSAMPLING. # Other intermediate results are skipped. continue return input_or_prediction_dict def add_zero_padding(input_tensor: tf.Tensor, kernel_size: int, rank: int) -> tf.Tensor: """Adds zero-padding to the input_tensor.""" pad_total = kernel_size - 1 pad_begin = pad_total // 2 pad_end = pad_total - pad_begin if rank == 3: return tf.pad( input_tensor, paddings=[[pad_begin, pad_end], [pad_begin, pad_end], [0, 0]]) else: return tf.pad( input_tensor, paddings=[[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]]) def resize_and_rescale_offsets(input_tensor: tf.Tensor, target_size): """Bilinearly resizes and rescales the offsets. Args: input_tensor: A tf.Tensor of shape [batch, height, width, 2]. target_size: A list or tuple or 1D tf.Tensor that specifies the height and width after resizing. Returns: The input_tensor resized to shape `[batch, target_height, target_width, 2]`. Moreover, the offsets along the y-axis are rescaled by a factor equal to (target_height - 1) / (reference_height - 1) and the offsets along the x-axis are rescaled by a factor equal to (target_width - 1) / (reference_width - 1). """ input_size_y = tf.shape(input_tensor)[1] input_size_x = tf.shape(input_tensor)[2] scale_y = tf.cast(target_size[0] - 1, tf.float32) / tf.cast( input_size_y - 1, tf.float32) scale_x = tf.cast(target_size[1] - 1, tf.float32) / tf.cast( input_size_x - 1, tf.float32) target_y, target_x = tf.split( value=input_tensor, num_or_size_splits=2, axis=3) target_y *= scale_y target_x *= scale_x target = tf.concat([target_y, target_x], 3) return resize_bilinear(target, target_size) def resize_align_corners(input_tensor, target_size, method='bilinear'): """Resizes the input_tensor to target_size. This returns the same output as tf.compat.v1.image.resize(input_tensor, target_size, align_corners=True). Args: input_tensor: A tf.Tensor of shape [batch, height, width, channels]. target_size: A list or tuple or 1D tf.Tensor that specifies the height and width after resizing. method: An optional string specifying the method used for resizing. Supported options are 'nearest' and 'bilinear'. Returns: The resized tensor. Raises: ValueError: An error occurs if 1) the input tensor's rank is not 4 or 2) the resizing method is not supported. """ if method == 'bilinear': tf_method = tf.compat.v1.image.ResizeMethod.BILINEAR elif method == 'nearest': tf_method = tf.compat.v1.image.ResizeMethod.NEAREST_NEIGHBOR else: raise ValueError('The given method %s is not supported. Please use bilinear' ' or nearest.' 
% method) tf.debugging.assert_rank( input_tensor, 4, message='Input tensor to resize method should have rank of 4.') return tf.compat.v1.image.resize( input_tensor, target_size, method=tf_method, align_corners=True, name='resize_align_corners') def resize_bilinear(images, size, align_corners=True, name=None): """TPU memory efficient version of tf.compat.v1.image.resize_bilinear. ResizeBilinear on TPU requires padded batch and channel dimensions. On a TPUv3, the worst case could lead to 256x memory consumption, if the input is, for example, [1, 257, 513, 1]. In this function, we replace the default resize_bilinear by two resize_bilinear operations, which put one image axis on the channel axis. This reduces TPU padding when batch * channel is small and height * width is large. Args: images: Input image of shape [B, H, W, C]. size: A list of two elements: [height, width]. The new size for the images. align_corners: Whether to align corners of the image. name: Name of the operation. Returns: Resized image. """ _, height, width, channel = images.get_shape().as_list() if height == size[0] and width == size[1]: return images dtype = images.dtype images = tf.cast(images, tf.float32) # We check the channel axis only since the batch size is similar (usually 1 or # 2). In this way, this if-else easily supports dynamic batch size without # using tf.cond(). if channel > 32 or not align_corners: images = tf.compat.v1.image.resize_bilinear( images, size, align_corners=align_corners, name=name) else: images = tf.transpose(images, [0, 3, 1, 2]) images = tf.compat.v1.image.resize_bilinear( images, [channel, size[0]], align_corners=align_corners, name=name + '_height' if name else None) images = tf.transpose(images, [0, 1, 3, 2]) images = tf.compat.v1.image.resize_bilinear( images, [channel, size[1]], align_corners=align_corners, name=name + '_width' if name else None) images = tf.transpose(images, [0, 3, 2, 1]) return tf.cast(images, dtype) def make_divisible(value: float, divisor: int, min_value: Optional[float] = None) -> int: """Ensures all layers have channels that are divisible by the divisor. Args: value: A `float` of original value. divisor: An `int` of the divisor that needs to be checked upon. min_value: A `float` of minimum value threshold. Returns: The adjusted value in `int` that is divisible by divisor. Raises: ValueError: Minimual value should be divisible by divisor. """ if min_value is None: min_value = divisor elif min_value % divisor != 0: raise ValueError('Minimual value should be divisible by divisor.') new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_value < 0.9 * value: new_value += divisor return int(new_value) def transpose_and_reshape_for_attention_operation(inputs): """Sequentially transposes and reshapes the tensor. Args: inputs: An input [batch, num_heads, length, channel] tensor. Returns: output: An output [batch, length, num_heads * channel] tensor. """ _, num_heads, length, channel = inputs.get_shape().as_list() transposed_inputs = tf.transpose(inputs, [0, 2, 1, 3]) return tf.reshape(transposed_inputs, [-1, length, num_heads * channel]) def reshape_and_transpose_for_attention_operation(inputs, num_heads): """Sequentially reshapes and transposes the tensor. Args: inputs: An input [batch, length, num_heads * channel] tensor. num_heads: An integer, the number of attention heads. Returns: output: An output [batch, num_heads, length, channel] tensor. 
""" _, length, channels = inputs.get_shape().as_list() inputs = tf.reshape(inputs, [-1, length, num_heads, channels // num_heads]) return tf.transpose(inputs, [0, 2, 1, 3]) def get_layer_name(private_attribute_name): if private_attribute_name[0] != '_': raise ValueError('Private attribute name should start with a \'_\'.') return private_attribute_name[1:] def get_stem_current_name(index): return '_basic_block{}'.format(index + 1) def get_low_level_conv_fusion_conv_current_names(index): return ('_low_level_conv{}'.format(index + 1), '_fusion_conv{}'.format(index + 1)) def get_conv_bn_act_current_name(index, use_bn, activation): name = '_conv{}'.format(index + 1) if use_bn: name += '_bn' if (activation is not None and activation.lower() != 'none' and activation.lower() != 'linear'): name += '_act' return name def safe_setattr(obj, name, value): """A conflict-safe version of setattr(). Different from setattr(), this function raises ValueError if the object already has an attribute with the same name. Args: obj: An object whose attribute has to be set. name: A string, the name of the attribute. value: Any type, the value given to the attribute. Raises: ValueError: If the object already has an attribute with the same name. """ if hasattr(obj, name): raise ValueError('The object already has an attribute with the same name.') setattr(obj, name, value) def pad_sequence_with_none(sequence, target_length): return list(sequence) + [None] * (target_length - len(sequence)) def strided_downsample(input_tensor, target_size): """Strided downsamples a tensor to the target size. The stride_height and stride_width is computed by (height - 1) // (target_height - 1) and (width - 1) // (target_width - 1). We raise an error if stride_height != stride_width, since this is not intended in our current use cases. But this check can be removed if different strides are desired. This function supports static shape only. Args: input_tensor: A [batch, height, width] tf.Tensor to be downsampled. target_size: A list of two integers, [target_height, target_width], the target size after downsampling. Returns: output_tensor: A [batch, target_height, target_width] tf.Tensor, the downsampled result. Raises: ValueError: If the input cannot be downsampled with integer stride, i.e., (height - 1) % (target_height - 1) != 0, or (width - 1) % (target_width - 1) != 0. ValueError: If the height axis stride does not equal to the width axis stride. """ input_height, input_width = input_tensor.get_shape().as_list()[1:3] target_height, target_width = target_size if ((input_height - 1) % (target_height - 1) or (input_width - 1) % (target_width - 1)): raise ValueError('The input cannot be downsampled with integer striding. ' 'Please ensure (height - 1) % (target_height - 1) == 0 ' 'and (width - 1) % (target_width - 1) == 0.') stride_height = (input_height - 1) // (target_height - 1) stride_width = (input_width - 1) // (target_width - 1) if stride_height != stride_width: raise ValueError('The height axis stride does not equal to the width axis ' 'stride.') if stride_height > 1 or stride_width > 1: return input_tensor[:, ::stride_height, ::stride_width] return input_tensor def get_stuff_class_ids(num_thing_stuff_classes: int, thing_class_ids: List[int], void_label: int) -> List[int]: """Computes stuff_class_ids. The stuff_class_ids are computed from the num_thing_stuff_classes, the thing_class_ids and the void_label. Args: num_thing_stuff_classes: An integer specifying the number of stuff and thing classes, not including `void` class. 
thing_class_ids: A List of integers of length [num_thing_classes] containing thing class indices. void_label: An integer specifying the void label. Returns: stuff_class_ids: A sorted List of integers of shape [num_stuff_classes] containing stuff class indices. """ if void_label >= num_thing_stuff_classes: thing_stuff_class_ids = list(range(num_thing_stuff_classes)) else: thing_stuff_class_ids = [_ for _ in range(num_thing_stuff_classes + 1) if _ is not void_label] return sorted(set(thing_stuff_class_ids) - set(thing_class_ids)) def get_supported_tasks( config: config_pb2.ExperimentOptions) -> Set[str]: """Gets currently supported tasks for each meta_architecture. Args: config: A config_pb2.ExperimentOptions configuration. Returns: supported_tasks: A set of strings (see common.py), optionally - common.TASK_PANOPTIC_SEGMENTATION, - common.TASK_INSTANCE_SEGMENTATION, - common.TASK_VIDEO_PANOPTIC_SEGMENTATION, """ supported_tasks = set() meta_architecture = config.model_options.WhichOneof('meta_architecture') is_max_deeplab = meta_architecture == 'max_deeplab' is_motion_deeplab = meta_architecture == 'motion_deeplab' is_panoptic_deeplab = meta_architecture == 'panoptic_deeplab' is_vip_deeplab = meta_architecture == 'vip_deeplab' is_panoptic = ( (config.model_options.panoptic_deeplab.instance.enable and is_panoptic_deeplab) or is_motion_deeplab or is_max_deeplab or is_vip_deeplab) if is_panoptic: supported_tasks.add(common.TASK_PANOPTIC_SEGMENTATION) # MaX-DeepLab does not support evaluating instance segmentation mask AP yet. if not is_max_deeplab: supported_tasks.add(common.TASK_INSTANCE_SEGMENTATION) if is_motion_deeplab or is_vip_deeplab: supported_tasks.add(common.TASK_VIDEO_PANOPTIC_SEGMENTATION) if is_vip_deeplab: supported_tasks.add(common.TASK_DEPTH_AWARE_VIDEO_PANOPTIC_SEGMENTATION) return supported_tasks
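# --- A small, hypothetical usage sketch (not part of the library) exercising
# two of the helpers above; the numeric values are illustrative only.
def _demo_utils():
  # make_divisible: round 43 channels to a multiple of 8 without shrinking
  # the value by more than 10% (43 -> 40).
  channels = make_divisible(value=43, divisor=8)
  # get_stuff_class_ids: with 19 thing+stuff classes, thing ids 11..18 and a
  # void label outside the class range, the stuff ids are the remaining 0..10.
  stuff_ids = get_stuff_class_ids(
      num_thing_stuff_classes=19,
      thing_class_ids=list(range(11, 19)),
      void_label=255)
  return channels, stuff_ids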
# -*- coding: utf-8 -*- """ exporter.stores.github ~~~~~~~~~~~~~~~~~ This module implements Github issue store for exporter. :copyright: (c) 2014 by Vedarth Kulkarni. :license: MIT, see LICENSE for more details. """ import json import pytz import datetime import requests from dateutil.tz import tzlocal class GithubCredentials(object): """Github credentials. Necessary credentials to access/create/update issues on Github. Basic Usage: >>> from exporter.stores.github import GithubCredentials >>> gc = GithubCredentials(user="username", repo="repo name", auth_token="personla auth token") """ def __init__(self, user, repo, auth_token): self.user, self.repo, self.auth_token = user, repo, auth_token class GithubStore(object): """Github Issue Store. """ def __init__(self, credentials): '''Initializes Github issue store. :params credentials: object of :class:`GithubCredentials` with proper credentials ''' assert type(credentials) is GithubCredentials,\ 'Credentials object is not of type GithubCredentials' self.credentials = credentials self.github_request = GithubRequest(credentials=credentials) def create_or_update_issue(self, title, body, culprit, labels, **kwargs): '''Creates or comments on existing issue in the store. :params title: title for the issue :params body: body, the content of the issue :params culprit: string used to identify the cause of the issue, also used for aggregation :params labels: (optional) list of labels attached to the issue :returns: issue object :rtype: :class:`exreporter.stores.github.GithubIssue` ''' issues = self.search(q=culprit, labels=labels) self.time_delta = kwargs.pop('time_delta') self.max_comments = kwargs.pop('max_comments') if issues: latest_issue = issues.pop(0) return self.handle_issue_comment( issue=latest_issue, title=title, body=body, **kwargs) else: return self.create_issue( title=title, body=body, **kwargs) def search(self, q, labels, state='open,closed', **kwargs): """Search for issues in Github. :param q: query string to search :param state: state of the issue :returns: list of issue objects :rtype: list """ search_result = self.github_request.search(q=q, state=state, **kwargs) if search_result['total_count'] > 0: return list( map(lambda issue_dict: GithubIssue( github_request=self.github_request, **issue_dict), search_result['items']) ) def handle_issue_comment(self, issue, title, body, **kwargs): """Decides whether to comment or create a new issue when trying to comment. :param issue: issue on which the comment is to be added :param title: title of the issue if new one is to be created :param body: body of the issue/comment to be created :returns: newly created issue or the one on which comment was created :rtype: :class:`exreporter.stores.github.GithubIssue` """ if self._is_time_delta_valid(issue.updated_time_delta): if issue.comments_count < self.max_comments: issue.comment(body=body) return issue else: return self.create_issue(title=title, body=body, **kwargs) def _is_time_delta_valid(self, delta): return delta > self.time_delta def create_issue(self, title, body, labels=None): """Creates a new issue in Github. 
:params title: title of the issue to be created :params body: body of the issue to be created :params labels: (optional) list of labels for the issue :returns: newly created issue :rtype: :class:`exreporter.stores.github.GithubIssue` """ kwargs = self.github_request.create( title=title, body=body, labels=labels) return GithubIssue(github_request=self.github_request, **kwargs) class GithubIssue(object): """Python object representation of issue on Github. """ def __init__(self, github_request, **kwargs): """Initializes Github issue object. For **kwargs please refer Github's documentation: https://developer.github.com/v3/issues/#get-a-single-issue :params github_request: object of :class:`GithubRequest` used to make HTTP requests to Github """ self.github_request = github_request for attr, value in kwargs.items(): setattr(self, attr, value) @property def comments_count(self): """Returns number of comments on the issue. """ return int(self.comments) @property def updated_time_delta(self): """Returns the number of seconds ago the issue was updated from current time. """ local_timezone = tzlocal() update_at = datetime.datetime.strptime(self.updated_at, '%Y-%m-%dT%XZ') update_at_utc = pytz.utc.localize(update_at) update_at_local = update_at_utc.astimezone(local_timezone) delta = datetime.datetime.now(local_timezone) - update_at_local return int(delta.total_seconds()) def open_issue(self): """Changes the state of issue to 'open'. """ self.github_request.update(issue=self, state='open') self.state = 'open' def comment(self, body): """Adds a comment to the issue. :params body: body, content of the comment :returns: issue object :rtype: :class:`exreporter.stores.github.GithubIssue` """ self.github_request.comment(issue=self, body=body) if self.state == 'closed': self.open_issue() return self class GithubRequest(object): """This class objects are created with valid credentials. The objects have methods to create/update issues and to add comments on Github. Basic Usage: >>> from exreporter.stores.github import GithubCredentials, GithubRequest >>> gc = GithubCredentials(user="username", repo="repo name", auth_token="personla auth token") >>> gr = GithubRequest(credentials=gc) >>> gr.create(title="title", body="body", labels=['labels']) """ def __init__(self, credentials): self.user = credentials.user self.repo = credentials.repo self.session = requests.Session() self.session.headers.update({ 'Authorization': 'token {}'.format(credentials.auth_token) }) def create(self, title, body, labels): """Create an issue in Github. For JSON data returned by Github refer: https://developer.github.com/v3/issues/#create-an-issue :param title: title of the issue :param body: body of the issue :param labels: list of labels for the issue :returns: dict of JSON data returned by Github of newly created issue :rtype: `dict` """ url = "https://api.github.com/repos/{}/{}/issues".format( self.user, self.repo) data = { 'title': title, 'body': body, } if labels: data.update({'labels': labels}) response = self.session.post(url, json.dumps(data)) assert response.status_code == 201 return json.loads(response.content) def comment(self, issue, body): """Comment on existing issue on Github. 
        For JSON data returned by Github refer:
        https://developer.github.com/v3/issues/comments/#create-a-comment

        :param issue: object of an existing issue
        :param body: body of the comment
        :returns: dict of JSON data returned by Github of the new comment
        :rtype: `dict`
        """
        url = issue.comments_url
        data = {'body': body}

        response = self.session.post(url, json.dumps(data))
        assert response.status_code == 201
        return json.loads(response.content)

    def update(self, issue, **kwargs):
        """Update an existing issue on Github.

        For JSON data returned by Github refer:
        https://developer.github.com/v3/issues/#edit-an-issue

        :param issue: object of an existing issue
        :returns: dict of JSON data returned by Github.
        :rtype: `dict`
        """
        url = issue.url

        response = self.session.patch(url, json.dumps(kwargs))
        assert response.status_code == 200
        return json.loads(response.content)

    def search(self, q, state, labels):
        """Search for issues in Github.

        For JSON data returned by Github refer:
        https://developer.github.com/v3/search/#search-issues

        :param q: query string for search
        :param state: state(s) of the issue, comma-separated (e.g. 'open,closed')
        :param labels: labels of the issue
        :returns: dictionary of JSON data returned by Github
        :rtype: `dict`
        """
        # TODO: add support for search with labels
        labels = ['"{}"'.format(label) for label in labels]
        q = "'{}'+state:{}+label:{}".format(
            q, '+state:'.join(state.split(',')), ','.join(labels))
        sort = "updated"

        url = "https://api.github.com/search/"\
            "issues?q={}+repo:{}/{}&sort={}".format(
                q, self.user, self.repo, sort)

        response = self.session.get(url)
        assert response.status_code == 200
        return json.loads(response.content)
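# --- A minimal usage sketch (not part of the module): build credentials,
# construct a GithubStore, and report an error as an issue. The user, repo,
# token, culprit and the time_delta/max_comments values are placeholders;
# time_delta is compared against the seconds since the issue's last update,
# and max_comments caps how many comments an aggregated issue may collect.
def _demo_report_error():
    credentials = GithubCredentials(
        user="someuser", repo="somerepo", auth_token="personal-auth-token")
    store = GithubStore(credentials=credentials)
    issue = store.create_or_update_issue(
        title="ZeroDivisionError in worker",
        body="Traceback (most recent call last): ...",
        culprit="worker.py:42",  # used as the search/aggregation key
        labels=["bug"],
        time_delta=60,
        max_comments=20)
    return issue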
from sqlite3 import connect, Row class SQL(object): def __init__(self, query_string): self.query_string = query_string class _TableQuery(object): def __init__(self, cursor, table_name): self._cursor = cursor self._table_name = table_name self._where_columns = None @property def cursor(self): return self._cursor def validate(self): if not Table.exists(cursor=self._cursor, table_name=self._table_name) and self._table_name != "sqlite_master": raise ValueError("Table must exist to select entries from it") @staticmethod def _build_where(where_conditions): """ Builds a tuple containing the string that constitutes the query's WHERE clause and a tuple containing the values to be substituted into the clause string. :param where_conditions: dictionary mapping column names to the values to be searched against. :return: """ if where_conditions is None: return "", tuple() clauses = [f"""[{_key}] {'IS NULL' if _val is None else '= ?'}""" for _key, _val in where_conditions.items()] values = tuple([_value for _value in where_conditions.values() if _value is not None]) return f"""WHERE {' AND '.join(clauses)}""", values def where(self, **column_conditions): self._where_columns = column_conditions if len(column_conditions) else None return self class From(_TableQuery): def __init__(self, cursor, table_name): super(From, self).__init__(cursor, table_name) self._select_columns = None def select(self, *columns): self._select_columns = columns if len(columns) else None return self def execute(self): # check if the table exists before querying for rows self.validate() select_columns = self._select_columns or [] where_columns = (self._where_columns or {}).keys() all_columns = set([_column for _list in [select_columns, where_columns] for _column in _list]) if any(column_name is None or len(column_name) == 0 for column_name in all_columns): raise ValueError("The column names cannot be None or empty") if len(select_columns) == 0: select_columns = "*" else: select_columns = ", ".join(select_columns) where_clause, where_values = self._build_where(self._where_columns) self._cursor.execute( f""" SELECT {select_columns} FROM {self._table_name} {where_clause} """, where_values, ) return self class Update(_TableQuery): def __init__(self, cursor, table_name): super(Update, self).__init__(cursor, table_name) self._set_columns = None @staticmethod def get_values(set_columns): return [_value for _value in set_columns.values() if not isinstance(_value, SQL)] @staticmethod def get_value_insert(value): if isinstance(value, SQL): return value.query_string return "?" 
@staticmethod def _build_set(set_columns): return ", ".join( [f"""[{_name}] = {Update.get_value_insert(_value)}""" for _name, _value in set_columns.items()] ) def validate(self): super(Update, self).validate() if not self._set_columns: raise ValueError("Update must specify values to set.") def set(self, **set_columns): self._set_columns = set_columns return self def execute(self): self.validate() where_columns = (self._where_columns or {}).keys() set_columns = self._set_columns.keys() all_columns = set([_column for _list in [set_columns, where_columns] for _column in _list]) if any(column_name is None or len(column_name) == 0 for column_name in all_columns): raise ValueError("The column names cannot be None or empty") where_clause, where_values = self._build_where(self._where_columns) self._cursor.execute( f""" UPDATE {self._table_name} SET {self._build_set(self._set_columns)} {where_clause} """, tuple(Update.get_values(self._set_columns)) + where_values, ) return self class Insert(Update): def execute(self): self.validate() set_columns = self._set_columns.keys() if any(column_name is None or len(column_name) == 0 for column_name in set_columns): raise ValueError("The column names cannot be None or empty") column_names = ", ".join([f"[{_column}]" for _column in self._set_columns.keys()]) self._cursor.execute( f""" INSERT INTO {self._table_name} ({column_names}) VALUES ({', '.join([Insert.get_value_insert(_column) for _column in self._set_columns.values()])}) """, tuple(Insert.get_values(self._set_columns)), ) return self class Table(_TableQuery): def insert(self): return Insert(self.cursor, self._table_name) def update(self): return Update(self.cursor, self._table_name) def select(self, *select_columns): return From(self.cursor, self._table_name).select(*select_columns) def __init__(self, cursor, table_name): super(Table, self).__init__(cursor, table_name) @staticmethod def exists(cursor, table_name): """ Checks and validates a table name against the master list of all tables present in the db. :param cursor: the cursor to the database's connection :param table_name: the name of the table searching for :return: true if the table exists, false otherwise """ cursor.execute( """ SELECT name FROM sqlite_master WHERE type='table' AND name=?; """, (table_name,), ) return bool(cursor.fetchone()) @staticmethod def validate_table(cursor, table_name, expected_columns): """ Validate the table to ensure the table with a name table_name exists with the expected columns given. Raises an error if columns aren't what is expected or table doesn't exist. :param cursor: the cursor to the GeoPackage database's connection :param table_name: the name of the expected table :param expected_columns: a list of strings with the names of the columns """ if not Table.exists(cursor=cursor, table_name=table_name): raise ValueError(f"Table {table_name} does not exist. Cannot retrieve entries from a non-existent table") if not Table.columns_exists(cursor=cursor, table_name=table_name, column_names=expected_columns): raise ValueError(f"Invalid Schema. The table does not have the expected columns: {expected_columns}.") @staticmethod def columns_exists(cursor, table_name, column_names): """ Validates the existence for several columns in a table. 
:param cursor: the cursor to the database's connection :param table_name: the name of the table with the column :param column_names: the list of column names to verify exist in the table :return: True if the column exists, false otherwise """ cursor.execute( """ PRAGMA table_info("{table_name}") """.format( table_name=table_name ) ) found_columns = [_row["name"] for _row in cursor] return all(any(_existing_column == _column for _existing_column in found_columns) for _column in column_names) def insert_or_update_row(self, set_columns, where_columns): """ Searches for a row matching the clauses defined by mappings in where_columns. If exactly one row is returned, an update operation will be performed. If 0 or multiple rows are returned, an insert operation will take place. In either case, affected row will have the values defined in set_columns. :param set_columns: dictionary mapping a set of column names to their new/inserted values. :param where_columns: dictionary mapping a set of column names to their current values, search against these. """ self.select().where(**where_columns).execute() existing_row = self.cursor.fetchall() if existing_row is None or len(existing_row) > 1 or len(existing_row) == 0: self.insert().set(**set_columns).execute() else: self.update().set(**set_columns).where(**where_columns).execute() return self def get_database_connection(file_path, timeout=0.0): """ Gets a Connection to an Sqlite Database :param timeout: how long in seconds the driver will attempt to execute a statement before aborting. :param file_path: path to the sqlite database :return: a connection to the database """ db_connection = connect(database=file_path, timeout=timeout) db_connection.row_factory = Row return db_connection
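# --- A minimal usage sketch (not part of the module): open an in-memory
# database, create a table with raw SQL, then use the fluent helpers above to
# insert, upsert and read rows back. The table and column names are
# illustrative only.
def _demo_table_usage():
    db = get_database_connection(":memory:")
    cursor = db.cursor()
    cursor.execute("CREATE TABLE settings (key TEXT, value TEXT)")

    table = Table(cursor, "settings")
    table.insert().set(key="theme", value="dark").execute()
    # Exactly one matching row exists, so this takes the update path.
    table.insert_or_update_row(
        set_columns={"key": "theme", "value": "light"},
        where_columns={"key": "theme"})

    table.select("value").where(key="theme").execute()
    return [dict(row) for row in cursor.fetchall()]  # [{'value': 'light'}]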
# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from oslo_utils import versionutils from nova.compute import utils as compute_utils from nova import db from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LAZY_LOAD_FIELDS = ['hosts'] # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceGroup(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Use list/dict helpers for policies, metadetails, members # Version 1.3: Make uuid a non-None real string # Version 1.4: Add add_members() # Version 1.5: Add get_hosts() # Version 1.6: Add get_by_name() # Version 1.7: Deprecate metadetails # Version 1.8: Add count_members_by_user() # Version 1.9: Add get_by_instance_uuid() # Version 1.10: Add hosts field VERSION = '1.10' fields = { 'id': fields.IntegerField(), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'policies': fields.ListOfStringsField(nullable=True), 'members': fields.ListOfStringsField(nullable=True), 'hosts': fields.ListOfStringsField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 7): # NOTE(danms): Before 1.7, we had an always-empty # metadetails property primitive['metadetails'] = {} @staticmethod def _from_db_object(context, instance_group, db_inst): """Method to help with migration to objects. Converts a database entity to a formal object. """ # Most of the field names match right now, so be quick for field in instance_group.fields: if field in LAZY_LOAD_FIELDS: continue if field == 'deleted': instance_group.deleted = db_inst['deleted'] == db_inst['id'] else: instance_group[field] = db_inst[field] instance_group._context = context instance_group.obj_reset_changes() return instance_group def obj_load_attr(self, attrname): # NOTE(sbauza): Only hosts could be lazy-loaded right now if attrname != 'hosts': raise exception.ObjectActionError( action='obj_load_attr', reason='unable to load %s' % attrname) self.hosts = self.get_hosts() self.obj_reset_changes(['hosts']) @base.remotable_classmethod def get_by_uuid(cls, context, uuid): db_inst = db.instance_group_get(context, uuid) return cls._from_db_object(context, cls(), db_inst) @base.remotable_classmethod def get_by_name(cls, context, name): # TODO(russellb) We need to get the group by name here. There's no # db.api method for this yet. Come back and optimize this by # adding a new query by name. This is unnecessarily expensive if a # tenant has lots of groups. 
igs = objects.InstanceGroupList.get_by_project_id(context, context.project_id) for ig in igs: if ig.name == name: return ig raise exception.InstanceGroupNotFound(group_uuid=name) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_inst = db.instance_group_get_by_instance(context, instance_uuid) return cls._from_db_object(context, cls(), db_inst) @classmethod def get_by_hint(cls, context, hint): if uuidutils.is_uuid_like(hint): return cls.get_by_uuid(context, hint) else: return cls.get_by_name(context, hint) @base.remotable def save(self): """Save updates to this instance group.""" updates = self.obj_get_changes() # NOTE(sbauza): We do NOT save the set of compute nodes that an # instance group is connected to in this method. Instance groups are # implicitly connected to compute nodes when the # InstanceGroup.add_members() method is called, which adds the mapping # table entries. # So, since the only way to have hosts in the updates is to set that # field explicitely, we prefer to raise an Exception so the developer # knows he has to call obj_reset_changes(['hosts']) right after setting # the field. if 'hosts' in updates: raise exception.InstanceGroupSaveException(field='hosts') if not updates: return payload = dict(updates) payload['server_group_id'] = self.uuid db.instance_group_update(self._context, self.uuid, updates) db_inst = db.instance_group_get(self._context, self.uuid) self._from_db_object(self._context, self, db_inst) compute_utils.notify_about_server_group_update(self._context, "update", payload) @base.remotable def refresh(self): """Refreshes the instance group.""" current = self.__class__.get_by_uuid(self._context, self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] self.obj_reset_changes() @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() payload = dict(updates) updates.pop('id', None) policies = updates.pop('policies', None) members = updates.pop('members', None) db_inst = db.instance_group_create(self._context, updates, policies=policies, members=members) self._from_db_object(self._context, self, db_inst) payload['server_group_id'] = self.uuid compute_utils.notify_about_server_group_update(self._context, "create", payload) @base.remotable def destroy(self): payload = {'server_group_id': self.uuid} db.instance_group_delete(self._context, self.uuid) self.obj_reset_changes() compute_utils.notify_about_server_group_update(self._context, "delete", payload) @base.remotable_classmethod def add_members(cls, context, group_uuid, instance_uuids): payload = {'server_group_id': group_uuid, 'instance_uuids': instance_uuids} members = db.instance_group_members_add(context, group_uuid, instance_uuids) compute_utils.notify_about_server_group_update(context, "addmember", payload) return list(members) @base.remotable def get_hosts(self, exclude=None): """Get a list of hosts for non-deleted instances in the group This method allows you to get a list of the hosts where instances in this group are currently running. There's also an option to exclude certain instance UUIDs from this calculation. 
""" filter_uuids = self.members if exclude: filter_uuids = set(filter_uuids) - set(exclude) filters = {'uuid': filter_uuids, 'deleted': False} instances = objects.InstanceList.get_by_filters(self._context, filters=filters) return list(set([instance.host for instance in instances if instance.host])) @base.remotable def count_members_by_user(self, user_id): """Count the number of instances in a group belonging to a user.""" filter_uuids = self.members filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False} instances = objects.InstanceList.get_by_filters(self._context, filters=filters) return len(instances) @base.NovaObjectRegistry.register class InstanceGroupList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # InstanceGroup <= version 1.3 # Version 1.1: InstanceGroup <= version 1.4 # Version 1.2: InstanceGroup <= version 1.5 # Version 1.3: InstanceGroup <= version 1.6 # Version 1.4: InstanceGroup <= version 1.7 # Version 1.5: InstanceGroup <= version 1.8 # Version 1.6: InstanceGroup <= version 1.9 # Version 1.7: InstanceGroup <= version 1.10 VERSION = '1.7' fields = { 'objects': fields.ListOfObjectsField('InstanceGroup'), } @base.remotable_classmethod def get_by_project_id(cls, context, project_id): groups = db.instance_group_get_all_by_project_id(context, project_id) return base.obj_make_list(context, cls(context), objects.InstanceGroup, groups) @base.remotable_classmethod def get_all(cls, context): groups = db.instance_group_get_all(context) return base.obj_make_list(context, cls(context), objects.InstanceGroup, groups)
# -*- coding: utf-8 -*- import csv import os import platform import codecs import re import sys from datetime import datetime import pytest import numpy as np from pandas._libs.lib import Timestamp import pandas as pd import pandas.util.testing as tm from pandas import DataFrame, Series, Index, MultiIndex from pandas import compat from pandas.compat import (StringIO, BytesIO, PY3, range, lrange, u) from pandas.errors import DtypeWarning, EmptyDataError, ParserError from pandas.io.common import URLError from pandas.io.parsers import TextFileReader, TextParser class ParserTests(object): """ Want to be able to test either C+Cython or Python+Cython parsers """ data1 = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ def test_empty_decimal_marker(self): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = 'Only length-1 decimal markers supported' with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), decimal='') def test_bad_stream_exception(self): # Issue 13652: # This test validates that both python engine # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') # stream must be binary UTF8 stream = codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter) if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" with tm.assert_raises_regex(UnicodeDecodeError, msg): self.read_csv(stream) stream.close() def test_read_csv(self): if not compat.PY3: if compat.is_platform_windows(): prefix = u("file:///") else: prefix = u("file://") fname = prefix + compat.text_type(self.csv1) self.read_csv(fname, index_col=0, parse_dates=True) def test_1000_sep(self): data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({ 'A': [1, 10], 'B': [2334, 13], 'C': [5, 10.] 
}) df = self.read_csv(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) def test_squeeze(self): data = """\ a,1 b,2 c,3 """ idx = Index(['a', 'b', 'c'], name=0) expected = Series([1, 2, 3], name=1, index=idx) result = self.read_table(StringIO(data), sep=',', index_col=0, header=None, squeeze=True) assert isinstance(result, Series) tm.assert_series_equal(result, expected) def test_squeeze_no_view(self): # see gh-8217 # Series should not be a view data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13""" result = self.read_csv(StringIO(data), index_col='time', squeeze=True) assert not result._is_view def test_malformed(self): # see gh-6607 # all data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#') # first chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(5) # middle chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(3) # last chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read() # skipfooter is not supported with the C parser yet if self.engine == 'python': # skipfooter data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 footer """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#', skipfooter=1) def test_quoting(self): bad_line_small = """printer\tresult\tvariant_name Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten"" Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa pytest.raises(Exception, self.read_table, StringIO(bad_line_small), sep='\t') good_line_small = bad_line_small + '"' df = self.read_table(StringIO(good_line_small), sep='\t') assert len(df) == 3 def test_unnamed_columns(self): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ expected = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64) df = self.read_table(StringIO(data), sep=',') tm.assert_almost_equal(df.values, expected) tm.assert_index_equal(df.columns, Index(['A', 'B', 'C', 'Unnamed: 3', 'Unnamed: 4'])) def test_duplicate_columns(self): # TODO: add test for condition 'mangle_dupe_cols=False' # once it is actually supported (gh-12935) data = """A,A,B,B,B 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ for method in ('read_csv', 'read_table'): # check default behavior df = getattr(self, method)(StringIO(data), 
sep=',') assert list(df.columns) == ['A', 'A.1', 'B', 'B.1', 'B.2'] df = getattr(self, method)(StringIO(data), sep=',', mangle_dupe_cols=True) assert list(df.columns) == ['A', 'A.1', 'B', 'B.1', 'B.2'] def test_csv_mixed_type(self): data = """A,B,C a,1,2 b,3,4 c,4,5 """ expected = DataFrame({'A': ['a', 'b', 'c'], 'B': [1, 3, 4], 'C': [2, 4, 5]}) out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D'])) assert df.index.name == 'index' assert isinstance( df.index[0], (datetime, np.datetime64, Timestamp)) assert df.values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_csv_no_index_name(self): df = self.read_csv(self.csv2, index_col=0, parse_dates=True) df2 = self.read_table(self.csv2, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D', 'E'])) assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp)) assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_table_unicode(self): fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8')) df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None) assert isinstance(df1[0].values[0], compat.text_type) def test_read_table_wrong_num_columns(self): # too few! data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ pytest.raises(ValueError, self.read_csv, StringIO(data)) def test_read_duplicate_index_explicit(self): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ result = self.read_csv(StringIO(data), index_col=0) expected = self.read_csv(StringIO(data)).set_index( 'index', verify_integrity=False) tm.assert_frame_equal(result, expected) result = self.read_table(StringIO(data), sep=',', index_col=0) expected = self.read_table(StringIO(data), sep=',', ).set_index( 'index', verify_integrity=False) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(self): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ # make sure an error isn't thrown self.read_csv(StringIO(data)) self.read_table(StringIO(data), sep=',') def test_parse_bools(self): data = """A,B True,1 False,2 True,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.bool_ data = """A,B YES,1 no,2 yes,3 No,3 Yes,3 """ data = self.read_csv(StringIO(data), true_values=['yes', 'Yes', 'YES'], false_values=['no', 'NO', 'No']) assert data['A'].dtype == np.bool_ data = """A,B TRUE,1 FALSE,2 TRUE,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.bool_ data = """A,B foo,bar bar,foo""" result = self.read_csv(StringIO(data), true_values=['foo'], false_values=['bar']) expected = DataFrame({'A': [True, False], 'B': [False, True]}) tm.assert_frame_equal(result, expected) def test_int_conversion(self): data = """A,B 1.0,1 2.0,2 3.0,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.float64 assert data['B'].dtype == np.int64 def test_read_nrows(self): expected = self.read_csv(StringIO(self.data1))[:3] df = self.read_csv(StringIO(self.data1), nrows=3) tm.assert_frame_equal(df, expected) # see gh-10476 df = self.read_csv(StringIO(self.data1), nrows=3.0) tm.assert_frame_equal(df, expected) msg = r"'nrows' must 
be an integer >=0" with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows=1.2) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows='foo') with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows=-1) def test_read_chunksize(self): reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2) df = self.read_csv(StringIO(self.data1), index_col=0) chunks = list(reader) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) # with invalid chunksize value: msg = r"'chunksize' must be an integer >=1" with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize=1.3) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize='foo') with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize=0) def test_read_chunksize_and_nrows(self): # gh-15755 # With nrows reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(pd.concat(reader), df) # chunksize > nrows reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=8, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(pd.concat(reader), df) # with changing "size": reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=8, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5]) with pytest.raises(StopIteration): reader.get_chunk(size=3) def test_read_chunksize_named(self): reader = self.read_csv( StringIO(self.data1), index_col='index', chunksize=2) df = self.read_csv(StringIO(self.data1), index_col='index') chunks = list(reader) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) def test_get_chunk_passed_chunksize(self): data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" result = self.read_csv(StringIO(data), chunksize=2) piece = result.get_chunk() assert len(piece) == 2 def test_read_chunksize_generated_index(self): # GH 12185 reader = self.read_csv(StringIO(self.data1), chunksize=2) df = self.read_csv(StringIO(self.data1)) tm.assert_frame_equal(pd.concat(reader), df) reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0) df = self.read_csv(StringIO(self.data1), index_col=0) tm.assert_frame_equal(pd.concat(reader), df) def test_read_text_list(self): data = """A,B,C\nfoo,1,2,3\nbar,4,5,6""" as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar', '4', '5', '6']] df = self.read_csv(StringIO(data), index_col=0) parser = TextParser(as_list, index_col=0, chunksize=2) chunk = parser.read(None) tm.assert_frame_equal(chunk, df) def test_iterator(self): # See gh-6607 reader = self.read_csv(StringIO(self.data1), index_col=0, iterator=True) df = self.read_csv(StringIO(self.data1), index_col=0) chunk = reader.read(3) tm.assert_frame_equal(chunk, df[:3]) last_chunk = reader.read(5) tm.assert_frame_equal(last_chunk, df[3:]) # pass list lines = list(csv.reader(StringIO(self.data1))) parser = TextParser(lines, index_col=0, chunksize=2) df = self.read_csv(StringIO(self.data1), index_col=0) chunks = list(parser) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) 
tm.assert_frame_equal(chunks[2], df[4:]) # pass skiprows parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1]) chunks = list(parser) tm.assert_frame_equal(chunks[0], df[1:3]) treader = self.read_table(StringIO(self.data1), sep=',', index_col=0, iterator=True) assert isinstance(treader, TextFileReader) # gh-3967: stopping iteration when chunksize is specified data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = self.read_csv(StringIO(data), iterator=True) result = list(reader) expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[ 3, 6, 9]), index=['foo', 'bar', 'baz']) tm.assert_frame_equal(result[0], expected) # chunksize = 1 reader = self.read_csv(StringIO(data), chunksize=1) result = list(reader) expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[ 3, 6, 9]), index=['foo', 'bar', 'baz']) assert len(result) == 3 tm.assert_frame_equal(pd.concat(result), expected) # skipfooter is not supported with the C parser yet if self.engine == 'python': # test bad parameter (skipfooter) reader = self.read_csv(StringIO(self.data1), index_col=0, iterator=True, skipfooter=1) pytest.raises(ValueError, reader.read, 3) def test_pass_names_with_index(self): lines = self.data1.split('\n') no_header = '\n'.join(lines[1:]) # regular index names = ['index', 'A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=0, names=names) expected = self.read_csv(StringIO(self.data1), index_col=0) tm.assert_frame_equal(df, expected) # multi index data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ lines = data.split('\n') no_header = '\n'.join(lines[1:]) names = ['index1', 'index2', 'A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=[0, 1], names=names) expected = self.read_csv(StringIO(data), index_col=[0, 1]) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(data), index_col=['index1', 'index2']) tm.assert_frame_equal(df, expected) def test_multi_index_no_level_names(self): data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ data2 = """A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ lines = data.split('\n') no_header = '\n'.join(lines[1:]) names = ['A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=[0, 1], header=None, names=names) expected = self.read_csv(StringIO(data), index_col=[0, 1]) tm.assert_frame_equal(df, expected, check_names=False) # 2 implicit first cols df2 = self.read_csv(StringIO(data2)) tm.assert_frame_equal(df2, df) # reverse order of index df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names, header=None) expected = self.read_csv(StringIO(data), index_col=[1, 0]) tm.assert_frame_equal(df, expected, check_names=False) def test_multi_index_blank_df(self): # GH 14545 data = """a,b """ df = self.read_csv(StringIO(data), header=[0]) expected = DataFrame(columns=['a', 'b']) tm.assert_frame_equal(df, expected) round_trip = self.read_csv(StringIO( expected.to_csv(index=False)), header=[0]) tm.assert_frame_equal(round_trip, expected) data_multiline = """a,b c,d """ df2 = self.read_csv(StringIO(data_multiline), header=[0, 1]) cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')]) expected2 = DataFrame(columns=cols) tm.assert_frame_equal(df2, expected2) round_trip = self.read_csv(StringIO( expected2.to_csv(index=False)), header=[0, 1]) tm.assert_frame_equal(round_trip, expected2) def 
test_no_unnamed_index(self): data = """ id c0 c1 c2 0 1 0 a b 1 2 0 c d 2 2 2 e f """ df = self.read_table(StringIO(data), sep=' ') assert df.index.name is None def test_read_csv_parse_simple_list(self): text = """foo bar baz qux foo foo bar""" df = self.read_csv(StringIO(text), header=None) expected = DataFrame({0: ['foo', 'bar baz', 'qux foo', 'foo', 'bar']}) tm.assert_frame_equal(df, expected) @tm.network def test_url(self): # HTTP(S) url = ('https://raw.github.com/pandas-dev/pandas/master/' 'pandas/tests/io/parser/data/salaries.csv') url_table = self.read_table(url) dirpath = tm.get_data_path() localtable = os.path.join(dirpath, 'salaries.csv') local_table = self.read_table(localtable) tm.assert_frame_equal(url_table, local_table) # TODO: ftp testing @tm.slow def test_file(self): dirpath = tm.get_data_path() localtable = os.path.join(dirpath, 'salaries.csv') local_table = self.read_table(localtable) try: url_table = self.read_table('file://localhost/' + localtable) except URLError: # fails on some systems pytest.skip("failing on %s" % ' '.join(platform.uname()).strip()) tm.assert_frame_equal(url_table, local_table) def test_path_pathlib(self): df = tm.makeDataFrame() result = tm.round_trip_pathlib(df.to_csv, lambda p: self.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_path_localpath(self): df = tm.makeDataFrame() result = tm.round_trip_localpath( df.to_csv, lambda p: self.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_nonexistent_path(self): # gh-2428: pls no segfault # gh-14086: raise more helpful FileNotFoundError path = '%s.csv' % tm.rands(10) pytest.raises(compat.FileNotFoundError, self.read_csv, path) def test_missing_trailing_delimiters(self): data = """A,B,C,D 1,2,3,4 1,3,3, 1,4,5""" result = self.read_csv(StringIO(data)) assert result['D'].isnull()[1:].all() def test_skipinitialspace(self): s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, ' '1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, ' '314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, ' '70.06056, 344.98370, 1, 1, -0.689265, -0.692787, ' '0.212036, 14.7674, 41.605, -9999.0, -9999.0, ' '-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128') sfile = StringIO(s) # it's 33 columns result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'], header=None, skipinitialspace=True) assert pd.isnull(result.iloc[0, 29]) def test_utf16_bom_skiprows(self): # #2298 data = u("""skip this skip this too A\tB\tC 1\t2\t3 4\t5\t6""") data2 = u("""skip this skip this too A,B,C 1,2,3 4,5,6""") path = '__%s__.csv' % tm.rands(10) with tm.ensure_clean(path) as path: for sep, dat in [('\t', data), (',', data2)]: for enc in ['utf-16', 'utf-16le', 'utf-16be']: bytes = dat.encode(enc) with open(path, 'wb') as f: f.write(bytes) s = BytesIO(dat.encode('utf-8')) if compat.PY3: # somewhat False since the code never sees bytes from io import TextIOWrapper s = TextIOWrapper(s, encoding='utf-8') result = self.read_csv(path, encoding=enc, skiprows=2, sep=sep) expected = self.read_csv(s, encoding='utf-8', skiprows=2, sep=sep) s.close() tm.assert_frame_equal(result, expected) def test_utf16_example(self): path = tm.get_data_path('utf16_ex.txt') # it works! 
and is the right length result = self.read_table(path, encoding='utf-16') assert len(result) == 50 if not compat.PY3: buf = BytesIO(open(path, 'rb').read()) result = self.read_table(buf, encoding='utf-16') assert len(result) == 50 def test_unicode_encoding(self): pth = tm.get_data_path('unicode_series.csv') result = self.read_csv(pth, header=None, encoding='latin-1') result = result.set_index(0) got = result[1][1632] expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)') assert got == expected def test_trailing_delimiters(self): # #2442. grumble grumble data = """A,B,C 1,2,3, 4,5,6, 7,8,9,""" result = self.read_csv(StringIO(data), index_col=False) expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8], 'C': [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_escapechar(self): # http://stackoverflow.com/questions/13824840/feature-request-for- # pandas-read-csv data = '''SEARCH_TERM,ACTUAL_URL "bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa result = self.read_csv(StringIO(data), escapechar='\\', quotechar='"', encoding='utf-8') assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", ' 'IKEA:s 1700-tals serie') tm.assert_index_equal(result.columns, Index(['SEARCH_TERM', 'ACTUAL_URL'])) def test_int64_min_issues(self): # #2599 data = 'A,B\n0,0\n0,' result = self.read_csv(StringIO(data)) expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]}) tm.assert_frame_equal(result, expected) def test_parse_integers_above_fp_precision(self): data = """Numbers 17007000002000191 17007000002000191 17007000002000191 17007000002000191 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000194""" result = self.read_csv(StringIO(data)) expected = DataFrame({'Numbers': [17007000002000191, 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000194]}) assert np.array_equal(result['Numbers'], expected['Numbers']) def test_chunks_have_consistent_numerical_type(self): integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers) with tm.assert_produces_warning(False): df = self.read_csv(StringIO(data)) # Assert that types were coerced. assert type(df.a[0]) is np.float64 assert df.a.dtype == np.float def test_warn_if_chunks_have_mismatched_type(self): warning_type = False integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ['a', 'b'] + integers) # see gh-3866: if chunks are different types and can't # be coerced using numerical types, then issue warning. 
if self.engine == 'c' and self.low_memory: warning_type = DtypeWarning with tm.assert_produces_warning(warning_type): df = self.read_csv(StringIO(data)) assert df.a.dtype == np.object def test_integer_overflow_bug(self): # see gh-2601 data = "65248E10 11\n55555E55 22\n" result = self.read_csv(StringIO(data), header=None, sep=' ') assert result[0].dtype == np.float64 result = self.read_csv(StringIO(data), header=None, sep=r'\s+') assert result[0].dtype == np.float64 def test_catch_too_many_names(self): # see gh-5156 data = """\ 1,2,3 4,,6 7,8,9 10,11,12\n""" pytest.raises(ValueError, self.read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd']) def test_ignore_leading_whitespace(self): # see gh-3374, gh-6607 data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9' result = self.read_table(StringIO(data), sep=r'\s+') expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_chunk_begins_with_newline_whitespace(self): # see gh-10022 data = '\n hello\nworld\n' result = self.read_csv(StringIO(data), header=None) assert len(result) == 2 # see gh-9735: this issue is C parser-specific (bug when # parsing whitespace and characters at chunk boundary) if self.engine == 'c': chunk1 = 'a' * (1024 * 256 - 2) + '\na' chunk2 = '\n a' result = self.read_csv(StringIO(chunk1 + chunk2), header=None) expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a']) tm.assert_frame_equal(result, expected) def test_empty_with_index(self): # see gh-10184 data = 'x,y' result = self.read_csv(StringIO(data), index_col=0) expected = DataFrame([], columns=['y'], index=Index([], name='x')) tm.assert_frame_equal(result, expected) def test_empty_with_multiindex(self): # see gh-10467 data = 'x,y,z' result = self.read_csv(StringIO(data), index_col=['x', 'y']) expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays( [[]] * 2, names=['x', 'y'])) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_reversed_multiindex(self): data = 'x,y,z' result = self.read_csv(StringIO(data), index_col=[1, 0]) expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays( [[]] * 2, names=['y', 'x'])) tm.assert_frame_equal(result, expected, check_index_type=False) def test_float_parser(self): # see gh-9565 data = '45e-1,4.5,45.,inf,-inf' result = self.read_csv(StringIO(data), header=None) expected = DataFrame([[float(s) for s in data.split(',')]]) tm.assert_frame_equal(result, expected) def test_scientific_no_exponent(self): # see gh-12215 df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']), ('y', ['42e']), ('z', ['632E'])]) data = df.to_csv(index=False) for prec in self.float_precision_choices: df_roundtrip = self.read_csv( StringIO(data), float_precision=prec) tm.assert_frame_equal(df_roundtrip, df) def test_int64_overflow(self): data = """ID 00013007854817840016671868 00013007854817840016749251 00013007854817840016754630 00013007854817840016781876 00013007854817840017028824 00013007854817840017963235 00013007854817840018860166""" # 13007854817840016671868 > UINT64_MAX, so this # will overflow and return object as the dtype. result = self.read_csv(StringIO(data)) assert result['ID'].dtype == object # 13007854817840016671868 > UINT64_MAX, so attempts # to cast to either int64 or uint64 will result in # an OverflowError being raised. 
for conv in (np.int64, np.uint64): pytest.raises(OverflowError, self.read_csv, StringIO(data), converters={'ID': conv}) # These numbers fall right inside the int64-uint64 range, # so they should be parsed as string. ui_max = np.iinfo(np.uint64).max i_max = np.iinfo(np.int64).max i_min = np.iinfo(np.int64).min for x in [i_max, i_min, ui_max]: result = self.read_csv(StringIO(str(x)), header=None) expected = DataFrame([x]) tm.assert_frame_equal(result, expected) # These numbers fall just outside the int64-uint64 range, # so they should be parsed as string. too_big = ui_max + 1 too_small = i_min - 1 for x in [too_big, too_small]: result = self.read_csv(StringIO(str(x)), header=None) expected = DataFrame([str(x)]) tm.assert_frame_equal(result, expected) # No numerical dtype can hold both negative and uint64 values, # so they should be cast as string. data = '-1\n' + str(2**63) expected = DataFrame([str(-1), str(2**63)]) result = self.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) data = str(2**63) + '\n-1' expected = DataFrame([str(2**63), str(-1)]) result = self.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) def test_empty_with_nrows_chunksize(self): # see gh-9535 expected = DataFrame([], columns=['foo', 'bar']) result = self.read_csv(StringIO('foo,bar\n'), nrows=10) tm.assert_frame_equal(result, expected) result = next(iter(self.read_csv( StringIO('foo,bar\n'), chunksize=10))) tm.assert_frame_equal(result, expected) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = self.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True) result = DataFrame(result[2], columns=result[1], index=result[0]) tm.assert_frame_equal(DataFrame.from_records( result), expected, check_index_type=False) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = next(iter(self.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True))) result = DataFrame(result[2], columns=result[1], index=result[0]) tm.assert_frame_equal(DataFrame.from_records(result), expected, check_index_type=False) def test_eof_states(self): # see gh-10728, gh-10548 # With skip_blank_lines = True expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c']) # gh-10728: WHITESPACE_LINE data = 'a,b,c\n4,5,6\n ' result = self.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) # gh-10548: EAT_LINE_COMMENT data = 'a,b,c\n4,5,6\n#comment' result = self.read_csv(StringIO(data), comment='#') tm.assert_frame_equal(result, expected) # EAT_CRNL_NOP data = 'a,b,c\n4,5,6\n\r' result = self.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) # EAT_COMMENT data = 'a,b,c\n4,5,6#comment' result = self.read_csv(StringIO(data), comment='#') tm.assert_frame_equal(result, expected) # SKIP_LINE data = 'a,b,c\n4,5,6\nskipme' result = self.read_csv(StringIO(data), skiprows=[2]) tm.assert_frame_equal(result, expected) # With skip_blank_lines = False # EAT_LINE_COMMENT data = 'a,b,c\n4,5,6\n#comment' result = self.read_csv( StringIO(data), comment='#', skip_blank_lines=False) expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) # IN_FIELD data = 'a,b,c\n4,5,6\n ' result = self.read_csv(StringIO(data), skip_blank_lines=False) expected = DataFrame( [['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) # EAT_CRNL data = 'a,b,c\n4,5,6\n\r' result = self.read_csv(StringIO(data), skip_blank_lines=False) expected = DataFrame( [[4, 5, 6], 
[None, None, None]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) # Should produce exceptions # ESCAPED_CHAR data = "a,b,c\n4,5,6\n\\" pytest.raises(Exception, self.read_csv, StringIO(data), escapechar='\\') # ESCAPE_IN_QUOTED_FIELD data = 'a,b,c\n4,5,6\n"\\' pytest.raises(Exception, self.read_csv, StringIO(data), escapechar='\\') # IN_QUOTED_FIELD data = 'a,b,c\n4,5,6\n"' pytest.raises(Exception, self.read_csv, StringIO(data), escapechar='\\') def test_uneven_lines_with_usecols(self): # See gh-12203 csv = r"""a,b,c 0,1,2 3,4,5,6,7 8,9,10 """ # make sure that an error is still thrown # when the 'usecols' parameter is not provided msg = r"Expected \d+ fields in line \d+, saw \d+" with tm.assert_raises_regex(ValueError, msg): df = self.read_csv(StringIO(csv)) expected = DataFrame({ 'a': [0, 3, 8], 'b': [1, 4, 9] }) usecols = [0, 1] df = self.read_csv(StringIO(csv), usecols=usecols) tm.assert_frame_equal(df, expected) usecols = ['a', 'b'] df = self.read_csv(StringIO(csv), usecols=usecols) tm.assert_frame_equal(df, expected) def test_read_empty_with_usecols(self): # See gh-12493 names = ['Dummy', 'X', 'Dummy_2'] usecols = names[1:2] # ['X'] # first, check to see that the response of # parser when faced with no provided columns # throws the correct error, with or without usecols errmsg = "No columns to parse from file" with tm.assert_raises_regex(EmptyDataError, errmsg): self.read_csv(StringIO('')) with tm.assert_raises_regex(EmptyDataError, errmsg): self.read_csv(StringIO(''), usecols=usecols) expected = DataFrame(columns=usecols, index=[0], dtype=np.float64) df = self.read_csv(StringIO(',,'), names=names, usecols=usecols) tm.assert_frame_equal(df, expected) expected = DataFrame(columns=usecols) df = self.read_csv(StringIO(''), names=names, usecols=usecols) tm.assert_frame_equal(df, expected) def test_trailing_spaces(self): data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa expected = DataFrame([[1., 2., 4.], [5.1, np.nan, 10.]]) # gh-8661, gh-8679: this should ignore six lines including # lines with trailing whitespace and blank lines df = self.read_csv(StringIO(data.replace(',', ' ')), header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True) tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data.replace(',', ' ')), header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True) tm.assert_frame_equal(df, expected) # gh-8983: test skipping set of rows after a row with trailing spaces expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan], "C": [4., 10]}) df = self.read_table(StringIO(data.replace(',', ' ')), delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True) tm.assert_frame_equal(df, expected) def test_raise_on_sep_with_delim_whitespace(self): # see gh-6607 data = 'a b c\n1 2 3' with tm.assert_raises_regex(ValueError, 'you can only specify one'): self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True) def test_single_char_leading_whitespace(self): # see gh-9710 data = """\ MyColumn a b a b\n""" expected = DataFrame({'MyColumn': list('abab')}) result = self.read_csv(StringIO(data), delim_whitespace=True, skipinitialspace=True) tm.assert_frame_equal(result, expected) result = self.read_csv(StringIO(data), skipinitialspace=True) tm.assert_frame_equal(result, expected) def test_empty_lines(self): data = """\ A,B,C 1,2.,4. 
5.,NaN,10.0 -70,.4,1 """ expected = np.array([[1., 2., 4.], [5., np.nan, 10.], [-70., .4, 1.]]) df = self.read_csv(StringIO(data)) tm.assert_numpy_array_equal(df.values, expected) df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+') tm.assert_numpy_array_equal(df.values, expected) expected = np.array([[1., 2., 4.], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [5., np.nan, 10.], [np.nan, np.nan, np.nan], [-70., .4, 1.]]) df = self.read_csv(StringIO(data), skip_blank_lines=False) tm.assert_numpy_array_equal(df.values, expected) def test_whitespace_lines(self): data = """ \t \t\t \t A,B,C \t 1,2.,4. 5.,NaN,10.0 """ expected = np.array([[1, 2., 4.], [5., np.nan, 10.]]) df = self.read_csv(StringIO(data)) tm.assert_numpy_array_equal(df.values, expected) def test_regex_separator(self): # see gh-6607 data = """ A B C D a 1 2 3 4 b 1 2 3 4 c 1 2 3 4 """ df = self.read_table(StringIO(data), sep=r'\s+') expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)), index_col=0) assert expected.index.name is None tm.assert_frame_equal(df, expected) data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9' result = self.read_table(StringIO(data), sep=r'\s+') expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) @tm.capture_stdout def test_verbose_import(self): text = """a,b,c,d one,1,2,3 one,1,2,3 ,1,2,3 one,1,2,3 ,1,2,3 ,1,2,3 one,1,2,3 two,1,2,3""" # Engines are verbose in different ways. self.read_csv(StringIO(text), verbose=True) output = sys.stdout.getvalue() if self.engine == 'c': assert 'Tokenization took:' in output assert 'Parser memory cleanup took:' in output else: # Python engine assert output == 'Filled 3 NA values in column a\n' # Reset the stdout buffer. sys.stdout = StringIO() text = """a,b,c,d one,1,2,3 two,1,2,3 three,1,2,3 four,1,2,3 five,1,2,3 ,1,2,3 seven,1,2,3 eight,1,2,3""" self.read_csv(StringIO(text), verbose=True, index_col=0) output = sys.stdout.getvalue() # Engines are verbose in different ways. if self.engine == 'c': assert 'Tokenization took:' in output assert 'Parser memory cleanup took:' in output else: # Python engine assert output == 'Filled 1 NA values in column a\n' def test_iteration_open_handle(self): if PY3: pytest.skip( "won't work in Python 3 {0}".format(sys.version_info)) with tm.ensure_clean() as path: with open(path, 'wb') as f: f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG') with open(path, 'rb') as f: for line in f: if 'CCC' in line: break if self.engine == 'c': pytest.raises(Exception, self.read_table, f, squeeze=True, header=None) else: result = self.read_table(f, squeeze=True, header=None) expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0) tm.assert_series_equal(result, expected) def test_1000_sep_with_decimal(self): data = """A|B|C 1|2,334.01|5 10|13|10. """ expected = DataFrame({ 'A': [1, 10], 'B': [2334.01, 13], 'C': [5, 10.] 
}) assert expected.A.dtype == 'int64' assert expected.B.dtype == 'float' assert expected.C.dtype == 'float' df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) data_with_odd_sep = """A|B|C 1|2.334,01|5 10|13|10, """ df = self.read_csv(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',') tm.assert_frame_equal(df, expected) def test_euro_decimal_format(self): data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" df2 = self.read_csv(StringIO(data), sep=';', decimal=',') assert df2['Number1'].dtype == float assert df2['Number2'].dtype == float assert df2['Number3'].dtype == float def test_read_duplicate_names(self): # See gh-7160 data = "a,b,a\n0,1,2\n3,4,5" df = self.read_csv(StringIO(data)) expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=['a', 'b', 'a.1']) tm.assert_frame_equal(df, expected) data = "0,1,2\n3,4,5" df = self.read_csv(StringIO(data), names=["a", "b", "a"]) expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=['a', 'b', 'a.1']) tm.assert_frame_equal(df, expected) def test_inf_parsing(self): data = """\ ,A a,inf b,-inf c,+Inf d,-Inf e,INF f,-INF g,+INf h,-INf i,inF j,-inF""" inf = float('inf') expected = Series([inf, -inf] * 5) df = self.read_csv(StringIO(data), index_col=0) tm.assert_almost_equal(df['A'].values, expected.values) df = self.read_csv(StringIO(data), index_col=0, na_filter=False) tm.assert_almost_equal(df['A'].values, expected.values) def test_raise_on_no_columns(self): # single newline data = "\n" pytest.raises(EmptyDataError, self.read_csv, StringIO(data)) # test with more than a single newline data = "\n\n\n" pytest.raises(EmptyDataError, self.read_csv, StringIO(data)) def test_compact_ints_use_unsigned(self): # see gh-13323 data = 'a,b,c\n1,9,258' # sanity check expected = DataFrame({ 'a': np.array([1], dtype=np.int64), 'b': np.array([9], dtype=np.int64), 'c': np.array([258], dtype=np.int64), }) out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) expected = DataFrame({ 'a': np.array([1], dtype=np.int8), 'b': np.array([9], dtype=np.int8), 'c': np.array([258], dtype=np.int16), }) # default behaviour for 'use_unsigned' with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): out = self.read_csv(StringIO(data), compact_ints=True) tm.assert_frame_equal(out, expected) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): out = self.read_csv(StringIO(data), compact_ints=True, use_unsigned=False) tm.assert_frame_equal(out, expected) expected = DataFrame({ 'a': np.array([1], dtype=np.uint8), 'b': np.array([9], dtype=np.uint8), 'c': np.array([258], dtype=np.uint16), }) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): out = self.read_csv(StringIO(data), compact_ints=True, use_unsigned=True) tm.assert_frame_equal(out, expected) def test_compact_ints_as_recarray(self): data = ('0,1,0,0\n' '1,1,0,0\n' '0,1,0,1') with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = self.read_csv(StringIO(data), delimiter=',', header=None, compact_ints=True, as_recarray=True) ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)]) assert result.dtype == ex_dtype with 
tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = self.read_csv(StringIO(data), delimiter=',', header=None, as_recarray=True, compact_ints=True, use_unsigned=True) ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)]) assert result.dtype == ex_dtype def test_as_recarray(self): # basic test with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True) tm.assert_numpy_array_equal(out, expected) # index_col ignored with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True, index_col=0) tm.assert_numpy_array_equal(out, expected) # respects names with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = '1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), names=['a', 'b'], header=None, as_recarray=True) tm.assert_numpy_array_equal(out, expected) # header order is respected even though it conflicts # with the natural ordering of the column names with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'b,a\n1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('b', '=i8'), ('a', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True) tm.assert_numpy_array_equal(out, expected) # overrides the squeeze parameter with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a\n1' expected = np.array([(1,)], dtype=[('a', '=i8')]) out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True) tm.assert_numpy_array_equal(out, expected) # does data conversions before doing recarray conversion with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' conv = lambda x: int(x) + 1 expected = np.array([(2, 'a'), (3, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True, converters={'a': conv}) tm.assert_numpy_array_equal(out, expected) # filters by usecols before doing recarray conversion with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' expected = np.array([(1,), (2,)], dtype=[('a', '=i8')]) out = self.read_csv(StringIO(data), as_recarray=True, usecols=['a']) tm.assert_numpy_array_equal(out, expected) def test_memory_map(self): mmap_file = os.path.join(self.dirpath, 'test_mmap.csv') expected = DataFrame({ 'a': [1, 2, 3], 'b': ['one', 'two', 'three'], 'c': ['I', 'II', 'III'] }) out = self.read_csv(mmap_file, memory_map=True) tm.assert_frame_equal(out, expected) def test_null_byte_char(self): # see gh-2741 data = '\x00,foo' cols = ['a', 'b'] expected = DataFrame([[np.nan, 'foo']], columns=cols) if self.engine == 'c': out = self.read_csv(StringIO(data), names=cols) tm.assert_frame_equal(out, expected) else: msg = "NULL byte detected" with tm.assert_raises_regex(ParserError, msg): self.read_csv(StringIO(data), names=cols) def test_utf8_bom(self): # see gh-4793 bom = u('\ufeff') utf8 = 'utf-8' def _encode_data_with_bom(_data): bom_data = (bom + _data).encode(utf8) return BytesIO(bom_data) # basic test data = 'a\n1' expected = DataFrame({'a': [1]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8) tm.assert_frame_equal(out, expected) # test with "regular" quoting data = 
'"a"\n1' expected = DataFrame({'a': [1]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, quotechar='"') tm.assert_frame_equal(out, expected) # test in a data row instead of header data = 'b\n1' expected = DataFrame({'a': ['b', '1']}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, names=['a']) tm.assert_frame_equal(out, expected) # test in empty data row with skipping data = '\n1' expected = DataFrame({'a': [1]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, names=['a'], skip_blank_lines=True) tm.assert_frame_equal(out, expected) # test in empty data row without skipping data = '\n1' expected = DataFrame({'a': [np.nan, 1.0]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, names=['a'], skip_blank_lines=False) tm.assert_frame_equal(out, expected) def test_temporary_file(self): # see gh-13398 data1 = "0 0" from tempfile import TemporaryFile new_file = TemporaryFile("w+") new_file.write(data1) new_file.flush() new_file.seek(0) result = self.read_csv(new_file, sep=r'\s+', header=None) new_file.close() expected = DataFrame([[0, 0]]) tm.assert_frame_equal(result, expected) def test_read_csv_utf_aliases(self): # see gh issue 13549 expected = pd.DataFrame({'mb_num': [4.8], 'multibyte': ['test']}) for byte in [8, 16]: for fmt in ['utf-{0}', 'utf_{0}', 'UTF-{0}', 'UTF_{0}']: encoding = fmt.format(byte) data = 'mb_num,multibyte\n4.8,test'.encode(encoding) result = self.read_csv(BytesIO(data), encoding=encoding) tm.assert_frame_equal(result, expected) def test_internal_eof_byte(self): # see gh-5500 data = "a,b\n1\x1a,2" expected = pd.DataFrame([["1\x1a", 2]], columns=['a', 'b']) result = self.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_internal_eof_byte_to_file(self): # see gh-16559 data = b'c1,c2\r\n"test \x1a test", test\r\n' expected = pd.DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"]) path = '__%s__.csv' % tm.rands(10) with tm.ensure_clean(path) as path: with open(path, "wb") as f: f.write(data) result = self.read_csv(path) tm.assert_frame_equal(result, expected) def test_file_handles(self): # GH 14418 - don't close user provided file handles fh = StringIO('a,b\n1,2') self.read_csv(fh) assert not fh.closed with open(self.csv1, 'r') as f: self.read_csv(f) assert not f.closed # mmap not working with python engine if self.engine != 'python': import mmap with open(self.csv1, 'r') as f: m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) self.read_csv(m) # closed attribute new in python 3.2 if PY3: assert not m.closed m.close() def test_invalid_file_buffer(self): # see gh-15337 class InvalidBuffer(object): pass msg = "Invalid file path or buffer object type" with tm.assert_raises_regex(ValueError, msg): self.read_csv(InvalidBuffer()) # gh-16135: we want to ensure that "tell" and "seek" # aren't actually being used when we call `read_csv` # # Thus, while the object may look "invalid" (these # methods are attributes of the `StringIO` class), # it is still a valid file-object for our purposes. 
class NoSeekTellBuffer(StringIO): def tell(self): raise AttributeError("No tell method") def seek(self, pos, whence=0): raise AttributeError("No seek method") data = "a\n1" expected = pd.DataFrame({"a": [1]}) result = self.read_csv(NoSeekTellBuffer(data)) tm.assert_frame_equal(result, expected) if PY3: from unittest import mock with tm.assert_raises_regex(ValueError, msg): self.read_csv(mock.Mock()) @tm.capture_stderr def test_skip_bad_lines(self): # see gh-15925 data = 'a\n1\n1,2,3\n4\n5,6,7' with pytest.raises(ParserError): self.read_csv(StringIO(data)) with pytest.raises(ParserError): self.read_csv(StringIO(data), error_bad_lines=True) expected = DataFrame({'a': [1, 4]}) out = self.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=False) tm.assert_frame_equal(out, expected) val = sys.stderr.getvalue() assert val == '' # Reset the stderr buffer. sys.stderr = StringIO() out = self.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True) tm.assert_frame_equal(out, expected) val = sys.stderr.getvalue() assert 'Skipping line 3' in val assert 'Skipping line 5' in val
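
# Stand-alone illustration (not one of the shared parser tests above) of the
# bad-lines behaviour exercised by test_skip_bad_lines. It reuses the
# module-level pd/StringIO imports and assumes a pandas version that still
# accepts error_bad_lines/warn_bad_lines, as these tests do; later releases
# replaced them with on_bad_lines.
def _bad_lines_demo():
    data = 'a\n1\n1,2,3\n4\n5,6,7'
    # Lines 3 and 5 have too many fields; with error_bad_lines=False they are
    # dropped (and reported on stderr when warn_bad_lines=True) instead of
    # raising ParserError.
    df = pd.read_csv(StringIO(data), error_bad_lines=False,
                     warn_bad_lines=True)
    assert list(df['a']) == [1, 4]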
import os.path
import platform

import console_helper
import path_helper
import job_manager

PROJECTS = {}
ENV = {}

ROOT = os.path.realpath(path_helper.dirname(os.path.realpath(__file__)) + '/../..')
SCHEME = 'debug'
TARGET = platform.uname()[4]
PROFILE = platform.uname()[0].lower()
BUILD_ROOT = ROOT + '/build/{0}-{1}-{2}'.format(PROFILE, TARGET, SCHEME)
OBJECT_ROOT = BUILD_ROOT + '/object'
OUTPUT_ROOT = BUILD_ROOT + '/output'


def config(scheme=None, target=None, profile=None):
    global SCHEME, TARGET, PROFILE
    global BUILD_ROOT, OBJECT_ROOT, OUTPUT_ROOT
    if scheme is not None:
        SCHEME = scheme
    if target is not None:
        TARGET = target
    if profile is not None:
        PROFILE = profile
    BUILD_ROOT = ROOT + '/build/{0}-{1}-{2}'.format(PROFILE, TARGET, SCHEME)
    OBJECT_ROOT = BUILD_ROOT + '/object'
    OUTPUT_ROOT = BUILD_ROOT + '/output'


def scan_project_depends(name, project):
    if 'all_depends' in project:
        return
    project_include_dir = project["directory"] + "/include"
    project_internal_include_dir = project["directory"] + "/internal_include"
    include_dirs = []
    if os.path.exists(project_include_dir):
        include_dirs.append(project_include_dir)
    if os.path.exists(project_internal_include_dir):
        include_dirs.append(project_internal_include_dir)
    include_dirs.append(OUTPUT_ROOT + '/include')
    if 'extra_include_dirs' in project:
        include_dirs += project['extra_include_dirs']
    all_depends = project['depends'] if 'depends' in project else []
    if 'depends' in project:
        for depend in project['depends']:
            if depend not in PROJECTS:
                console_helper.fatal("failed to find dependency {0} used by {1}".format(depend, name))
            scan_project_depends(depend, PROJECTS[depend])
            for indirect_depend in PROJECTS[depend]["all_depends"]:
                if indirect_depend not in all_depends:
                    all_depends.append(indirect_depend)
                else:
                    all_depends.remove(indirect_depend)
                    all_depends.append(indirect_depend)
    for depend in all_depends:
        scan_project_depends(depend, PROJECTS[depend])
        for include_dir in PROJECTS[depend]["include_dirs"]:
            if include_dir not in include_dirs and not include_dir.endswith('internal_include'):
                include_dirs.append(include_dir)
    if 'include_dirs' not in project:
        project['include_dirs'] = include_dirs
    project['all_depends'] = all_depends


def is_excluded(project, filename):
    src_directory = project["directory"] + "/src"
    relative_path = filename[len(src_directory) + 1:]
    exclude_src_dirs = project.get('exclude_src_dirs', [])
    exclude_src_files = project.get('exclude_src_files', [])
    if relative_path in exclude_src_files:
        return True
    for dir in exclude_src_dirs:
        if relative_path.startswith(dir + '/'):
            return True
    return False


def scan_source_files(name, project):
    if 'source_files' in project or 'custom_build' in project:
        return
    source_files = []

    def process_dir(_, base_path, files):
        for filename in files:
            path = base_path + '/' + filename
            if os.path.isdir(path):
                continue
            if is_excluded(project, path):
                continue
            ext = path_helper.get_ext_filename(filename)
            if ext not in ['c', 'cc', 'cpp']:
                continue
            source_files.append(path)

    src_dirs = project.get('src_dirs', ['src'])
    for src_dir_name in src_dirs:
        src_directory = project["directory"] + "/" + src_dir_name
        os.path.walk(src_directory, process_dir, None)
    if 'extra_src_files' in project:
        for extra_src_file in project['extra_src_files']:
            source_files.append(extra_src_file)
    project['source_files'] = source_files


def should_replace_with_gnupp0x(flag):
    if flag != "-std=gnu++11":
        return False
    if ENV["compiler_set"] != "gcc":
        return False
    major, minor = int(ENV["compiler_version"]["major"]), int(ENV["compiler_version"]["minor"])
    if major < 4:
        return True
    if major == 4 and minor <= 6:
        return True
    return False


def process_flag(flag):
    if should_replace_with_gnupp0x(flag):
        return "-std=gnu++0x"
    has_space = ' ' in flag
    has_double_quote = '"' in flag
    has_single_quote = "'" in flag
    if not has_space and not has_double_quote and not has_single_quote:
        return flag
    elif not has_single_quote:
        return "'" + flag + "'"
    elif not has_double_quote:
        return '"' + flag + '"'
    else:
        return flag.replace('\\', '\\\\').replace(' ', '\\ ').replace('"', '\\"').replace("'", "\\'")


def concat_flags(flags):
    return ' '.join(map(process_flag, flags))


def add_compiler_flag(flags, compiler, option, value):
    for flag in ENV['compiler_options'][compiler][option]:
        flags.append(flag.format(value))


def generate_compiler_flags(project, compiler_name, flag_name, options, extra_flags=None):
    if flag_name in project:
        return
    flags = []
    if extra_flags:
        for extra_flag_name, extra_flag_value in extra_flags:
            add_compiler_flag(flags, compiler_name, extra_flag_name, extra_flag_value)
    for enum_option_name, option_name in options:
        for option in project.get(enum_option_name, []):
            if enum_option_name == 'all_depends' and PROJECTS[option]['job'].job_type == 'source_library':
                continue
            add_compiler_flag(flags, compiler_name, option_name, option)
    if 'env' in project and flag_name in project['env']:
        flags.extend(project['env'][flag_name])
    project[flag_name] = flags


def generate_flags(name, project):
    generate_compiler_flags(project, "cc", "cflags", [("include_dirs", 'include_dir')])
    generate_compiler_flags(project, "cxx", "cxxflags", [("include_dirs", 'include_dir')])


def create_jobs(name, project):
    if 'job' in project:
        return
    depends_jobs = []
    if 'depends' in project:
        for depend in project['depends']:
            create_jobs(depend, PROJECTS[depend])
            depends_jobs.append(PROJECTS[depend]['job'])
    if project['type'] == 'library':
        project["output"] = '{0}/lib/lib{1}.a'.format(OUTPUT_ROOT, name)
        project["log_file"] = '{0}/lib/lib{1}.a.log'.format(OBJECT_ROOT, name)
    else:
        project["output"] = '{0}/bin/{1}'.format(OUTPUT_ROOT, name)
        project["log_file"] = '{0}/bin/{1}.log'.format(OBJECT_ROOT, name)
    job_depends = []
    copy_jobs = []
    if project.get('output_headers', False):
        include_directory = project["directory"] + '/include'

        def process_dir(_, base_path, files):
            for filename in files:
                path = base_path + '/' + filename
                if os.path.isdir(path):
                    continue
                relative_path = path[len(include_directory) + 1:]
                dest_path = OUTPUT_ROOT + '/include/' + relative_path
                source_job = job_manager.add_or_lookup_source_job(path)
                copy_job_depends = [source_job]
                copy_job_depends.extend(depends_jobs)
                copy_job = job_manager.add_job("copy", dest_path, copy_job_depends, path)
                job_depends.append(copy_job)
                copy_jobs.append(copy_job)

        os.path.walk(include_directory, process_dir, None)
    if 'source_files' in project:
        obj_files = []
        for src in project["source_files"]:
            src_path = ROOT + '/' + src
            dest_path = OBJECT_ROOT + '/' + name + '/' + path_helper.get_obj_filename(src)
            source_job = job_manager.add_or_lookup_source_job(src_path)
            compile_job_depends = [source_job]
            compile_job_depends.extend(depends_jobs)
            compile_job_depends.extend(copy_jobs)
            compile_job = job_manager.add_job("compile", dest_path, compile_job_depends, name)
            job_depends.append(compile_job)
            obj_files.append(dest_path)
        project['object_files'] = obj_files
    job_depends.extend(depends_jobs)
    if 'custom_build' in project:
        job = job_manager.add_job('custom_build', project['output'], job_depends, project)
    elif project['type'] == 'library':
        if project.get('source_files', []):
            job = job_manager.add_job('static_library', project['output'], job_depends, project)
        else:
            job = job_manager.add_job('source_library', name, job_depends, project)
    else:
        job = job_manager.add_job('executable', project['output'], job_depends, project)
    project['job'] = job


def prepare():
    for name, project in PROJECTS.items():
        scan_project_depends(name, project)
        scan_source_files(name, project)
        generate_flags(name, project)
    for name, project in PROJECTS.items():
        create_jobs(name, project)
    for name, project in PROJECTS.items():
        generate_compiler_flags(project, "ld", "ldflags",
                                [("all_depends", 'library'),
                                 ("external_depends", "library"),
                                 ("library_dirs", "library_dir")],
                                [("library_dir", OUTPUT_ROOT + "/lib")])
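# --- Illustration (standalone, not part of the build script above) ----------
# A minimal, self-contained sketch of the transitive-dependency flattening
# that scan_project_depends() performs: indirect dependencies are collected
# depth-first and duplicates are moved to the back so that link order stays
# valid. The tiny demo_projects layout is hypothetical and only mirrors the
# 'depends' key used above; it does not use console_helper or job_manager.

def flatten_depends(name, projects):
    """Return direct and indirect dependencies of *name*, duplicates last."""
    result = []
    for dep in projects[name].get('depends', []):
        for item in [dep] + flatten_depends(dep, projects):
            if item in result:
                result.remove(item)   # re-append so it ends up after its users
            result.append(item)
    return result

if __name__ == '__main__':
    demo_projects = {
        'app':  {'depends': ['net', 'util']},
        'net':  {'depends': ['util']},
        'util': {},
    }
    # 'util' is needed by both 'app' and 'net', so it is ordered last.
    print(flatten_depends('app', demo_projects))   # ['net', 'util']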
from __future__ import unicode_literals

import codecs
import datetime
from decimal import Decimal
import locale

from django.utils.functional import Promise
from django.utils import six
from django.utils.six.moves.urllib.parse import quote


class DjangoUnicodeDecodeError(UnicodeDecodeError):
    def __init__(self, obj, *args):
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        original = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (original, self.obj, type(self.obj))


def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if six.PY2:
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass


def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, Promise):
        # The input is the result of a gettext_lazy() call.
        return s
    return force_text(s, encoding, strings_only, errors)


def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    return isinstance(obj, six.integer_types + (type(None), float, Decimal,
                      datetime.datetime, datetime.date, datetime.time))


def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            if six.PY3:
                if isinstance(s, bytes):
                    s = six.text_type(s, encoding, errors)
                else:
                    s = six.text_type(s)
            elif hasattr(s, '__unicode__'):
                s = six.text_type(s)
            else:
                s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only, errors)
                          for arg in s])
    return s


def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, Promise):
        # The input is the result of a gettext_lazy() call.
        return s
    return force_bytes(s, encoding, strings_only, errors)


def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, six.memoryview):
        return bytes(s)
    if isinstance(s, Promise):
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only, errors)
                                  for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)


if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text

smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.

This is suitable for writing to sys.stdout (for instance).
"""

force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""


def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This is the algorithm from section 3.1 of RFC 3987.  However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.

    Returns an ASCII string containing the encoded result.
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.quote already considers all but
    # the ~ safe.
    # The % character is also added to the list of safe characters here, as the
    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
    # converted.
    if iri is None:
        return iri
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")


def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.

    We are assuming input is either UTF-8 or unicode already.

    This method will encode certain chars that would normally be recognized as
    special chars for URIs.  Note that this method does not encode the '
    character, as it is a valid character within URIs.  See
    encodeURIComponent() JavaScript function for more details.

    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        return path
    # I know about `os.sep` and `os.altsep` but I want to leave
    # some flexibility for hardcoding separators.
    return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")


def get_system_encoding():
    """
    The encoding of the default system locale but falls back to the given
    fallback encoding if the encoding is unsupported by python or could
    not be determined.  See tickets #10335 and #5846
    """
    try:
        encoding = locale.getdefaultlocale()[1] or 'ascii'
        codecs.lookup(encoding)
    except Exception:
        encoding = 'ascii'
    return encoding

DEFAULT_LOCALE_ENCODING = get_system_encoding()
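# --- Usage illustration (assumes Django is installed; run under Python 3) ---
# A short sketch of how the conversion helpers defined above behave:
# force_text/force_bytes convert in both directions and are no-ops for input
# that is already the right type, while iri_to_uri only percent-encodes the
# characters RFC 3987 requires. The sample values are made up for the demo.
from django.utils.encoding import force_text, force_bytes, iri_to_uri

assert force_text(b'caf\xc3\xa9') == 'caf\xe9'          # bytes -> text (UTF-8)
assert force_bytes('caf\xe9') == b'caf\xc3\xa9'         # text  -> bytes (UTF-8)
assert force_text('already text') == 'already text'     # no-op for text input

# Non-ASCII and spaces are percent-encoded; reserved URI characters are kept.
assert iri_to_uri('/test/m\xfcnchen?q=a b') == '/test/m%C3%BCnchen?q=a%20b'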
"""This module implements the Scraper component which parses responses and extracts information from them""" import logging from collections import deque from twisted.python.failure import Failure from twisted.internet import defer from scrapy.utils.defer import defer_result, defer_succeed, parallel, iter_errback from scrapy.utils.spider import iterate_spider_output from scrapy.utils.misc import load_object from scrapy.utils.log import logformatter_adapter, failure_to_exc_info from scrapy.exceptions import CloseSpider, DropItem, IgnoreRequest from scrapy import signals from scrapy.http import Request, Response from scrapy.item import BaseItem from scrapy.core.spidermw import SpiderMiddlewareManager from scrapy.utils.request import referer_str logger = logging.getLogger(__name__) class Slot(object): """Scraper slot (one per running spider)""" MIN_RESPONSE_SIZE = 1024 def __init__(self, max_active_size=5000000): self.max_active_size = max_active_size self.queue = deque() self.active = set() self.active_size = 0 self.itemproc_size = 0 self.closing = None def add_response_request(self, response, request): deferred = defer.Deferred() self.queue.append((response, request, deferred)) if isinstance(response, Response): self.active_size += max(len(response.body), self.MIN_RESPONSE_SIZE) else: self.active_size += self.MIN_RESPONSE_SIZE return deferred def next_response_request_deferred(self): response, request, deferred = self.queue.popleft() self.active.add(request) return response, request, deferred def finish_response(self, response, request): self.active.remove(request) if isinstance(response, Response): self.active_size -= max(len(response.body), self.MIN_RESPONSE_SIZE) else: self.active_size -= self.MIN_RESPONSE_SIZE def is_idle(self): return not (self.queue or self.active) def needs_backout(self): return self.active_size > self.max_active_size class Scraper(object): def __init__(self, crawler): self.slot = None self.spidermw = SpiderMiddlewareManager.from_crawler(crawler) itemproc_cls = load_object(crawler.settings['ITEM_PROCESSOR']) self.itemproc = itemproc_cls.from_crawler(crawler) self.concurrent_items = crawler.settings.getint('CONCURRENT_ITEMS') self.crawler = crawler self.signals = crawler.signals self.logformatter = crawler.logformatter @defer.inlineCallbacks def open_spider(self, spider): """Open the given spider for scraping and allocate resources for it""" self.slot = Slot() yield self.itemproc.open_spider(spider) def close_spider(self, spider): """Close a spider being scraped and release its resources""" slot = self.slot slot.closing = defer.Deferred() slot.closing.addCallback(self.itemproc.close_spider) self._check_if_closing(spider, slot) return slot.closing def is_idle(self): """Return True if there isn't any more spiders to process""" return not self.slot def _check_if_closing(self, spider, slot): if slot.closing and slot.is_idle(): slot.closing.callback(spider) def enqueue_scrape(self, response, request, spider): slot = self.slot dfd = slot.add_response_request(response, request) def finish_scraping(_): slot.finish_response(response, request) self._check_if_closing(spider, slot) self._scrape_next(spider, slot) return _ dfd.addBoth(finish_scraping) dfd.addErrback( lambda f: logger.error('Scraper bug processing %(request)s', {'request': request}, exc_info=failure_to_exc_info(f), extra={'spider': spider})) self._scrape_next(spider, slot) return dfd def _scrape_next(self, spider, slot): while slot.queue: response, request, deferred = slot.next_response_request_deferred() 
self._scrape(response, request, spider).chainDeferred(deferred) def _scrape(self, response, request, spider): """Handle the downloaded response or failure through the spider callback/errback""" assert isinstance(response, (Response, Failure)) dfd = self._scrape2(response, request, spider) # returns spiders processed output dfd.addErrback(self.handle_spider_error, request, response, spider) dfd.addCallback(self.handle_spider_output, request, response, spider) return dfd def _scrape2(self, request_result, request, spider): """Handle the different cases of request's result been a Response or a Failure""" if not isinstance(request_result, Failure): return self.spidermw.scrape_response( self.call_spider, request_result, request, spider) else: # FIXME: don't ignore errors in spider middleware dfd = self.call_spider(request_result, request, spider) return dfd.addErrback( self._log_download_errors, request_result, request, spider) def call_spider(self, result, request, spider): result.request = request dfd = defer_result(result) dfd.addCallbacks(request.callback or spider.parse, request.errback) return dfd.addCallback(iterate_spider_output) def handle_spider_error(self, _failure, request, response, spider): exc = _failure.value if isinstance(exc, CloseSpider): self.crawler.engine.close_spider(spider, exc.reason or 'cancelled') return logger.error( "Spider error processing %(request)s (referer: %(referer)s)", {'request': request, 'referer': referer_str(request)}, exc_info=failure_to_exc_info(_failure), extra={'spider': spider} ) self.signals.send_catch_log( signal=signals.spider_error, failure=_failure, response=response, spider=spider ) self.crawler.stats.inc_value( "spider_exceptions/%s" % _failure.value.__class__.__name__, spider=spider ) def handle_spider_output(self, result, request, response, spider): if not result: return defer_succeed(None) it = iter_errback(result, self.handle_spider_error, request, response, spider) dfd = parallel(it, self.concurrent_items, self._process_spidermw_output, request, response, spider) return dfd def _process_spidermw_output(self, output, request, response, spider): """Process each Request/Item (given in the output parameter) returned from the given spider """ if isinstance(output, Request): self.crawler.engine.crawl(request=output, spider=spider) elif isinstance(output, (BaseItem, dict)): self.slot.itemproc_size += 1 dfd = self.itemproc.process_item(output, spider) dfd.addBoth(self._itemproc_finished, output, response, spider) return dfd elif output is None: pass else: typename = type(output).__name__ logger.error('Spider must return Request, BaseItem, dict or None, ' 'got %(typename)r in %(request)s', {'request': request, 'typename': typename}, extra={'spider': spider}) def _log_download_errors(self, spider_failure, download_failure, request, spider): """Log and silence errors that come from the engine (typically download errors that got propagated thru here) """ if (isinstance(download_failure, Failure) and not download_failure.check(IgnoreRequest)): if download_failure.frames: logger.error('Error downloading %(request)s', {'request': request}, exc_info=failure_to_exc_info(download_failure), extra={'spider': spider}) else: errmsg = download_failure.getErrorMessage() if errmsg: logger.error('Error downloading %(request)s: %(errmsg)s', {'request': request, 'errmsg': errmsg}, extra={'spider': spider}) if spider_failure is not download_failure: return spider_failure def _itemproc_finished(self, output, item, response, spider): """ItemProcessor finished for the 
given ``item`` and returned ``output`` """ self.slot.itemproc_size -= 1 if isinstance(output, Failure): ex = output.value if isinstance(ex, DropItem): logkws = self.logformatter.dropped(item, ex, response, spider) logger.log(*logformatter_adapter(logkws), extra={'spider': spider}) return self.signals.send_catch_log_deferred( signal=signals.item_dropped, item=item, response=response, spider=spider, exception=output.value) else: logger.error('Error processing %(item)s', {'item': item}, exc_info=failure_to_exc_info(output), extra={'spider': spider}) return self.signals.send_catch_log_deferred( signal=signals.item_error, item=item, response=response, spider=spider, failure=output) else: logkws = self.logformatter.scraped(output, response, spider) logger.log(*logformatter_adapter(logkws), extra={'spider': spider}) return self.signals.send_catch_log_deferred( signal=signals.item_scraped, item=output, response=response, spider=spider)
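# --- Illustration (standalone sketch, not Scrapy itself) --------------------
# The Slot class above implements back-pressure bookkeeping: every queued
# response contributes at least MIN_RESPONSE_SIZE bytes to active_size, and
# the engine stops feeding the scraper once active_size exceeds
# max_active_size. A minimal stand-in showing just that accounting:
from collections import deque


class MiniSlot(object):
    MIN_RESPONSE_SIZE = 1024

    def __init__(self, max_active_size=5000000):
        self.max_active_size = max_active_size
        self.queue = deque()
        self.active_size = 0

    def add(self, body):
        self.queue.append(body)
        self.active_size += max(len(body), self.MIN_RESPONSE_SIZE)

    def finish(self, body):
        self.active_size -= max(len(body), self.MIN_RESPONSE_SIZE)

    def needs_backout(self):
        return self.active_size > self.max_active_size


slot = MiniSlot(max_active_size=2048)
slot.add(b'x' * 10)           # small bodies still count as 1024 bytes
slot.add(b'y' * 3000)
assert slot.needs_backout()   # 1024 + 3000 > 2048
slot.finish(b'y' * 3000)
assert not slot.needs_backout()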
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Python 2.6 backport of OrderedDict. Modified the Python 2.7 Lib/test/test_collections.py to test only OrderedDict and slightly customized (and renamed) the modules test_support.py and mapping_tests.py that provide support for those tests. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from ...extern import six import inspect import unittest import pickle import copy from random import shuffle from . import odict_mapping as mapping_tests from ..compat.odict import OrderedDict from ...tests.helper import pytest #Skips all of these tests if the builtin ordered dict is available pytestmark = pytest.mark.skipif(str("sys.version_info >= (2,7)")) class TestOrderedDict(unittest.TestCase): def test_init(self): try: OrderedDict([('a', 1), ('b', 2)], None) # too many args assert False except TypeError: pass else: assert False pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)] self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, ['self']) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) d.__init__([('e', 5), ('f', 6)], g=7, d=4) self.assertEqual(list(d.items()), [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)]) def test_update(self): try: OrderedDict().update([('a', 1), ('b', 2)], None) # too many args assert False except TypeError: pass else: assert False pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)] od = OrderedDict() od.update(dict(pairs)) self.assertEqual(sorted(od.items()), pairs) # dict input od = OrderedDict() od.update(**dict(pairs)) self.assertEqual(sorted(od.items()), pairs) # kwds input od = OrderedDict() od.update(pairs) self.assertEqual(list(od.items()), pairs) # pairs input od = OrderedDict() od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5) self.assertEqual(list(od.items()), pairs) # mixed input # Make sure that direct calls to update do not clear previous contents # add that updates items are not moved to the end d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) d.update([('e', 5), ('f', 6)], g=7, d=4) self.assertEqual(list(d.items()), [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)]) def test_clear(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) self.assertEqual(len(od), len(pairs)) od.clear() self.assertEqual(len(od), 0) def test_delitem(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) del od['a'] self.assertNotIn('a', od) try: del od['a'] assert False except KeyError: pass else: assert False self.assertEqual(list(od.items()), pairs[:2] + pairs[3:]) def test_setitem(self): od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)]) od['c'] = 10 # existing element od['f'] = 20 # new element self.assertEqual(list(od.items()), [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)]) def test_iterators(self): pairs = [('c', 
1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) self.assertEqual(list(od), [t[0] for t in pairs]) self.assertEqual(od.keys()[:], [t[0] for t in pairs]) self.assertEqual(od.values()[:], [t[1] for t in pairs]) self.assertEqual(od.items()[:], pairs) self.assertEqual(list(six.iterkeys(od)), [t[0] for t in pairs]) self.assertEqual(list(six.itervalues(od)), [t[1] for t in pairs]) self.assertEqual(list(six.iteritems(od)), pairs) self.assertEqual(list(reversed(od)), [t[0] for t in reversed(pairs)]) def test_popitem(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) while pairs: self.assertEqual(od.popitem(), pairs.pop()) try: od.popitem() assert False except: pass else: assert False self.assertEqual(len(od), 0) def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) shuffle(pairs) while pairs: k, v = pairs.pop() self.assertEqual(od.pop(k), v) try: od.pop('xyz') assert False except KeyError: pass else: assert False self.assertEqual(len(od), 0) self.assertEqual(od.pop(k, 12345), 12345) def test_equality(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od1 = OrderedDict(pairs) od2 = OrderedDict(pairs) self.assertEqual(od1, od2) # same order implies equality pairs = pairs[2:] + pairs[:2] od2 = OrderedDict(pairs) self.assertNotEqual(od1, od2) # different order implies inequality # comparison to regular dict is not order sensitive self.assertEqual(od1, dict(od2)) self.assertEqual(dict(od2), od1) # different length implied inequality self.assertNotEqual(od1, OrderedDict(pairs[:-1])) def test_copying(self): # Check that ordered dicts are copyable, deepcopyable, picklable, # and have a repr/eval round-trip pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) update_test = OrderedDict() update_test.update(od) for i, dup in enumerate([ od.copy(), copy.copy(od), copy.deepcopy(od), pickle.loads(pickle.dumps(od, 0)), pickle.loads(pickle.dumps(od, 1)), pickle.loads(pickle.dumps(od, 2)), pickle.loads(pickle.dumps(od, -1)), eval(repr(od)), update_test, OrderedDict(od), ]): self.assertTrue(dup is not od) self.assertEquals(dup, od) self.assertEquals(list(dup.items()), list(od.items())) self.assertEquals(len(dup), len(od)) self.assertEquals(type(dup), type(od)) def test_yaml_linkage(self): # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature. # In yaml, lists are native but tuples are not. 
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) self.assertEqual(len(od.__reduce__()), 2) od.x = 10 self.assertEqual(len(od.__reduce__()), 3) def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), "OrderedDict([(u'c', 1), (u'b', 2), (u'a', 3), (u'd', 4), (u'e', 5), (u'f', 6)])") self.assertEqual(eval(repr(od)), od) self.assertEqual(repr(OrderedDict()), "OrderedDict()") def test_setdefault(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) pair_order = list(od.items()) self.assertEqual(od.setdefault('a', 10), 3) # make sure order didn't change self.assertEqual(list(od.items()), pair_order) self.assertEqual(od.setdefault('x', 10), 10) # make sure 'x' is added to the end self.assertEqual(list(od.items())[-1], ('x', 10)) def test_reinsert(self): # Given insert a, insert b, delete a, re-insert a, # verify that a is now later than b. od = OrderedDict() od['a'] = 1 od['b'] = 2 del od['a'] od['a'] = 1 self.assertEqual(list(od.items()), [('b', 2), ('a', 1)]) def assertIn(self, key, d): self.assertTrue(key in d) def assertNotIn(self, key, d): self.assertFalse(key in d) class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol): type2test = OrderedDict def test_popitem(self): d = self._empty_mapping() self.assertRaises(KeyError, d.popitem) class MyOrderedDict(OrderedDict): pass class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol): type2test = MyOrderedDict def test_popitem(self): d = self._empty_mapping() self.assertRaises(KeyError, d.popitem)
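# --- Illustration (standard-library OrderedDict, same semantics) ------------
# The behaviours the backport tests above pin down, shown with
# collections.OrderedDict: updating an existing key keeps its position,
# deleting and re-inserting a key moves it to the end, and popitem() is LIFO.
from collections import OrderedDict

od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
od['a'] = 10                       # update in place, position unchanged
assert list(od) == ['a', 'b', 'c']

del od['a']
od['a'] = 10                       # delete + re-insert moves 'a' to the end
assert list(od) == ['b', 'c', 'a']

assert od.popitem() == ('a', 10)   # popitem() removes the most recent entry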
#!/usr/bin/env python2.6 # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2009,2010,2011,2012,2013 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for testing the make command.""" import os import unittest if __name__ == "__main__": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestMake(TestBrokerCommand): # network based service mappings def testmakeafsbynet_1_checkloc(self): # Must by issued before map service. command = ["make", "--hostname", "afs-by-net.aqd-unittest.ms.com"] (out, err) = self.successtest(command) command = "show host --hostname afs-by-net.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Template: service/afs/q.ny.ms.com", command) def testmakeafsbynet_2_mapservice(self): ip = self.net.netsvcmap.subnet()[0].ip self.noouttest(["map", "service", "--networkip", ip, "--service", "afs", "--instance", "afs-by-net"]) self.noouttest(["map", "service", "--networkip", ip, "--service", "afs", "--instance", "afs-by-net2"]) def testmakeafsbynet_3_verifymapservice(self): ip = self.net.netsvcmap.subnet()[0].ip command = ["show_map", "--service=afs", "--instance=afs-by-net", "--networkip=%s" % ip] out = self.commandtest(command) self.matchoutput(out, "Archetype: aquilon Service: afs " "Instance: afs-by-net Map: Network netsvcmap", command) def testmakeafsbynet_3_verifymapservice_proto(self): ip = self.net.netsvcmap.subnet()[0].ip command = ["show_map", "--service=afs", "--instance=afs-by-net", "--networkip=%s" % ip, "--format=proto"] out = self.commandtest(command) servicemaplist = self.parse_servicemap_msg(out, expect=1) service_map = servicemaplist.servicemaps[0] self.failUnlessEqual(service_map.network.ip, str(ip)) self.failUnlessEqual(service_map.network.env_name, 'internal') self.failUnlessEqual(service_map.service.name, 'afs') self.failUnlessEqual(service_map.service.serviceinstances[0].name, 'afs-by-net') def testmakeafsbynet_4_make(self): command = ["make", "--hostname", "afs-by-net.aqd-unittest.ms.com"] (out, err) = self.successtest(command) command = "show host --hostname afs-by-net.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Template: service/afs/afs-by-net", command) def testmakeafsbynet_5_mapconflicts(self): ip = self.net.netsvcmap.subnet()[0].ip command = ["map", "service", "--networkip", ip, "--service", "afs", "--instance", "afs-by-net", "--building", "whatever"] out = self.badoptiontest(command) self.matchoutput(out, "networkip conflicts with building", command) # network / personality based service mappings def testmakenetmappers_1_maplocsvc_nopers(self): """Maps a location based service map just to be overridden by a location based personality service map""" self.noouttest(["map", "service", "--building", "ut", "--service", "netmap", "--instance", "q.ny.ms.com"]) command = ["make", "--hostname", "netmap-pers.aqd-unittest.ms.com"] (out, err) = self.successtest(command) command 
= "show host --hostname netmap-pers.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Template: service/netmap/q.ny.ms.com", command) def testmakenetmappers_2_maplocsvc_pers(self): """Maps a location based personality service map to be overridden by a network based personality service map""" self.noouttest(["map", "service", "--building", "ut", "--personality", "eaitools", "--archetype", "aquilon", "--service", "netmap", "--instance", "p-q.ny.ms.com"]) command = ["make", "--hostname", "netmap-pers.aqd-unittest.ms.com"] (out, err) = self.successtest(command) command = "show host --hostname netmap-pers.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Template: service/netmap/p-q.ny.ms.com", command) def testmakenetmappers_3_mapservice(self): ip = self.net.netperssvcmap.subnet()[0].ip self.noouttest(["map", "service", "--networkip", ip, "--service", "netmap", "--instance", "netmap-pers", "--personality", "eaitools", "--archetype", "aquilon"]) def testmakenetmappers_4_verifymapservice(self): ip = self.net.netperssvcmap.subnet()[0].ip command = ["show_map", "--service=netmap", "--instance=netmap-pers", "--networkip=%s" % ip, "--personality", "eaitools", "--archetype", "aquilon"] out = self.commandtest(command) self.matchoutput(out, "Archetype: aquilon Personality: eaitools " "Service: netmap " "Instance: netmap-pers Map: Network netperssvcmap", command) def testmakenetmappers_5_verifymapservice_proto(self): ip = self.net.netperssvcmap.subnet()[0].ip command = ["show_map", "--service=netmap", "--instance=netmap-pers", "--networkip=%s" % ip, "--personality", "eaitools", "--archetype", "aquilon", "--format=proto"] out = self.commandtest(command) servicemaplist = self.parse_servicemap_msg(out, expect=1) service_map = servicemaplist.servicemaps[0] self.failUnlessEqual(service_map.network.ip, str(ip)) self.failUnlessEqual(service_map.network.env_name, 'internal') self.failUnlessEqual(service_map.service.name, 'netmap') self.failUnlessEqual(service_map.service.serviceinstances[0].name, 'netmap-pers') self.failUnlessEqual(service_map.personality.name, 'eaitools') self.failUnlessEqual(service_map.personality.archetype.name, 'aquilon') def testmakenetmappers_6_make(self): command = ["make", "--hostname", "netmap-pers.aqd-unittest.ms.com"] (out, err) = self.successtest(command) command = "show host --hostname netmap-pers.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Template: service/netmap/netmap-pers", command) def testmakevmhosts(self): for i in range(1, 6): command = ["make", "--hostname", "evh%s.aqd-unittest.ms.com" % i, "--osname", "esxi", "--osversion", "4.0.0", "--buildstatus", "rebuild"] (out, err) = self.successtest(command) self.matchclean(err, "removing binding", command) self.assert_(os.path.exists(os.path.join( self.config.get("broker", "profilesdir"), "evh1.aqd-unittest.ms.com%s" % self.profile_suffix))) self.failUnless(os.path.exists( self.build_profile_name("evh1.aqd-unittest.ms.com", domain="unittest"))) servicedir = os.path.join(self.config.get("broker", "plenarydir"), "servicedata") results = self.grepcommand(["-rl", "evh%s.aqd-unittest.ms.com" % i, servicedir]) self.failUnless(results, "No service plenary data that includes" "evh%s.aqd-unittest.ms.com" % i) def testmake10gighosts(self): for i in range(51, 75): command = ["make", "--hostname", "evh%s.aqd-unittest.ms.com" % i] (out, err) = self.successtest(command) def testmakeccisshost(self): command = ["make", 
"--hostname=unittest18.aqd-unittest.ms.com"] (out, err) = self.successtest(command) self.matchoutput(err, "2/2 compiled", command) def testmakezebra(self): command = ["make", "--hostname", "unittest20.aqd-unittest.ms.com"] (out, err) = self.successtest(command) self.matchoutput(err, "2/2 compiled", command) def testverifyunittest20(self): eth0_ip = self.net.unknown[11].usable[0] eth0_broadcast = self.net.unknown[11].broadcast eth0_netmask = self.net.unknown[11].netmask eth0_gateway = self.net.unknown[11].gateway eth1_ip = self.net.unknown[12].usable[0] eth1_broadcast = self.net.unknown[12].broadcast eth1_netmask = self.net.unknown[12].netmask eth1_gateway = self.net.unknown[12].gateway eth1_1_ip = self.net.unknown[12].usable[3] hostname_ip = self.net.unknown[13].usable[2] zebra2_ip = self.net.unknown[13].usable[1] zebra3_ip = self.net.unknown[13].usable[0] command = ["cat", "--hostname", "unittest20.aqd-unittest.ms.com", "--data"] out = self.commandtest(command) self.matchoutput(out, "structure template hostdata/unittest20.aqd-unittest.ms.com;", command) self.searchoutput(out, r'"system/resources/service_address" = ' r'append\(create\("resource/host/unittest20.aqd-unittest.ms.com/service_address/hostname/config"\)\);', command) self.searchoutput(out, r'"system/resources/service_address" = ' r'append\(create\("resource/host/unittest20.aqd-unittest.ms.com/service_address/zebra2/config"\)\);', command) self.searchoutput(out, r'"system/resources/service_address" = ' r'append\(create\("resource/host/unittest20.aqd-unittest.ms.com/service_address/zebra3/config"\)\);', command) self.searchoutput(out, r'"system/network/routers" = nlist\(\s*' r'"eth0", list\(\s*"%s",\s*"%s"\s*\),\s*' r'"eth1", list\(\s*"%s",\s*"%s"\s*\)\s*' r'\);' % (self.net.unknown[11][1], self.net.unknown[11][2], self.net.unknown[12][1], self.net.unknown[12][2]), command) self.searchoutput(out, r'"eth0", nlist\(\s*' r'"bootproto", "static",\s*' r'"broadcast", "%s",\s*' r'"fqdn", "unittest20-e0.aqd-unittest.ms.com",\s*' r'"gateway", "%s",\s*' r'"ip", "%s",\s*' r'"netmask", "%s",\s*' r'"network_environment", "internal",\s*' r'"network_type", "unknown"\s*\)\s*' % (eth0_broadcast, eth0_gateway, eth0_ip, eth0_netmask), command) self.searchoutput(out, r'"eth1", nlist\(\s*' r'"aliases", nlist\(\s*' r'"e1", nlist\(\s*' r'"broadcast", "%s",\s*' r'"fqdn", "unittest20-e1-1.aqd-unittest.ms.com",\s*' r'"ip", "%s",\s*' r'"netmask", "%s"\s*\)\s*\),\s*' r'"bootproto", "static",\s*' r'"broadcast", "%s",\s*' r'"fqdn", "unittest20-e1.aqd-unittest.ms.com",\s*' r'"gateway", "%s",\s*' r'"ip", "%s",\s*' r'"netmask", "%s",\s*' r'"network_environment", "internal",\s*' r'"network_type", "unknown"\s*\)\s*' % (eth1_broadcast, eth1_1_ip, eth1_netmask, eth1_broadcast, eth1_gateway, eth1_ip, eth1_netmask), command) self.matchoutput(out, '"system/network/default_gateway" = \"%s\";' % eth0_gateway, command) command = ["cat", "--hostname", "unittest20.aqd-unittest.ms.com"] out = self.commandtest(command) self.matchoutput(out, '"/metadata/template/branch/name" = "unittest";', command) self.matchoutput(out, '"/metadata/template/branch/type" = "domain";', command) self.matchclean(out, '"/metadata/template/branch/author"', command) command = ["cat", "--service_address", "hostname", "--hostname", "unittest20.aqd-unittest.ms.com"] out = self.commandtest(command) self.matchoutput(out, '"name" = "hostname";', command) self.matchoutput(out, '"ip" = "%s";' % hostname_ip, command) self.searchoutput(out, r'"interfaces" = list\(\s*"eth0",\s*"eth1"\s*\);', command) 
self.matchoutput(out, '"fqdn" = "unittest20.aqd-unittest.ms.com";', command) command = ["cat", "--service_address", "zebra2", "--hostname", "unittest20.aqd-unittest.ms.com"] out = self.commandtest(command) self.matchoutput(out, '"name" = "zebra2";', command) self.matchoutput(out, '"ip" = "%s";' % zebra2_ip, command) self.searchoutput(out, r'"interfaces" = list\(\s*"eth0",\s*"eth1"\s*\);', command) self.matchoutput(out, '"fqdn" = "zebra2.aqd-unittest.ms.com";', command) command = ["cat", "--service_address", "zebra3", "--hostname", "unittest20.aqd-unittest.ms.com"] out = self.commandtest(command) self.matchoutput(out, '"name" = "zebra3";', command) self.matchoutput(out, '"ip" = "%s";' % zebra3_ip, command) self.searchoutput(out, r'"interfaces" = list\(\s*"eth0",\s*"eth1"\s*\);', command) self.matchoutput(out, '"fqdn" = "zebra3.aqd-unittest.ms.com";', command) def testmakeunittest21(self): command = ["make", "--hostname", "unittest21.aqd-unittest.ms.com"] (out, err) = self.successtest(command) self.matchoutput(err, "2/2 compiled", command) def testverifyunittest21(self): net = self.net.unknown[11] command = ["cat", "--hostname", "unittest21.aqd-unittest.ms.com", "--data"] out = self.commandtest(command) self.matchoutput(out, '"system/network/default_gateway" = \"%s\";' % net.gateway, command) self.searchoutput(out, r'"system/network/routers" = nlist\(\s*' r'"bond0", list\(\s*' r'"%s",\s*"%s"\s*\)\s*\);' % (net[1], net[2]), command) def testmakeunittest23(self): command = ["make", "--hostname", "unittest23.aqd-unittest.ms.com"] (out, err) = self.successtest(command) self.matchoutput(err, "2/2 compiled", command) def testverifyunittest23(self): # Verify that the host chooses the closest router command = ["cat", "--hostname", "unittest23.aqd-unittest.ms.com", "--data"] out = self.commandtest(command) net = self.net.vpls[0] ip = net.usable[1] router = net[1] self.searchoutput(out, r'"eth0", nlist\(\s*' r'"bootproto", "static",\s*' r'"broadcast", "%s",\s*' r'"fqdn", "unittest23.aqd-unittest.ms.com",\s*' r'"gateway", "%s",\s*' r'"ip", "%s",\s*' r'"netmask", "%s",\s*' r'"network_environment", "internal",\s*' r'"network_type", "vpls"\s*\)\s*' % (net.broadcast, router, ip, net.netmask), command) self.matchoutput(out, '"system/network/default_gateway" = \"%s\";' % router, command) self.searchoutput(out, r'"system/network/routers" = nlist\(\s*' r'"eth0", list\(\s*"%s"\s*\)\s*\);' % router, command) def testmakeunittest24(self): command = ["make", "--hostname", "unittest24.aqd-unittest.ms.com"] (out, err) = self.successtest(command) self.matchoutput(err, "2/2 compiled", command) def testverifyunittest24(self): # Verify that the host chooses the closest router command = ["cat", "--hostname", "unittest24.aqd-unittest.ms.com", "--data"] out = self.commandtest(command) net = self.net.vpls[0] ip = net.usable[2] router = net[2] self.searchoutput(out, r'"eth0", nlist\(\s*' r'"bootproto", "static",\s*' r'"broadcast", "%s",\s*' r'"fqdn", "unittest24.aqd-unittest.ms.com",\s*' r'"gateway", "%s",\s*' r'"ip", "%s",\s*' r'"netmask", "%s",\s*' r'"network_environment", "internal",\s*' r'"network_type", "vpls"\s*\)\s*' % (net.broadcast, router, ip, net.netmask), command) self.matchoutput(out, '"system/network/default_gateway" = \"%s\";' % router, command) self.searchoutput(out, r'"system/network/routers" = nlist\(\s*' r'"eth0", list\(\s*"%s"\s*\)\s*\);' % router, command) def testmakeunittest25(self): command = ["make", "--hostname", "unittest25.aqd-unittest.ms.com"] (out, err) = self.successtest(command) 
self.matchoutput(err, "2/2 compiled", command) def testverifyunittest25(self): # Verify that the host chooses the closest router command = ["cat", "--hostname", "unittest25.aqd-unittest.ms.com", "--data"] out = self.commandtest(command) net = self.net.unknown[1] ip = net[4] router = net[2] self.searchoutput(out, r'"eth1", nlist\(\s*' r'"bootproto", "static",\s*' r'"broadcast", "%s",\s*' r'"fqdn", "unittest25-e1.utcolo.aqd-unittest.ms.com",\s*' r'"gateway", "%s",\s*' r'"ip", "%s",\s*' r'"netmask", "%s",\s*' r'"network_environment", "utcolo",\s*' r'"network_type", "unknown"\s*\)\s*' % (net.broadcast, router, ip, net.netmask), command) self.matchoutput(out, '"system/network/default_gateway" = "%s";' % self.net.unknown[0].gateway, command) def testmakeaurora(self): command = ["make", "--hostname", self.aurora_with_node + ".ms.com"] self.successtest(command) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestMake) unittest.TextTestRunner(verbosity=2).run(suite)
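# --- Illustration (standalone, not the Aquilon test harness) ----------------
# The tests above lean on TestBrokerCommand helpers such as matchoutput() and
# searchoutput(). A minimal stand-alone version of that assertion pattern,
# using only the standard library, could look like this; the helper names are
# reused for readability only and the fake output is invented for the demo.
import re
import unittest


class OutputAssertions(unittest.TestCase):
    def matchoutput(self, out, expected, command):
        self.assertIn(expected, out,
                      "%r not found in output of %r" % (expected, command))

    def searchoutput(self, out, pattern, command):
        self.assertTrue(re.search(pattern, out),
                        "pattern %r not found in output of %r" % (pattern, command))

    def test_example(self):
        fake_out = 'Template: service/afs/q.ny.ms.com\n"ip" = "10.0.0.1";'
        self.matchoutput(fake_out, "Template: service/afs/q.ny.ms.com", "show host")
        self.searchoutput(fake_out, r'"ip" = "10\.0\.0\.\d+";', "cat --hostname")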
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import jinja2 from oslo_log import log as logging from oslo_utils import encodeutils import six from kube import base from senlin.common import consts from senlin.common import exception as exc from senlin.common.i18n import _ from senlin.common import schema LOG = logging.getLogger(__name__) class ServerProfile(base.KubeBaseProfile): """Profile for an kubernetes master server.""" VERSIONS = { '1.0': [ {'status': consts.EXPERIMENTAL, 'since': '2017.10'} ] } KEYS = ( CONTEXT, FLAVOR, IMAGE, KEY_NAME, PUBLIC_NETWORK, BLOCK_DEVICE_MAPPING_V2, ) = ( 'context', 'flavor', 'image', 'key_name', 'public_network', 'block_device_mapping_v2', ) INTERNAL_KEYS = ( KUBEADM_TOKEN, KUBE_MASTER_IP, SECURITY_GROUP, PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER, KUBE_MASTER_FLOATINGIP, KUBE_MASTER_FLOATINGIP_ID, SCALE_OUT_RECV_ID, SCALE_OUT_URL, ) = ( 'kubeadm_token', 'kube_master_ip', 'security_group', 'private_network', 'private_subnet', 'private_router', 'kube_master_floatingip', 'kube_master_floatingip_id', 'scale_out_recv_id', 'scale_out_url', ) NETWORK_KEYS = ( PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, FLOATING_NETWORK, FLOATING_IP, ) = ( 'port', 'fixed_ip', 'network', 'security_groups', 'floating_network', 'floating_ip', ) BDM2_KEYS = ( BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, BDM2_DELETE_ON_TERMINATION, ) = ( 'uuid', 'source_type', 'destination_type', 'disk_bus', 'device_name', 'volume_size', 'guest_format', 'boot_index', 'device_type', 'delete_on_termination', ) properties_schema = { CONTEXT: schema.Map( _('Customized security context for operating servers.'), ), FLAVOR: schema.String( _('ID of flavor used for the server.'), required=True, updatable=True, ), IMAGE: schema.String( # IMAGE is not required, because there could be BDM or BDMv2 # support and the corresponding settings effective _('ID of image to be used for the new server.'), updatable=True, ), KEY_NAME: schema.String( _('Name of Nova keypair to be injected to server.'), ), PUBLIC_NETWORK: schema.String( _('Public network for kubernetes.'), required=True, ), BLOCK_DEVICE_MAPPING_V2: schema.List( _('A list specifying the properties of block devices to be used ' 'for this server.'), schema=schema.Map( _('A map specifying the properties of a block device to be ' 'used by the server.'), schema={ BDM2_UUID: schema.String( _('ID of the source image, snapshot or volume'), ), BDM2_SOURCE_TYPE: schema.String( _("Volume source type, must be one of 'image', " "'snapshot', 'volume' or 'blank'"), required=True, ), BDM2_DESTINATION_TYPE: schema.String( _("Volume destination type, must be 'volume' or " "'local'"), required=True, ), BDM2_DISK_BUS: schema.String( _('Bus of the device.'), ), BDM2_DEVICE_NAME: schema.String( _('Name of the device(e.g. 
vda, xda, ....).'), ), BDM2_VOLUME_SIZE: schema.Integer( _('Size of the block device in MB(for swap) and ' 'in GB(for other formats)'), required=True, ), BDM2_GUEST_FORMAT: schema.String( _('Specifies the disk file system format(e.g. swap, ' 'ephemeral, ...).'), ), BDM2_BOOT_INDEX: schema.Integer( _('Define the boot order of the device'), ), BDM2_DEVICE_TYPE: schema.String( _('Type of the device(e.g. disk, cdrom, ...).'), ), BDM2_DELETE_ON_TERMINATION: schema.Boolean( _('Whether to delete the volume when the server ' 'stops.'), ), } ), ), } def __init__(self, type_name, name, **kwargs): super(ServerProfile, self).__init__(type_name, name, **kwargs) self.server_id = None def do_cluster_create(self, obj): self._generate_kubeadm_token(obj) self._create_security_group(obj) self._create_network(obj) def do_cluster_delete(self, obj): if obj.dependents and 'kube-node' in obj.dependents: msg = ("Cluster %s delete failed, " "Node clusters %s must be deleted first." % (obj.id, obj.dependents['kube-node'])) raise exc.EResourceDeletion(type='kubernetes.master', id=obj.id, message=msg) self._delete_network(obj) self._delete_security_group(obj) def do_create(self, obj): """Create a server for the node object. :param obj: The node object for which a server will be created. """ kwargs = {} for key in self.KEYS: if self.properties[key] is not None: kwargs[key] = self.properties[key] image_ident = self.properties[self.IMAGE] if image_ident is not None: image = self._validate_image(obj, image_ident, 'create') kwargs.pop(self.IMAGE) kwargs['imageRef'] = image.id flavor_ident = self.properties[self.FLAVOR] flavor = self._validate_flavor(obj, flavor_ident, 'create') kwargs.pop(self.FLAVOR) kwargs['flavorRef'] = flavor.id keypair_name = self.properties[self.KEY_NAME] if keypair_name: keypair = self._validate_keypair(obj, keypair_name, 'create') kwargs['key_name'] = keypair.name kwargs['name'] = obj.name metadata = self._build_metadata(obj, {}) kwargs['metadata'] = metadata jj_vars = {} cluster_data = self._get_cluster_data(obj) kwargs['networks'] = [{'uuid': cluster_data[self.PRIVATE_NETWORK]}] # Get user_data parameters from metadata jj_vars['KUBETOKEN'] = cluster_data[self.KUBEADM_TOKEN] jj_vars['MASTER_FLOATINGIP'] = cluster_data[ self.KUBE_MASTER_FLOATINGIP] block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] if block_device_mapping_v2 is not None: kwargs['block_device_mapping_v2'] = self._resolve_bdm( obj, block_device_mapping_v2, 'create') # user_data = self.properties[self.USER_DATA] user_data = base.loadScript('./scripts/master.sh') if user_data is not None: # Use jinja2 to replace variables defined in user_data try: jj_t = jinja2.Template(user_data) user_data = jj_t.render(**jj_vars) except (jinja2.exceptions.UndefinedError, ValueError) as ex: # TODO(anyone) Handle jinja2 error pass ud = encodeutils.safe_encode(user_data) kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) sgid = self._get_security_group(obj) kwargs['security_groups'] = [{'name': sgid}] server = None resource_id = None try: server = self.compute(obj).server_create(**kwargs) self.compute(obj).wait_for_server(server.id) server = self.compute(obj).server_get(server.id) self._update_master_ip(obj, server.addresses[''][0]['addr']) self._associate_floatingip(obj, server) LOG.info("Created master node: %s" % server.id) return server.id except exc.InternalError as ex: if server and server.id: resource_id = server.id raise exc.EResourceCreation(type='server', message=six.text_type(ex), resource_id=resource_id) 
def do_delete(self, obj, **params): """Delete the physical resource associated with the specified node. :param obj: The node object to operate on. :param kwargs params: Optional keyword arguments for the delete operation. :returns: This operation always return True unless exception is caught. :raises: `EResourceDeletion` if interaction with compute service fails. """ if not obj.physical_id: return True server_id = obj.physical_id ignore_missing = params.get('ignore_missing', True) internal_ports = obj.data.get('internal_ports', []) force = params.get('force', False) try: self._disassociate_floatingip(obj, server_id) driver = self.compute(obj) if force: driver.server_force_delete(server_id, ignore_missing) else: driver.server_delete(server_id, ignore_missing) driver.wait_for_server_delete(server_id) if internal_ports: ex = self._delete_ports(obj, internal_ports) if ex: raise ex return True except exc.InternalError as ex: raise exc.EResourceDeletion(type='server', id=server_id, message=six.text_type(ex))
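# --- Illustration (standalone sketch of the user_data step in do_create) ----
# do_create() above renders the master bootstrap script with jinja2 and then
# base64-encodes it for the Nova user_data field. The template string and
# values below are made up for the example; only the KUBETOKEN and
# MASTER_FLOATINGIP variable names mirror what is actually passed in jj_vars.
import base64
import jinja2

script_template = (
    "#!/bin/sh\n"
    "kubeadm init --token {{ KUBETOKEN }} "
    "--apiserver-cert-extra-sans {{ MASTER_FLOATINGIP }}\n"
)
rendered = jinja2.Template(script_template).render(
    KUBETOKEN='abcdef.0123456789abcdef',      # hypothetical token
    MASTER_FLOATINGIP='198.51.100.10',        # hypothetical floating IP
)
user_data = base64.b64encode(rendered.encode('utf-8')).decode('utf-8')
# 'user_data' is the value that would be passed to server_create(user_data=...).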
import unittest from pynmea.nmea import (NMEASentence, GPAAM, GPALM, GPAPA, GPAPB, GPBEC, GPBOD, GPBWC, GPBWR, GPBWW, GPGGA, GPGLL, GPGSA, GPGSV, GPHDG, GPHDT, GPZDA, GPSTN, GPRMA, GPRMB, GPRMC, GPRTE, GPR00, GPTRF, GPVBW, GPVTG, GPWCV, GPWNC, GPWPL, GPXTE, PGRME, PGRMZ, PGRMM) from pynmea.utils import checksum_calc class TestNMEAParse(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_basic_parse(self): parse_map = (("Latitude", "lat"), ("Direction", "lat_dir"), ("Longitude", "lon"), ("Direction", "lon_dir")) p = NMEASentence(parse_map) p._parse("$GPGLL,3751.65,S,14507.36,E*77") self.assertEquals("GPGLL", p.sen_type) self.assertEquals(p.parts, ['GPGLL', '3751.65', 'S', '14507.36', 'E']) def test_parse(self): parse_map = (("Latitude", "lat"), ("Direction", "lat_dir"), ("Longitude", "lon"), ("Direction", "lon_dir")) p = NMEASentence(parse_map) p.parse("$GPGLL,3751.65,S,14507.36,E*77") self.assertEquals("GPGLL", p.sen_type) self.assertEquals(p.parts, ['GPGLL', '3751.65', 'S', '14507.36', 'E']) self.assertEquals(p.lat, '3751.65') self.assertEquals(p.lat_dir, 'S') self.assertEquals(p.lon, '14507.36') self.assertEquals(p.lon_dir, 'E') self.assertEquals(p.checksum, '77') def test_checksum_passes(self): parse_map = ('Checksum', 'checksum') nmea_str = "$GPGLL,3751.65,S,14507.36,E*77" p = NMEASentence(parse_map) p.checksum = '77' p.nmea_sentence = nmea_str result = p.check_chksum() self.assertTrue(result) def test_checksum_fails_wrong_checksum(self): parse_map = ('Checksum', 'checksum') nmea_str = "$GPGLL,3751.65,S,14507.36,E*78" p = NMEASentence(parse_map) p.checksum = '78' p.nmea_sentence = nmea_str result = p.check_chksum() self.assertFalse(result) def test_checksum_fails_wrong_str(self): parse_map = ('Checksum', 'checksum') nmea_str = "$GPGLL,3751.65,S,14507.36,W*77" p = NMEASentence(parse_map) p.checksum = '77' p.nmea_sentence = nmea_str result = p.check_chksum() self.assertFalse(result) class TestGPAAM(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPAAM() p.parse("$GPAAM,A,A,0.10,N,WPTNME*32") self.assertEquals("GPAAM", p.sen_type) self.assertEquals("A", p.arrival_circ_entered) self.assertEquals("A", p.perp_passed) self.assertEquals("0.10", p.circle_rad) self.assertEquals("N", p.circle_rad_unit) self.assertEquals("WPTNME", p.waypoint_id) self.assertEquals("32", p.checksum) class TestGPALM(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPALM() p.parse("$GPALM,32,1,01,5,00,264A,4E,0A5E,FD3F,A11257,B8E036,536C67,2532C1,069,000*7B") self.assertEquals("GPALM", p.sen_type) self.assertEquals("32", p.total_num_msgs) self.assertEquals("1", p.msg_num) self.assertEquals("01", p.sat_prn_num) self.assertEquals("5", p.gps_week_num) self.assertEquals("00", p.sv_health) self.assertEquals("264A", p.eccentricity) self.assertEquals("4E", p.alamanac_ref_time) self.assertEquals("0A5E", p.inc_angle) self.assertEquals("FD3F", p.rate_right_asc) self.assertEquals("A11257", p.root_semi_major_axis) self.assertEquals("B8E036", p.arg_perigee) self.assertEquals("536C67", p.lat_asc_node) self.assertEquals("2532C1", p.mean_anom) self.assertEquals("069", p.f0_clock_param) self.assertEquals("000", p.f1_clock_param) self.assertEquals("7B", p.checksum) class TestGPAPA(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPAPA() p.parse("$GPAPA,A,A,0.10,R,N,V,V,011,M,DEST*82") self.assertEquals("GPAPA", p.sen_type) self.assertEquals("A", p.status_gen) 
self.assertEquals("A", p.status_cycle_lock) self.assertEquals("0.10", p.cross_track_err_mag) self.assertEquals("R", p.dir_steer) self.assertEquals("N", p.cross_track_unit) self.assertEquals("V", p.arr_circle_entered) self.assertEquals("V", p.perp_passed) self.assertEquals("011", p.bearing_to_dest) self.assertEquals("M", p.bearing_type) self.assertEquals("DEST", p.dest_waypoint_id) self.assertEquals("82", p.checksum) class TestGPAPB(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPAPB() p.parse("$GPAPB,A,A,0.10,R,N,V,V,011,M,DEST,011,M,011,M*82") self.assertEquals("GPAPB", p.sen_type) self.assertEquals("A", p.status_gen) self.assertEquals("A", p.status_cycle_lock) self.assertEquals("0.10", p.cross_track_err_mag) self.assertEquals("R", p.dir_steer) self.assertEquals("N", p.cross_track_unit) self.assertEquals("V", p.arr_circle_entered) self.assertEquals("V", p.perp_passed) self.assertEquals("011", p.bearing_to_dest) self.assertEquals("M", p.bearing_type) self.assertEquals("DEST", p.dest_waypoint_id) self.assertEquals("011", p.bearing_pres_dest) self.assertEquals("M", p.bearing_pres_dest_type) self.assertEquals("011", p.heading_to_dest) self.assertEquals("M", p.heading_to_dest_type) self.assertEquals("82", p.checksum) class TestGPBEC(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map_1(self): """ No FAA mode indicator """ p = GPBEC() p.parse("$GPBEC,081837,,,,,,T,,M,,N,*13") self.assertEquals("GPBEC", p.sen_type) self.assertEquals("081837", p.timestamp) self.assertEquals("", p.waypoint_lat) self.assertEquals("", p.waypoint_lat_dir) self.assertEquals("", p.waypoint_lon) self.assertEquals("", p.waypoint_lon_dir) self.assertEquals("", p.bearing_true) self.assertEquals("T", p.bearing_true_sym) self.assertEquals("", p.bearing_mag) self.assertEquals("M", p.bearing_mag_sym) self.assertEquals("", p.nautical_miles) self.assertEquals("N", p.nautical_miles_sym) self.assertEquals("", p.waypoint_id) self.assertEquals("13", p.checksum) def test_parses_map_2(self): """ No FAA mode indicator """ p = GPBEC() p.parse("GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM*11") self.assertEquals("GPBEC", p.sen_type) self.assertEquals("220516", p.timestamp) self.assertEquals("5130.02", p.waypoint_lat) self.assertEquals("N", p.waypoint_lat_dir) self.assertEquals("00046.34", p.waypoint_lon) self.assertEquals("W", p.waypoint_lon_dir) self.assertEquals("213.8", p.bearing_true) self.assertEquals("T", p.bearing_true_sym) self.assertEquals("218.0", p.bearing_mag) self.assertEquals("M", p.bearing_mag_sym) self.assertEquals("0004.6", p.nautical_miles) self.assertEquals("N", p.nautical_miles_sym) self.assertEquals("EGLM", p.waypoint_id) self.assertEquals("11", p.checksum) def test_parses_map_3(self): """ WITH FAA mode indicator """ p = GPBEC() p.parse("GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM,X*11") self.assertEquals("GPBEC", p.sen_type) self.assertEquals("220516", p.timestamp) self.assertEquals("5130.02", p.waypoint_lat) self.assertEquals("N", p.waypoint_lat_dir) self.assertEquals("00046.34", p.waypoint_lon) self.assertEquals("W", p.waypoint_lon_dir) self.assertEquals("213.8", p.bearing_true) self.assertEquals("T", p.bearing_true_sym) self.assertEquals("218.0", p.bearing_mag) self.assertEquals("M", p.bearing_mag_sym) self.assertEquals("0004.6", p.nautical_miles) self.assertEquals("N", p.nautical_miles_sym) self.assertEquals("EGLM", p.waypoint_id) self.assertEquals("X", p.faa_mode) self.assertEquals("11", 
p.checksum) class TestGPBOD(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPBOD() p.parse("$GPBOD,045.,T,023.,M,DEST,START") self.assertEquals("GPBOD", p.sen_type) self.assertEquals(p.parts, ['GPBOD', '045.', 'T', '023.', 'M', 'DEST', 'START']) self.assertEquals(p.bearing_t, '045.') self.assertEquals(p.bearing_t_type, 'T') self.assertEquals(p.bearing_mag, '023.') self.assertEquals(p.bearing_mag_type, 'M') self.assertEquals(p.dest, 'DEST') self.assertEquals(p.start, 'START') def test_gets_properties(self): p = GPBOD() p.parse("$GPBOD,045.,T,023.,M,DEST,START") self.assertEquals(p.bearing_true, '045.,T') self.assertEquals(p.bearing_magnetic, '023.,M') self.assertEquals(p.destination, 'DEST') self.assertEquals(p.origin, 'START') class TestGPBWC(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPBWC() p.parse("$GPBWC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM*21") self.assertEquals("GPBWC", p.sen_type) self.assertEquals("220516", p.timestamp) self.assertEquals("5130.02", p.lat_next) self.assertEquals("N", p.lat_next_direction) self.assertEquals("00046.34", p.lon_next) self.assertEquals("W", p.lon_next_direction) self.assertEquals("213.8", p.true_track) self.assertEquals("T", p.true_track_sym) self.assertEquals("218.0", p.mag_track) self.assertEquals("M", p.mag_sym) self.assertEquals("0004.6", p.range_next) self.assertEquals("N", p.range_unit) self.assertEquals("EGLM", p.waypoint_name) self.assertEquals("21", p.checksum) def test_checksum_passes(self): p = GPBWC() p.parse("$GPBWC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM*21") result = p.check_chksum() self.assertTrue(result) class TestGPBWR(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPBWR() p.parse("$GPBWR,161102,4217.4920,N,07055.7950,W,296.9,T,311.9,M,47.664,N,0001*3E") self.assertEquals("GPBWR", p.sen_type) self.assertEquals("161102", p.timestamp) self.assertEquals("4217.4920", p.lat_next) self.assertEquals("N", p.lat_next_direction) self.assertEquals("07055.7950", p.lon_next) self.assertEquals("W", p.lon_next_direction) self.assertEquals("296.9", p.true_track) self.assertEquals("T", p.true_track_sym) self.assertEquals("311.9", p.mag_track) self.assertEquals("M", p.mag_sym) self.assertEquals("47.664", p.range_next) self.assertEquals("N", p.range_unit) self.assertEquals("0001", p.waypoint_name) self.assertEquals("3E", p.checksum) class TestGPBWW(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPBWW() p.parse("$GPBWW,x.x,T,x.x,M,c--c,c--c*ff") self.assertEquals("GPBWW", p.sen_type) self.assertEquals("x.x", p.bearing_deg_true) self.assertEquals("T", p.bearing_deg_true_sym) self.assertEquals("x.x", p.bearing_deg_mag) self.assertEquals("M", p.bearing_deg_mag_sym) self.assertEquals("c--c", p.waypoint_id_dest) self.assertEquals("c--c", p.waypoint_id_orig) self.assertEquals("ff", p.checksum) class TestGPGGA(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPGGA() p.parse("$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47") self.assertEquals("GPGGA", p.sen_type) self.assertEquals("123519", p.timestamp) self.assertEquals("4807.038", p.latitude) self.assertEquals("N", p.lat_direction) self.assertEquals("01131.000", p.longitude) self.assertEquals("E", p.lon_direction) self.assertEquals("1", p.gps_qual) self.assertEquals("08", p.num_sats) 
self.assertEquals("0.9", p.horizontal_dil) self.assertEquals("545.4", p.antenna_altitude) self.assertEquals("M", p.altitude_units) self.assertEquals("46.9", p.geo_sep) self.assertEquals("M", p.geo_sep_units) self.assertEquals("", p.age_gps_data) self.assertEquals("", p.ref_station_id) self.assertEquals("47", p.checksum) class TestGPGLL(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map1(self): p = GPGLL() p.parse("$GPGLL,3751.65,S,14507.36,E*77") self.assertEquals("GPGLL", p.sen_type) self.assertEquals("3751.65", p.lat) self.assertEquals("S", p.lat_dir) self.assertEquals("14507.36", p.lon) self.assertEquals("E", p.lon_dir) #self.assertEquals("", p.timestamp) # No timestamp given self.assertEquals("77", p.checksum) def test_parses_map2(self): p = GPGLL() p.parse("$GPGLL,4916.45,N,12311.12,W,225444,A") self.assertEquals("GPGLL", p.sen_type) self.assertEquals("4916.45", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("12311.12", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("225444", p.timestamp) self.assertEquals("A", p.data_valid) #def test_checksum_passes1(self): #p = GPGLL() #p.nmea_sentence = "$GPGLL,4916.45,N,12311.12,W,225444,A" #p.data_validity = 'A' ##p._use_data_validity = True #result = p.check_chksum() #self.assertTrue(result) #def test_checksum_fails1(self): #p = GPGLL() #p.nmea_sentence = "$GPGLL,4916.45,N,12311.12,W,225444,B" #p.checksum = 'B' #p._use_data_validity = True #result = p.check_chksum() #self.assertFalse(result) def test_checksum_passes2(self): p = GPGLL() p.nmea_sentence = "$GPGLL,3751.65,S,14507.36,E*77" p.checksum = '77' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails2(self): p = GPGLL() p.nmea_sentence = "$GPGLL,3751.65,S,14507.36,E*78" p.checksum = '78' result = p.check_chksum() self.assertFalse(result) def test_gets_properties(self): p = GPGLL() p.parse("$GPGLL,3751.65,S,14507.36,E*77") self.assertEquals(p.latitude, float('3751.65')) self.assertEquals(p.longitude, float('14507.36')) self.assertEquals(p.lat_direction, 'South') self.assertEquals(p.lon_direction, 'East') self.assertEquals(p.checksum, "77") class TestGPGSA(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPGSA() p.parse("$GPGSA,A,3,04,05,,09,12,,,24,,,,,2.5,1.3,2.1*39") self.assertEquals("GPGSA", p.sen_type) self.assertEquals("A", p.mode) self.assertEquals("3", p.mode_fix_type) self.assertEquals("04", p.sv_id01) self.assertEquals("05", p.sv_id02) self.assertEquals("", p.sv_id03) self.assertEquals("09", p.sv_id04) self.assertEquals("12", p.sv_id05) self.assertEquals("", p.sv_id06) self.assertEquals("", p.sv_id07) self.assertEquals("24", p.sv_id08) self.assertEquals("", p.sv_id09) self.assertEquals("", p.sv_id10) self.assertEquals("", p.sv_id11) self.assertEquals("", p.sv_id12) self.assertEquals("2.5", p.pdop) self.assertEquals("1.3", p.hdop) self.assertEquals("2.1", p.vdop) self.assertEquals("39", p.checksum) def test_checksum_passes(self): p = GPGSA() p.checksum = '39' p.nmea_sentence = "$GPGSA,A,3,04,05,,09,12,,,24,,,,,2.5,1.3,2.1*39" result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPGSA() p.checksum = '38' p.nmea_sentence = "$GPGSA,A,3,04,05,,09,12,,,24,,,,,2.5,1.3,2.1*38" result = p.check_chksum() self.assertFalse(result) class TestGPGSV(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPGSV() p.parse("$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74") 
self.assertEquals("GPGSV", p.sen_type) self.assertEquals('3', p.num_messages) self.assertEquals('1', p.msg_num) self.assertEquals('11', p.num_sv_in_view) self.assertEquals('03', p.sv_prn_num_1) self.assertEquals('03', p.elevation_deg_1) self.assertEquals('111', p.azimuth_1) self.assertEquals('00', p.snr_1) self.assertEquals('04', p.sv_prn_num_2) self.assertEquals('15', p.elevation_deg_2) self.assertEquals('270', p.azimuth_2) self.assertEquals('00', p.snr_2) self.assertEquals('06', p.sv_prn_num_3) self.assertEquals('01', p.elevation_deg_3) self.assertEquals('010', p.azimuth_3) self.assertEquals('00', p.snr_3) self.assertEquals('13', p.sv_prn_num_4) self.assertEquals('06', p.elevation_deg_4) self.assertEquals('292', p.azimuth_4) self.assertEquals('00', p.snr_4) self.assertEquals("74", p.checksum) def test_checksum_passes(self): p = GPGSV() p.checksum = '74' p.nmea_sentence = "$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74" result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPGSV() p.checksum = '73' p.nmea_sentence = "$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74" result = p.check_chksum() self.assertFalse(result) class TestGPHDG(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPHDG() p.parse("$GPHDG,190.7,,E,0.0,E*7F") self.assertEquals("GPHDG", p.sen_type) self.assertEquals("190.7", p.heading) self.assertEquals("", p.deviation) self.assertEquals("E", p.dev_dir) self.assertEquals("0.0", p.variation) self.assertEquals("E", p.var_dir) self.assertEquals("7F", p.checksum) def test_checksum_passes(self): p = GPHDG() p.checksum = '7F' p.nmea_sentence = "$GPHDG,190.7,,E,0.0,E*7F" result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPHDG() p.checksum = '7E' p.nmea_sentence = "$GPHDG,190.7,,E,0.0,E*7E" result = p.check_chksum() self.assertFalse(result) class TestGPHDT(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPHDT() p.parse("$GPHDT,227.66,T*02") self.assertEquals("GPHDT", p.sen_type) self.assertEquals("227.66", p.heading) self.assertEquals("T", p.hdg_true) self.assertEquals("02", p.checksum) def test_checksum_passes(self): p = GPHDT() p.checksum = '02' p.nmea_sentence = '$GPHDT,227.66,T*02' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPHDT() p.checksum = '03' p.nmea_sentence = '$GPHDT,227.66,T*03' result = p.check_chksum() self.assertFalse(result) class TestGPR00(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map_1(self): p = GPR00() p.parse("$GPR00,EGLL,EGLM,EGTB,EGUB,EGTK,MBOT,EGTB,,,,,,,*58") self.assertEquals("GPR00", p.sen_type) self.assertEquals(['EGLL', 'EGLM', 'EGTB', 'EGUB', 'EGTK', 'MBOT', 'EGTB', '', '', '', '', '', '', ''], p.waypoint_list) self.assertEquals("58", p.checksum) def test_parses_map_2(self): p = GPR00() p.parse("$GPR00,MINST,CHATN,CHAT1,CHATW,CHATM,CHATE,003,004,005,006,007,,,*05") self.assertEquals("GPR00", p.sen_type) self.assertEquals(['MINST', 'CHATN', 'CHAT1', 'CHATW', 'CHATM', 'CHATE', '003', '004', '005', '006', '007', '', '', ''], p.waypoint_list) self.assertEquals("05", p.checksum) def test_checksum_passes(self): p = GPR00() p.checksum = '58' p.nmea_sentence = "$GPR00,EGLL,EGLM,EGTB,EGUB,EGTK,MBOT,EGTB,,,,,,,*58" result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPR00() p.checksum = '57' p.nmea_sentence = 
"$GPR00,EGLL,EGLM,EGTB,EGUB,EGTK,MBOT,EGTB,,,,,,,*57" result = p.check_chksum() self.assertFalse(result) class TestGPRMA(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPRMA() p.parse("$GPRMA,A,4630.129,N,147.372,W,,,12.2,5,7,N*51") self.assertEquals("GPRMA", p.sen_type) self.assertEquals("A", p.data_status) self.assertEquals("4630.129", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("147.372", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("", p.not_used_1) self.assertEquals("", p.not_used_2) self.assertEquals("12.2", p.spd_over_grnd) self.assertEquals("5", p.crse_over_grnd) self.assertEquals("7", p.variation) self.assertEquals("N", p.var_dir) self.assertEquals("51", p.checksum) def test_checksum_passes(self): p = GPRMA() p.checksum = '51' p.nmea_sentence = '$GPRMA,A,4630.129,N,147.372,W,,,12.2,5,7,N*51' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPRMA() p.checksum = '52' p.nmea_sentence = '$GPRMA,A,4630.129,N,147.372,W,,,12.2,5,7,N*52' result = p.check_chksum() self.assertFalse(result) class TestGPRMB(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map_1(self): p = GPRMB() p.parse("$GPRMB,A,0.66,L,003,004,4917.24,N,12309.57,W,001.3,052.5,000.5,V*20") self.assertEquals("GPRMB", p.sen_type) self.assertEquals("A", p.validity) self.assertEquals("0.66", p.cross_track_error) self.assertEquals("L", p.cte_correction_dir) self.assertEquals("003", p.origin_waypoint_id) self.assertEquals("004", p.dest_waypoint_id) self.assertEquals("4917.24", p.dest_lat) self.assertEquals("N", p.dest_lat_dir) self.assertEquals("12309.57", p.dest_lon) self.assertEquals("W", p.dest_lon_dir) self.assertEquals("001.3", p.dest_range) self.assertEquals("052.5", p.dest_true_bearing) self.assertEquals("000.5", p.dest_velocity) self.assertEquals("V", p.arrival_alarm) self.assertEquals("20", p.checksum) def test_parses_map_2(self): p = GPRMB() p.parse("$GPRMB,A,4.08,L,EGLL,EGLM,5130.02,N,00046.34,W,004.6,213.9,122.9,A*3D") self.assertEquals("GPRMB", p.sen_type) self.assertEquals("A", p.validity) self.assertEquals("4.08", p.cross_track_error) self.assertEquals("L", p.cte_correction_dir) self.assertEquals("EGLL", p.origin_waypoint_id) self.assertEquals("EGLM", p.dest_waypoint_id) self.assertEquals("5130.02", p.dest_lat) self.assertEquals("N", p.dest_lat_dir) self.assertEquals("00046.34", p.dest_lon) self.assertEquals("W", p.dest_lon_dir) self.assertEquals("004.6", p.dest_range) self.assertEquals("213.9", p.dest_true_bearing) self.assertEquals("122.9", p.dest_velocity) self.assertEquals("A", p.arrival_alarm) self.assertEquals("3D", p.checksum) def test_parses_map_3(self): p = GPRMB() p.parse("$GPRMB,A,x.x,a,c--c,d--d,llll.ll,e,yyyyy.yy,f,g.g,h.h,i.i,j*kk") self.assertEquals("GPRMB", p.sen_type) self.assertEquals("A", p.validity) self.assertEquals("x.x", p.cross_track_error) self.assertEquals("a", p.cte_correction_dir) self.assertEquals("c--c", p.origin_waypoint_id) self.assertEquals("d--d", p.dest_waypoint_id) self.assertEquals("llll.ll", p.dest_lat) self.assertEquals("e", p.dest_lat_dir) self.assertEquals("yyyyy.yy", p.dest_lon) self.assertEquals("f", p.dest_lon_dir) self.assertEquals("g.g", p.dest_range) self.assertEquals("h.h", p.dest_true_bearing) self.assertEquals("i.i", p.dest_velocity) # This should include the bogus checksum as the checksum is not a valid # hex pair and should not be stripped off self.assertEquals("j*kk", p.arrival_alarm) # There should be no checksum as 
it was not a valid hex pair self.assertFalse(hasattr(p, 'checksum')) def test_checksum_passes(self): p = GPRMB() p.checksum = '20' p.nmea_sentence = '$GPRMB,A,0.66,L,003,004,4917.24,N,12309.57,W,001.3,052.5,000.5,V*20' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPRMB() p.checksum = '21' p.nmea_sentence = '$GPRMB,A,0.66,L,003,004,4917.24,N,12309.57,W,001.3,052.5,000.5,V*21' result = p.check_chksum() self.assertFalse(result) class TestGPRMC(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map_1(self): p = GPRMC() p.parse("$GPRMC,081836,A,3751.65,S,14507.36,E,000.0,360.0,130998,011.3,E*62") self.assertEquals("GPRMC", p.sen_type) self.assertEquals("081836", p.timestamp) self.assertEquals("A", p.data_validity) self.assertEquals("3751.65", p.lat) self.assertEquals("S", p.lat_dir) self.assertEquals("14507.36", p.lon) self.assertEquals("E", p.lon_dir) self.assertEquals("000.0", p.spd_over_grnd) self.assertEquals("360.0", p.true_course) self.assertEquals("130998", p.datestamp) self.assertEquals("011.3", p.mag_variation) self.assertEquals("E", p.mag_var_dir) self.assertEquals("62", p.checksum) def test_parses_map_2(self): p = GPRMC() p.parse("$GPRMC,225446,A,4916.45,N,12311.12,W,000.5,054.7,191194,020.3,E*68") self.assertEquals("GPRMC", p.sen_type) self.assertEquals("225446", p.timestamp) self.assertEquals("A", p.data_validity) self.assertEquals("4916.45", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("12311.12", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("000.5", p.spd_over_grnd) self.assertEquals("054.7", p.true_course) self.assertEquals("191194", p.datestamp) self.assertEquals("020.3", p.mag_variation) self.assertEquals("E", p.mag_var_dir) self.assertEquals("68", p.checksum) def test_parses_map_3(self): p = GPRMC() p.parse("$GPRMC,220516,A,5133.82,N,00042.24,W,173.8,231.8,130694,004.2,W*70") self.assertEquals("GPRMC", p.sen_type) self.assertEquals("220516", p.timestamp) self.assertEquals("A", p.data_validity) self.assertEquals("5133.82", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("00042.24", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("173.8", p.spd_over_grnd) self.assertEquals("231.8", p.true_course) self.assertEquals("130694", p.datestamp) self.assertEquals("004.2", p.mag_variation) self.assertEquals("W", p.mag_var_dir) self.assertEquals("70", p.checksum) def test_checksum_passes(self): p = GPRMC() p.checksum = '70' p.nmea_sentence = '$GPRMC,220516,A,5133.82,N,00042.24,W,173.8,231.8,130694,004.2,W*70' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPRMC() p.checksum = '71' p.nmea_sentence = '$GPRMC,220516,A,5133.82,N,00042.24,W,173.8,231.8,130694,004.2,W*71' result = p.check_chksum() self.assertFalse(result) class TestGPRTE(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map1(self): p = GPRTE() p.parse("$GPRTE,2,1,c,0,PBRCPK,PBRTO,PTELGR,PPLAND,PYAMBU,PPFAIR,PWARRN,PMORTL,PLISMR*73") self.assertEquals("GPRTE", p.sen_type) self.assertEquals("2", p.num_in_seq) self.assertEquals("1", p.sen_num) self.assertEquals("c", p.start_type) self.assertEquals("0", p.active_route_id) self.assertEquals(["PBRCPK", "PBRTO", "PTELGR", "PPLAND", "PYAMBU", "PPFAIR", "PWARRN", "PMORTL", "PLISMR"], p.waypoint_list) self.assertEquals("73", p.checksum) def test_parses_map2(self): p = GPRTE() p.parse("$GPRTE,2,2,c,0,PCRESY,GRYRIE,GCORIO,GWERR,GWESTG,7FED*34") self.assertEquals("GPRTE", p.sen_type) 
self.assertEquals("2", p.num_in_seq) self.assertEquals("2", p.sen_num) self.assertEquals("c", p.start_type) self.assertEquals("0", p.active_route_id) self.assertEquals( ["PCRESY", "GRYRIE", "GCORIO", "GWERR", "GWESTG", "7FED"], p.waypoint_list) self.assertEquals("34", p.checksum) def test_checksum_passes(self): p = GPRTE() p.checksum = "73" p.nmea_sentence = "$GPRTE,2,1,c,0,PBRCPK,PBRTO,PTELGR,PPLAND,PYAMBU,PPFAIR,PWARRN,PMORTL,PLISMR*73" result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPRTE() p.checksum = "74" p.nmea_sentence = "$GPRTE,2,1,c,0,PBRCPK,PBRTO,PTELGR,PPLAND,PYAMBU,PPFAIR,PWARRN,PMORTL,PLISMR*74" result = p.check_chksum() self.assertFalse(result) class TestGPSTN(unittest.TestCase): def setup(self): pass def tearDown(self): pass def test_parses_map1(self): p = GPSTN() p.parse("$GPSTN,10") self.assertEquals("GPSTN", p.sen_type) self.assertEquals("10", p.talker_id) def test_parses_map2(self): p = GPSTN() p.parse("$GPSTN,10*73") self.assertEquals("GPSTN", p.sen_type) self.assertEquals("10", p.talker_id) self.assertEquals("73", p.checksum) class TestGPTRF(unittest.TestCase): def setup(self): pass def tearDown(self): pass def test_parses_map(self): p = GPTRF() p.parse("$GPTRF,121314.15,020112,123.321,N,0987.232,W,2.3,4.5,6.7,8.9,ABC") self.assertEquals("GPTRF", p.sen_type) self.assertEquals("121314.15", p.timestamp) self.assertEquals("020112", p.date) self.assertEquals("123.321", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("0987.232", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("2.3", p.ele_angle) self.assertEquals("4.5", p.num_iterations) self.assertEquals("6.7", p.num_doppler_intervals) self.assertEquals("8.9", p.update_dist) self.assertEquals("ABC", p.sat_id) class TestGPVBW(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPVBW() p.parse("$GPVBW,10.1,-0.2,A,9.8,-0.5,A*62") self.assertEquals("GPVBW", p.sen_type) self.assertEquals("10.1", p.lon_water_spd) self.assertEquals("-0.2", p.trans_water_spd) self.assertEquals("A", p.data_validity_water_spd) self.assertEquals("9.8", p.lon_grnd_spd) self.assertEquals("-0.5", p.trans_grnd_spd) self.assertEquals("A", p.data_validity_grnd_spd) class TestGPVTG(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map_1(self): p = GPVTG() p.parse("$GPVTG,360.0,T,348.7,M,000.0,N,000.0,K*43") self.assertEquals("GPVTG", p.sen_type) self.assertEquals("360.0", p.true_track) self.assertEquals("T", p.true_track_sym) self.assertEquals("348.7", p.mag_track) self.assertEquals("M", p.mag_track_sym) self.assertEquals("000.0", p.spd_over_grnd_kts) self.assertEquals("N", p.spd_over_grnd_kts_sym) self.assertEquals("000.0", p.spd_over_grnd_kmph) self.assertEquals("K", p.spd_over_grnd_kmph_sym) self.assertEquals('43', p.checksum) def test_parses_map_2(self): p = GPVTG() p.parse("GPVTG,054.7,T,034.4,M,005.5,N,010.2,K") self.assertEquals("GPVTG", p.sen_type) self.assertEquals("054.7", p.true_track) self.assertEquals("T", p.true_track_sym) self.assertEquals("034.4", p.mag_track) self.assertEquals("M", p.mag_track_sym) self.assertEquals("005.5", p.spd_over_grnd_kts) self.assertEquals("N", p.spd_over_grnd_kts_sym) self.assertEquals("010.2", p.spd_over_grnd_kmph) self.assertEquals("K", p.spd_over_grnd_kmph_sym) self.assertFalse(hasattr(p, 'checksum')) def test_parses_map3(self): p = GPVTG() p.parse("$GPVTG,t,T,,,s.ss,N,s.ss,K*hh") self.assertEquals("GPVTG", p.sen_type) self.assertEquals("t", p.true_track) 
self.assertEquals("T", p.true_track_sym) self.assertEquals("", p.mag_track) self.assertEquals("", p.mag_track_sym) self.assertEquals("s.ss", p.spd_over_grnd_kts) self.assertEquals("N", p.spd_over_grnd_kts_sym) self.assertEquals("s.ss", p.spd_over_grnd_kmph) # The checksum did not get stripped off as it is invalid # (not a hex pair). self.assertEquals("K*hh", p.spd_over_grnd_kmph_sym) # Despite a checksum being listed in the sentence, there should NOT be # on on the object as 'hh' is not a valid hex pair self.assertFalse(hasattr(p, 'checksum')) class TestGPZDA(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPZDA() p.parse("$GPZDA,025959.000,01,01,1970,,*5B") self.assertEquals("GPZDA", p.sen_type) self.assertEquals("025959.000", p.timestamp) self.assertEquals("01", p.day) self.assertEquals("01", p.month) self.assertEquals("1970", p.year) self.assertEquals("", p.local_zone) self.assertEquals("", p.local_zone_minutes) self.assertEquals("5B", p.checksum) def test_checksum_passes(self): p = GPZDA() p.checksum = '5B' p.nmea_sentence = '$GPZDA,025959.000,01,01,1970,,*5B' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPZDA() p.checksum = 'b5' p.nmea_sentence = '$GPZDA,025959.000,01,01,1970,,*b5' result = p.check_chksum() self.assertFalse(result) class TestGPWCV(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPWCV() p.parse("$GPWCV,2.3,N,ABCD*1C") self.assertEquals("GPWCV", p.sen_type) self.assertEquals("2.3", p.velocity) self.assertEquals("N", p.vel_units) self.assertEquals("ABCD", p.waypoint_id) def test_checksum_passes(self): p = GPWCV() p.checksum = '1C' p.nmea_sentence = '$GPWCV,2.3,N,ABCD*1C' result = p.check_chksum() self.assertTrue(result) def test_checksum_fails(self): p = GPWCV() p.checksum = '1B' p.nmea_sentence = '$GPWCV,2.3,N,ABCD*1B' result = p.check_chksum() self.assertFalse(result) class TestGPWNC(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map(self): p = GPWNC() p.parse("$GPWNC,1.1,N,2.2,K,c--c,c--c*ff") self.assertEquals("GPWNC", p.sen_type) self.assertEquals("1.1", p.dist_nautical_miles) self.assertEquals("N", p.dist_naut_unit) self.assertEquals("2.2", p.dist_km) self.assertEquals("K", p.dist_km_unit) self.assertEquals("c--c", p.waypoint_origin_id) self.assertEquals("c--c", p.waypoint_dest_id) self.assertEquals("ff", p.checksum) class TestGPWPL(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_parses_map_1(self): p = GPWPL() p.parse("$GPWPL,4917.16,N,12310.64,W,003*65") self.assertEquals("GPWPL", p.sen_type) self.assertEquals("4917.16", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("12310.64", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("003", p.waypoint_id) self.assertEquals("65", p.checksum) def test_parses_map_2(self): p = GPWPL() p.parse("$GPWPL,5128.62,N,00027.58,W,EGLL*59") self.assertEquals("GPWPL", p.sen_type) self.assertEquals("5128.62", p.lat) self.assertEquals("N", p.lat_dir) self.assertEquals("00027.58", p.lon) self.assertEquals("W", p.lon_dir) self.assertEquals("EGLL", p.waypoint_id) self.assertEquals("59", p.checksum) class TestGPXTE(unittest.TestCase): def setUp(Self): pass def tearDown(self): pass def test_parses_map_1(self): p = GPXTE() p.parse("$GPXTE,A,A,0.67,L,N") self.assertEquals("GPXTE", p.sen_type) self.assertEquals("A", p.warning_flag) self.assertEquals("A", p.lock_flag) self.assertEquals("0.67", p.cross_track_err_dist) 
self.assertEquals("L", p.correction_dir) self.assertEquals("N", p.dist_units) def test_parses_map_2(self): p = GPXTE() p.parse("$GPXTE,A,A,4.07,L,N*6D") self.assertEquals("GPXTE", p.sen_type) self.assertEquals("A", p.warning_flag) self.assertEquals("A", p.lock_flag) self.assertEquals("4.07", p.cross_track_err_dist) self.assertEquals("L", p.correction_dir) self.assertEquals("N", p.dist_units) self.assertEquals("6D", p.checksum) class TestPGRME(unittest.TestCase): def test_parses_map(self): p = PGRME() p.parse("$PGRME,3.1,M,4.2,M,5.2,M*2D") self.assertEqual("PGRME", p.sen_type) self.assertEqual("3.1", p.hpe) self.assertEqual("M", p.hpe_unit) self.assertEqual("4.2", p.vpe) self.assertEqual("M", p.vpe_unit) self.assertEqual("5.2", p.osepe) self.assertEqual("M", p.osepe_unit) class TestPGRMM(unittest.TestCase): def test_parses_map(self): p = PGRMM() p.parse("PGRMM,WGS 84*06") self.assertEqual("PGRMM", p.sen_type) self.assertEqual("WGS 84", p.datum) class TestPGRMZ(unittest.TestCase): def test_parses_map(self): p = PGRMZ() p.parse("PGRMZ,492,f,3*14") self.assertEqual("PGRMZ", p.sen_type) self.assertEqual("492", p.altitude) self.assertEqual("f", p.altitude_unit) self.assertEqual("3", p.pos_fix_dim) class TestUtils(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_checksum_calc(self): nmea_str1 = 'GPGLL,3751.65,S,14507.36,E' nmea_str2 = '$GPGLL,3751.65,S,14507.36,E' nmea_str3 = 'GPGLL,3751.65,S,14507.36,E*77' nmea_str4 = '$GPGLL,3751.65,S,14507.36,E*77' nmea_str5 = '$GPGLL,3751.65,S,14507.36,E*' nmea_str6 = 'GPGLL,3751.65,S,14507.36,E*' nmea_str7 = '$GPHDT,227.66,T*02' result1 = checksum_calc(nmea_str1) result2 = checksum_calc(nmea_str2) result3 = checksum_calc(nmea_str3) result4 = checksum_calc(nmea_str4) result5 = checksum_calc(nmea_str5) result6 = checksum_calc(nmea_str6) result7 = checksum_calc(nmea_str7) self.assertEquals(result1, '77') self.assertEquals(result2, '77') self.assertEquals(result3, '77') self.assertEquals(result4, '77') self.assertEquals(result5, '77') self.assertEquals(result6, '77') self.assertEquals(result7, '02')
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from hwt.code import If, Concat, Switch from hwt.hdl.types.bits import Bits from hwt.interfaces.utils import addClkRstn from hwt.math import log2ceil from hwt.synthesizer.param import Param from hwtLib.amba.axis import AxiStream from hwtLib.amba.axis_comp.base import AxiSCompBase from hwtLib.amba.axis_comp.reg import AxiSReg from hwtLib.handshaked.streamNode import StreamNode class AxiS_resizer(AxiSCompBase): """ Change data with of AxiStream interface :attention: start of frame is expected to be aligned on first word :attention: strb can be not fully set only in last word :attention: in upscale mode id and other signals which are not dependent on data width are propagated only from last word :note: interface is configurable and schematic is example with AxiStream :note: first schematic is for upsize mode, second one is for downsize mode .. hwt-params: _example_AxiS_resizer_upscale .. hwt-interfaces: _example_AxiS_resizer_upscale .. hwt-schematic:: _example_AxiS_resizer_upscale .. hwt-schematic:: _example_AxiS_resizer_downscale """ def _config(self): AxiSCompBase._config(self) self.OUT_DATA_WIDTH = Param(64) self.USE_STRB = True def _declr(self): assert self.USE_STRB addClkRstn(self) with self._paramsShared(): self.dataIn = AxiStream() with self._paramsShared(exclude=({"DATA_WIDTH"}, set())): o = self.dataOut = AxiStream()._m() o.DATA_WIDTH = self.OUT_DATA_WIDTH def nextAreNotValidLogic(self, inStrb, actualItemIndx, ITEMS, ITEM_DW): res = None ITEM_W = ITEM_DW // 8 for i in range(ITEMS - 1): # -1 because if it is last we do not need this strbHi = ITEMS * ITEM_W strbLo = (i + 1) * ITEM_W othersNotValid = actualItemIndx._eq( i) & inStrb[strbHi:strbLo]._eq(0) if res is None: res = othersNotValid else: res = res | othersNotValid if res is None: return True _res = self._sig("nextAreNotValid") _res(res) return _res def upscale(self, IN_DW, OUT_DW): if OUT_DW % IN_DW != 0: raise NotImplementedError() ITEMS = OUT_DW // IN_DW dIn = self.getDataWidthDependent(self.dataIn) dataOut = self.dataOut dOut = self.getDataWidthDependent(dataOut) itemCntr = self._reg("itemCntr", Bits(log2ceil(ITEMS + 1)), def_val=0) hs = StreamNode([self.dataIn], [dataOut]).ack() isLastItem = (itemCntr._eq(ITEMS - 1) | self.dataIn.last) vld = self.get_valid_signal(self.dataIn) outputs = {outp: [] for outp in dOut} for wordIndx in range(ITEMS): for inp, outp in zip(dIn, dOut): # generate register if is not last item s = self._sig(f"item_{wordIndx:d}_{inp._name:s}", inp._dtype) if wordIndx <= ITEMS - 1: r = self._reg(f"reg_{inp._name:s}_{wordIndx:d}", inp._dtype, def_val=0) If(hs & isLastItem, r(0) ).Elif(vld & itemCntr._eq(wordIndx), r(inp) ) If(itemCntr._eq(wordIndx), s(inp) ).Else( s(r) ) else: # last item does not need register If(itemCntr._eq(wordIndx), s(inp) ).Else( s(0) ) outputs[outp].append(s) # dataIn/dataOut hs self.get_ready_signal(self.dataIn)(self.get_ready_signal(dataOut)) self.get_valid_signal(dataOut)(vld & (isLastItem)) # connect others signals directly for inp, outp in zip(self.get_data(self.dataIn), self.get_data(dataOut)): if inp not in dIn: outp(inp) # connect data signals to utput for outp, outItems in outputs.items(): outp(Concat(*reversed(outItems))) # itemCntr next logic If(hs, If(isLastItem, itemCntr(0) ).Else( itemCntr(itemCntr + 1) ) ) def downscale(self, IN_DW, OUT_DW): if IN_DW % OUT_DW != 0: raise NotImplementedError() dOut = self.getDataWidthDependent(self.dataOut) # instantiate AxiSReg, AxiSBuilder is not used to avoid dependencies inReg = 
AxiSReg(self.intfCls) inReg._updateParamsFrom(self.dataIn) self.inReg = inReg inReg.clk(self.clk) inReg.rst_n(self.rst_n) inReg.dataIn(self.dataIn) dataIn = inReg.dataOut dIn = self.getDataWidthDependent(dataIn) ITEMS = IN_DW // OUT_DW itemCntr = self._reg("itemCntr", Bits(log2ceil(ITEMS + 1)), def_val=0) hs = StreamNode([dataIn], [self.dataOut]).ack() isLastItem = itemCntr._eq(ITEMS - 1) strbLastOverride = self.nextAreNotValidLogic(dataIn.strb, itemCntr, ITEMS, OUT_DW) if strbLastOverride is not True: isLastItem = isLastItem | strbLastOverride # connected item selected by itemCntr to output for inp, outp in zip(dIn, dOut): w = outp._dtype.bit_length() Switch(itemCntr)\ .add_cases([ (wordIndx, outp(inp[((wordIndx + 1) * w):(w * wordIndx)])) for wordIndx in range(ITEMS) ])\ .Default( outp(None) ) # connect others signals directly for inp, outp in zip(self.get_data(dataIn), self.get_data(self.dataOut)): if inp not in dIn and inp is not dataIn.last: outp(inp) self.dataOut.last(dataIn.last & isLastItem) self.get_ready_signal(dataIn)(self.get_ready_signal(self.dataOut) & isLastItem & dataIn.valid) self.get_valid_signal(self.dataOut)(self.get_valid_signal(dataIn)) If(hs, If(isLastItem, itemCntr(0) ).Else( itemCntr(itemCntr + 1) ) ) def _impl(self): IN_DW = int(self.DATA_WIDTH) OUT_DW = int(self.OUT_DATA_WIDTH) if IN_DW < OUT_DW: # UPSCALE self.upscale(IN_DW, OUT_DW) elif IN_DW > OUT_DW: # DOWNSCALE self.downscale(IN_DW, OUT_DW) else: # same raise AssertionError( "Input and output width are same, this instance is useless") def _example_AxiS_resizer_upscale(): from hwtLib.amba.axis import AxiStream u = AxiS_resizer(AxiStream) u.ID_WIDTH = 3 u.DATA_WIDTH = 32 u.OUT_DATA_WIDTH = 64 return u def _example_AxiS_resizer_downscale(): from hwtLib.amba.axis import AxiStream u = AxiS_resizer(AxiStream) u.ID_WIDTH = 3 u.DATA_WIDTH = 64 u.OUT_DATA_WIDTH = 32 return u if __name__ == "__main__": from hwt.synthesizer.utils import to_rtl_str u = _example_AxiS_resizer_downscale() print(to_rtl_str(u))
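

# ---------------------------------------------------------------------------
# Behavioural sketch (plain Python, not hwt; the function name is the
# editor's): upscale() above packs ITEMS = OUT_DW // IN_DW input words into
# one output word, item 0 in the low bits (Concat over the reversed item
# list), flushing early when the input word carries "last". The same packing
# in software:
def pack_words_sketch(words, in_dw=32, out_dw=64):
    """words: iterable of (data, last) pairs; yields packed (data, last) pairs."""
    assert out_dw % in_dw == 0
    items = out_dw // in_dw
    mask = (1 << in_dw) - 1
    buf, idx = 0, 0
    for data, last in words:
        buf |= (data & mask) << (idx * in_dw)   # item i lands at bits [i*IN_DW +: IN_DW]
        idx += 1
        if last or idx == items:
            yield buf, last
            buf, idx = 0, 0

# e.g. list(pack_words_sketch([(0x11111111, False), (0x22222222, True)]))
#      -> [(0x2222222211111111, True)]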
#!/usr/bin/env python

"""
   Copyright 2015 The Trustees of Princeton University

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

import syndicate.util.config as conf
import syndicate.util.crypto as crypto
import syndicate.util.objects as object_stub
import syndicate.ms.jsonrpc as jsonrpc
import syndicate.ms.msconfig as msconfig

from syndicate.util.objects import MissingKeyException

import os
import urlparse

from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner

log = conf.log


def warn_key_change(config):
    """
    Return a warning string telling the user that the MS's public key has changed.
    """
    return """
SECURE VERIFICATION FAILURE!

It's possible that someone is impersonating your Syndicate, to get you to leak sensitive data!
If you are certain this is not the case, you should remove the offending public key.

Offending public key path:  %s
""" % conf.object_key_path(config, "syndicate",
                           make_ms_url(config['syndicate_host'], config['syndicate_port'], config['no_tls']).strip("https://"),
                           public=True)


def parse_url(url):
    """
    Given a URL, find its host and port, and determine based on the
    protocol scheme whether or not it uses TLS.
    Return (host, port, no_tls) on success.
    Raise an exception if the URL is invalid.
    """
    if "://" not in url:
        log.warning("No scheme for %s.  Assuming https://" % url)
        url = "https://" + url

    parsed = urlparse.urlparse(url)

    scheme = parsed.scheme
    netloc = parsed.netloc

    host = None
    port = None
    no_tls = None

    if ":" in netloc:
        host, port = netloc.split(":")
        try:
            port = int(port)
        except:
            raise Exception("Invalid URL %s" % url)
    else:
        host = netloc

    if scheme.lower() == 'http':
        if port is None:
            port = 80
        no_tls = True
    elif scheme.lower() == 'https':
        if port is None:
            port = 443
        no_tls = False
    else:
        raise Exception("Unrecognized scheme in URL %s" % url)

    return (host, port, no_tls)


def make_ms_url(syndicate_host, syndicate_port, no_tls, urlpath=""):
    """
    Make a URL to the MS, omitting the port if it is the scheme's default.
    Return the URL.
    """
    scheme = "https://"
    default_port = 443

    if no_tls:
        default_port = 80
        scheme = "http://"

    if syndicate_port != default_port:
        return scheme + os.path.join(syndicate_host.strip("/") + ":%s" % syndicate_port, urlpath)
    else:
        return scheme + os.path.join(syndicate_host.strip("/"), urlpath)


def api_call_signer(signing_pkey, method_name, data):
    """
    Sign an RPC call with the user's private key.
    """
    # sign the data
    h = HashAlg.new(data)
    signer = CryptoSigner.new(signing_pkey)
    signature = signer.sign(h)
    return signature


def api_call_verifier(config, pubkey, method_name, data, syndicate_data, rpc_result):
    """
    Verify an RPC call.
""" # sanity check if not 'signature' in syndicate_data: log.error("No signature in server reply") return False sig = syndicate_data['signature'] # verify object ID and type ret = False if pubkey is not None: # verify this data h = HashAlg.new(data) verifier = CryptoSigner.new(pubkey) ret = verifier.verify(h, sig) if not ret: # verification key has changed on the MS print warn_key_change(config) return False else: return True else: raise Exception("No public key given. Unable to verify result.") def make_rpc_client(config, caller_username=None): """ Create an RPC client for calling MS methods. Requires a config dictionary with: * syndicate_host * syndicate_port * no_tls * syndicate_public_key * user_pkey """ import storage ms_url = make_ms_url(config['syndicate_host'], config['syndicate_port'], config['no_tls']) + "/API" username = caller_username if username is None: username = config['username'] user_private_key = storage.load_private_key(config, "user", username) if user_private_key is None: raise MissingKeyException("No private key for '%s'" % username) syndicate_public_key = config['syndicate_public_key'] if not ms_url.lower().startswith("https://"): log.debug("MS URL %s is NOT confidential!" % ms_url) signer = lambda method_name, data: api_call_signer(user_private_key, method_name, data) verifier = lambda method_name, args, kw, data, syndicate_data, rpc_result: api_call_verifier(config, syndicate_public_key, method_name, data, syndicate_data, rpc_result) json_client = jsonrpc.Client(ms_url, jsonrpc.VERSION, signer=signer, verifier=verifier, username=username) json_client.config = config json_client.caller_username = username return json_client def json_stable_serialize(json_data): """ Convert a dict or list into json, ensuring that key-values are serialized in a stable order. Lifted verbatum from the MS, to remove the dependency. """ if isinstance(json_data, list) or isinstance(json_data, tuple): json_serialized_list = [] for json_element in json_data: json_serialized_list.append(json_stable_serialize(json_element)) # json_serialized_list.sort() return "[" + ", ".join(json_serialized_list) + "]" elif isinstance(json_data, dict): json_serialized_dict = {} for key in json_data.keys(): json_serialized_dict[key] = json_stable_serialize(json_data[key]) key_order = [k for k in json_serialized_dict.keys()] key_order.sort() return "{" + ", ".join(['"%s": %s' % (k, json_serialized_dict[k]) for k in key_order]) + "}" elif isinstance(json_data, str) or isinstance(json_data, unicode): return '"' + json_data + '"' return '"' + str(json_data) + '"' def ms_rpc(proxy, method_name, *args, **kw): """ Call a method on the MS (method_name). Take the argument vector *args and dictionary **kw (both taken from sys.argv), look up the method's parser, parse the arguments, and then issue the RPC call. """ verify_reply = True config = proxy.config if config.has_key('verify_reply'): # do not verify the reply (i.e. we might not know the Syndicate public key) verify_reply = config['verify_reply'] # parse arguments. # use lib to load and store temporary data for the argument parsers. 
lib = conf.ArgLib() lib.config = config try: args, kw, extras = conf.parse_args(config, method_name, args, kw, lib) except Exception, e: log.exception(e) log.error("Failed to parse arguments for '%s'" % method_name) raise e # make sure we got the right number of arguments valid = conf.validate_args(method_name, args, kw) if not valid: raise Exception("Invalid arguments for %s" % method_name) method_callable = getattr(proxy, method_name) # NOTE: might cause an exception log.debug("As %s, call %s(%s %s)" % (proxy.caller_username, method_name, ", ".join(map(lambda x: x[:50] + "..." if len(x) > 50 else x, [str(a) for a in args])), ", ".join(map(lambda x: x[:50] + "..." if len(x) > 50 else x, ["%s=%s" % (str(k), str(kw[k])) for k in kw.keys()])))) ret = method_callable(*args, **kw) # process object-specific extra information, based on the returned value of this method. for object_cls in object_stub.object_classes: object_cls.PostProcessResult(extras, config, method_name, args, kw, ret) return ret
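

# ---------------------------------------------------------------------------
# Usage illustration (editor's sketch, not part of the module API):
# json_stable_serialize() above sorts dictionary keys and stringifies scalars,
# so equal structures always serialize to the same string -- the property the
# request signing/verification path relies on.
if __name__ == "__main__":
    print(json_stable_serialize({"b": 1, "a": [2, "x"]}))   # {"a": ["2", "x"], "b": "1"}
    print(json_stable_serialize({"a": [2, "x"], "b": 1}))   # identical output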
""" Some generic utility routines for number handling and calculating (specific) variances """ import logging import itertools import numpy from tkp.utility import containers from tkp.utility.memoize import Memoize from tkp.sourcefinder import utils from tkp.sourcefinder import stats from tkp.sourcefinder import extract try: import ndimage except ImportError: from scipy import ndimage logger = logging.getLogger(__name__) # # Hard-coded configuration parameters; not user settable. # INTERPOLATE_ORDER = 1 # Spline order for grid interpolation MEDIAN_FILTER = 0 # If non-zero, apply a median filter of size # MEDIAN_FILTER to the background and RMS grids prior # to interpolating. MF_THRESHOLD = 0 # If MEDIAN_FILTER is non-zero, only use the filtered # grid when the (absolute) difference between the raw # and filtered grids is larger than MF_THRESHOLD. DEBLEND_MINCONT = 0.005 # Min. fraction of island flux in deblended subisland STRUCTURING_ELEMENT = [[0,1,0], [1,1,1], [0,1,0]] # Island connectiivty class ImageData(object): """Encapsulates an image in terms of a numpy array + meta/headerdata. This is your primary contact point for interaction with images: it icludes facilities for source extraction and measurement, etc. """ def __init__(self, data, beam, wcs, margin=0, radius=0, back_size_x=32, back_size_y=32, residuals=True ): """Sets up an ImageData object. *Args:* - data (2D numpy.ndarray): actual image data - wcs (utility.coordinates.wcs): world coordinate system specification - beam (3-tuple): beam shape specification as (semimajor, semiminor, theta) """ # Do data, wcs and beam need deepcopy? # Probably not (memory overhead, in particular for data), # but then the user shouldn't change them outside ImageData in the # mean time self.rawdata = data # a 2D numpy array self.wcs = wcs # a utility.coordinates.wcs instance self.beam = beam # tuple of (semimaj, semimin, theta) self.clip = {} self.labels = {} self.freq_low = 1 self.freq_high = 1 self.back_size_x = back_size_x self.back_size_y= back_size_y self.margin = margin self.radius = radius self.residuals = residuals ########################################################################### # # # Properties and attributes. # # # # Properties are attributes managed by methods; rather than calling the # # method directly, the attribute automatically invokes it. We can use # # this to do cunning transparent caching ("memoizing") etc; see the # # Memoize class. # # # # clearcache() clears all the memoized data, which can get quite large. # # It may be wise to call this, for example, in an exception handler # # dealing with MemoryErrors. 
# # # ########################################################################### @Memoize def _grids(self): """Gridded RMS and background data for interpolating""" return self.__grids() grids = property(fget=_grids, fdel=_grids.delete) @Memoize def _backmap(self): """Background map""" if not hasattr(self, "_user_backmap"): return self._interpolate(self.grids['bg']) else: return self._user_backmap def _set_backmap(self, bgmap): self._user_backmap = bgmap del(self.backmap) del(self.data_bgsubbed) backmap = property(fget=_backmap, fdel=_backmap.delete, fset=_set_backmap) @Memoize def _get_rm(self): """RMS map""" if not hasattr(self, "_user_noisemap"): return self._interpolate(self.grids['rms'], roundup=True) else: return self._user_noisemap def _set_rm(self, noisemap): self._user_noisemap = noisemap del(self.rmsmap) rmsmap = property(fget=_get_rm, fdel=_get_rm.delete, fset=_set_rm) @Memoize def _get_data(self): """Masked image data""" # We will ignore all the data which is masked for the rest of the # sourcefinding process. We build up the mask by stacking ("or-ing # together") a number of different effects: # # * A margin from the edge of the image; # * Any data outside a given radius from the centre of the image; # * Data which is "obviously" bad (equal to 0 or NaN). mask = numpy.zeros((self.xdim, self.ydim)) if self.margin: margin_mask = numpy.ones((self.xdim, self.ydim)) margin_mask[self.margin:-self.margin, self.margin:-self.margin] = 0 mask = numpy.logical_or(mask, margin_mask) if self.radius: radius_mask = utils.circular_mask(self.xdim, self.ydim, self.radius) mask = numpy.logical_or(mask, radius_mask) mask = numpy.logical_or(mask, numpy.where(self.rawdata == 0, 1, 0)) mask = numpy.logical_or(mask, numpy.isnan(self.rawdata)) return numpy.ma.array(self.rawdata, mask=mask) data = property(fget=_get_data, fdel=_get_data.delete) @Memoize def _get_data_bgsubbed(self): """Background subtracted masked image data""" return self.data - self.backmap data_bgsubbed = property(fget=_get_data_bgsubbed, fdel=_get_data_bgsubbed.delete) @property def xdim(self): """X pixel dimension of (unmasked) data""" return self.rawdata.shape[0] @property def ydim(self): """Y pixel dimension of (unmasked) data""" return self.rawdata.shape[1] @property def pixmax(self): """Maximum pixel value (pre-background subtraction)""" return self.data.max() @property def pixmin(self): """Minimum pixel value (pre-background subtraction)""" return self.data.min() def clearcache(self): """Zap any calculated data stored in this object. Clear the background and rms maps, labels, clip, and any locally held data. All of these can be reconstructed from the data accessor. Note that this *must* be run to pick up any new settings. """ self.labels.clear() self.clip.clear() del(self.backmap) del(self.rmsmap) del(self.data) del(self.data_bgsubbed) del(self.grids) if hasattr(self, 'residuals_from_gauss_fitting'): del(self.residuals_from_gauss_fitting) if hasattr(self, 'residuals_from_deblending'): del(self.residuals_from_deblending) ########################################################################### # # # General purpose image handling. # # # # Routines for saving and trimming data, and calculating background/RMS # # maps (in conjuntion with the properties above). # # # ########################################################################### # Private "support" methods def __grids(self): """Calculate background and RMS grids of this image. 
These grids can be interpolated up to make maps of the original image dimensions: see _interpolate(). This is called automatically when ImageData.backmap, ImageData.rmsmap or ImageData.fdrmap is first accessed. """ # there's no point in working with the whole of the data array # if it's masked. useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1)) assert(len(useful_chunk) == 1) useful_data = self.data[useful_chunk[0]] my_xdim, my_ydim = useful_data.shape rmsgrid, bggrid = [], [] for startx in xrange(0, my_xdim, self.back_size_x): rmsrow, bgrow = [], [] for starty in xrange(0, my_ydim, self.back_size_y): chunk = useful_data[ startx:startx + self.back_size_x, starty:starty + self.back_size_y ].ravel() if not chunk.any(): rmsrow.append(False) bgrow.append(False) continue chunk, sigma, median, num_clip_its = stats.sigma_clip( chunk, self.beam) if len(chunk) == 0 or not chunk.any(): rmsrow.append(False) bgrow.append(False) else: mean = numpy.mean(chunk) rmsrow.append(sigma) # In the case of a crowded field, the distribution will be # skewed and we take the median as the background level. # Otherwise, we take 2.5 * median - 1.5 * mean. This is the # same as SExtractor: see discussion at # <http://terapix.iap.fr/forum/showthread.php?tid=267>. # (mean - median) / sigma is a quick n' dirty skewness # estimator devised by Karl Pearson. if numpy.fabs(mean - median) / sigma >= 0.3: logger.debug( 'bg skewed, %f clipping iterations', num_clip_its) bgrow.append(median) else: logger.debug( 'bg not skewed, %f clipping iterations', num_clip_its) bgrow.append(2.5 * median - 1.5 * mean) rmsgrid.append(rmsrow) bggrid.append(bgrow) rmsgrid = numpy.ma.array( rmsgrid, mask=numpy.where(numpy.array(rmsgrid) == False, 1, 0)) bggrid = numpy.ma.array( bggrid, mask=numpy.where(numpy.array(bggrid) == False, 1, 0)) return {'rms': rmsgrid, 'bg': bggrid} def _interpolate(self, grid, roundup=False): """ Interpolate a grid to produce a map of the dimensions of the image. Args: grid (numpy.ma.MaskedArray) Kwargs: roundup (bool) Returns: (numpy.ma.MaskedArray) Used to transform the RMS, background or FDR grids produced by L{_grids()} to a map we can compare with the image data. If roundup is true, values of the resultant map which are lower than the input grid are trimmed. """ # there's no point in working with the whole of the data array if it's # masked. useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1)) assert(len(useful_chunk) == 1) my_xdim, my_ydim = self.data[useful_chunk[0]].shape if MEDIAN_FILTER: f_grid = ndimage.median_filter(grid, MEDIAN_FILTER) if MF_THRESHOLD: grid = numpy.where( numpy.fabs(f_grid - grid) > MF_THRESHOLD, f_grid, grid ) else: grid = f_grid # Bicubic spline interpolation xratio = float(my_xdim)/self.back_size_x yratio = float(my_ydim)/self.back_size_y # First arg: starting point. Second arg: ending point. Third arg: # 1j * number of points. (Why is this complex? Sometimes, NumPy has an # utterly baffling API...) slicex = slice(-0.5, -0.5+xratio, 1j*my_xdim) slicey = slice(-0.5, -0.5+yratio, 1j*my_ydim) my_map = numpy.ma.MaskedArray(numpy.zeros(self.data.shape), mask = self.data.mask) my_map[useful_chunk[0]] = ndimage.map_coordinates( grid, numpy.mgrid[slicex, slicey], mode='nearest', order=INTERPOLATE_ORDER) # If the input grid was entirely masked, then the output map must # also be masked: there's no useful data here. 
We don't search for # sources on a masked background/RMS, so this data will be cleanly # skipped by the rest of the sourcefinder if numpy.ma.getmask(grid).all(): my_map.mask = True elif roundup: # In some cases, the spline interpolation may produce values # lower than the minimum value in the map. If required, these # can be trimmed off. No point doing this if the map is already # fully masked, though. my_map = numpy.ma.MaskedArray( data = numpy.where( my_map >= numpy.min(grid), my_map, numpy.min(grid)), mask = my_map.mask ) return my_map ########################################################################### # # # Source extraction. # # # # Provides for both traditional (islands-above-RMS) and FDR source # # extraction systems. # # # ########################################################################### def extract(self, det, anl, noisemap=None, bgmap=None, labelled_data=None, labels=None, deblend_nthresh=0, force_beam=False): """ Kick off conventional (ie, RMS island finding) source extraction. Kwargs: det (float): detection threshold, as a multiple of the RMS noise. At least one pixel in a source must exceed this for it to be regarded as significant. anl (float): analysis threshold, as a multiple of the RMS noise. All the pixels within the island that exceed this will be used when fitting the source. noisemap (numpy.ndarray): bgmap (numpy.ndarray): deblend_nthresh (int): number of subthresholds to use for deblending. Set to 0 to disable. force_beam (bool): force all extractions to have major/minor axes equal to the restoring beam Returns: :class:`tkp.utility.containers.ExtractionResults` """ if anl > det: logger.warn( "Analysis threshold is higher than detection threshold" ) if (type(bgmap).__name__ == 'ndarray' or type(bgmap).__name__ == 'MaskedArray'): if bgmap.shape != self.backmap.shape: raise IndexError("Background map has wrong shape") else: self.backmap = bgmap if (type(noisemap).__name__ == 'ndarray' or type(noisemap).__name__ == 'MaskedArray'): if noisemap.shape != self.rmsmap.shape: raise IndexError("Noisemap has wrong shape") if noisemap.min() < 0: raise ValueError("RMS noise cannot be negative") else: self.rmsmap = noisemap if labelled_data is not None and labelled_data.shape != self.data.shape: raise ValueError("Labelled map is wrong shape") return self._pyse( det * self.rmsmap, anl * self.rmsmap, deblend_nthresh, force_beam, labelled_data=labelled_data, labels=labels ) def reverse_se(self, det): """Run source extraction on the negative of this image. Obviously, there should be no sources in the negative image, so this tells you about the false positive rate. We need to clear cached data -- backgroung map, cached clips, etc -- before & after doing this, as they'll interfere with the normal extraction process. If this is regularly used, we'll want to implement a separate cache. """ self.labels.clear() self.clip.clear() self.data_bgsubbed *= -1 results = self.extract(det=det) self.data_bgsubbed *= -1 self.labels.clear() self.clip.clear() return results def fd_extract(self, alpha, anl=None, noisemap=None, bgmap=None, deblend_nthresh=0, force_beam=False ): """False Detection Rate based source extraction. The FDR procedure guarantees that <FDR> < alpha. See `Hopkins et al., AJ, 123, 1086 (2002) <http://adsabs.harvard.edu/abs/2002AJ....123.1086H>`_. """ # The correlation length in config.py is used not only for the # calculation of error bars with the Condon formulae, but also for # calculating the number of independent pixels. 
corlengthlong, corlengthshort = utils.calculate_correlation_lengths( self.beam[0], self.beam[1]) C_n = (1.0 / numpy.arange( round(0.25 * numpy.pi * corlengthlong * corlengthshort + 1))[1:]).sum() # Calculate the FDR threshold # Things will go terribly wrong in the line below if the interpolated # noise values get very close or below zero. Use INTERPOLATE_ORDER=1 # or the roundup option. if (type(bgmap).__name__ == 'ndarray' or type(bgmap).__name__ == 'MaskedArray'): if bgmap.shape != self.backmap.shape: raise IndexError("Background map has wrong shape") else: self.backmap = bgmap if (type(noisemap).__name__ == 'ndarray' or type(noisemap).__name__ == 'MaskedArray'): if noisemap.shape != self.rmsmap.shape: raise IndexError("Noisemap has wrong shape") if noisemap.min()<0: raise ValueError("RMS noise cannot be negative") else: self.rmsmap = noisemap normalized_data = self.data_bgsubbed/self.rmsmap n1 = numpy.sqrt(2 * numpy.pi) prob = numpy.sort(numpy.ravel(numpy.exp(-0.5 * normalized_data**2)/n1)) lengthprob = float(len(prob)) compare = (alpha / C_n) * numpy.arange(lengthprob+1)[1:] / lengthprob # Find the last undercrossing, see, e.g., fig. 9 in Miller et al., AJ # 122, 3492 (2001). Searchsorted is not used because the array is not # sorted. try: index = (numpy.where(prob-compare < 0.)[0]).max() except ValueError: # Everything below threshold return containers.ExtractionResults() fdr_threshold = numpy.sqrt(-2.0 * numpy.log(n1 * prob[index])) # Default we require that all source pixels are above the threshold, # not only the peak pixel. This gives a better guarantee that indeed # the fraction of false positives is less than fdr_alpha in config.py. # See, e.g., Hopkins et al., AJ 123, 1086 (2002). if not anl: anl = fdr_threshold return self._pyse(fdr_threshold * self.rmsmap, anl * self.rmsmap, deblend_nthresh, force_beam) def flux_at_pixel(self, x, y, numpix=1): """Return the background-subtracted flux at a certain position in the map""" # numpix is the number of pixels to look around the target. # e.g. numpix = 1 means a total of 9 pixels, 1 in each direction. return self.data_bgsubbed[y-numpix:y+numpix+1, x-numpix:x+numpix+1].max() @staticmethod def box_slice_about_pixel(x,y,box_radius): """ Returns a slice centred about (x,y), of width = 2*int(box_radius) + 1 """ ibr = int(box_radius) return (slice(x - ibr, x + ibr + 1), slice(y - ibr, y + ibr + 1)) def fit_to_point(self, x, y, boxsize, threshold, fixed): """Fit an elliptical Gaussian to a specified point on the image. The fit is carried on a square section of the image, of length *boxsize* & centred at pixel coordinates *x*, *y*. Any data below *threshold* * rmsmap is not used for fitting. If *fixed* is set to ``position``, then the pixel coordinates are fixed in the fit. Returns an instance of :class:`tkp.sourcefinder.extract.Detection`. """ if (( # Recent NumPy hasattr(numpy.ma.core, "MaskedConstant") and isinstance(self.rmsmap, numpy.ma.core.MaskedConstant) ) or ( # Old NumPy numpy.ma.is_masked(self.rmsmap[x, y]) )): logger.error("Background is masked: cannot fit") return None chunk = ImageData.box_slice_about_pixel(x, y, boxsize/2.0) if threshold is not None: # We'll mask out anything below threshold*self.rmsmap from the fit. 
labels, num = self.labels.setdefault( #Dictionary mapping threshold -> islands map threshold, ndimage.label( self.clip.setdefault( #Dictionary mapping threshold -> mask threshold, numpy.where( self.data_bgsubbed > threshold * self.rmsmap, 1, 0 ) ) ) ) mylabel = labels[x, y] if mylabel == 0: # 'Background' raise ValueError("Fit region is below specified threshold, fit aborted.") mask = numpy.where(labels[chunk] == mylabel, 0, 1) fitme = numpy.ma.array(self.data_bgsubbed[chunk], mask=mask) if len(fitme.compressed()) < 1: raise IndexError("Fit region too close to edge or too small") else: fitme = self.data_bgsubbed[chunk] if fitme.size < 1: raise IndexError("Fit region too close to edge or too small") if not len(fitme.compressed()): logger.error("All data is masked: cannot fit") return None # set argument for fixed parameters based on input string if fixed == 'position': fixed = {'xbar': boxsize/2.0, 'ybar': boxsize/2.0} elif fixed == 'position+shape': fixed = {'xbar': boxsize/2.0, 'ybar': boxsize/2.0, 'semimajor': self.beam[0], 'semiminor': self.beam[1], 'theta': self.beam[2]} elif fixed == None: fixed = {} else: raise TypeError("Unkown fixed parameter") if threshold is not None: threshold_at_pixel = threshold * self.rmsmap[x, y] else: threshold_at_pixel = None try: measurement, residuals = extract.source_profile_and_errors( fitme, threshold_at_pixel, self.rmsmap[x, y], self.beam, fixed=fixed ) except ValueError: # Fit failed to converge # Moments are not applicable when holding parameters fixed logger.error("Gaussian fit failed at %f, %f", x, y) return None try: assert(abs(measurement['xbar']) < boxsize) assert(abs(measurement['ybar']) < boxsize) except AssertionError: logger.warn('Fit falls outside of box.') measurement['xbar'] += x-boxsize/2.0 measurement['ybar'] += y-boxsize/2.0 measurement.sig = (fitme / self.rmsmap[chunk]).max() return extract.Detection(measurement, self) def fit_fixed_positions(self, positions, boxsize, threshold=None, fixed='position+shape', ids=None): """ Convenience function to fit a list of sources at the given positions This function wraps around fit_to_point(). Args: positions (list): list of (RA, Dec) tuples. Positions to be fit, in decimal degrees. boxsize: See :py:func:`fit_to_point` threshold: as above. fixed: as above. ids (list): A list of identifiers. If not None, then must match the length and order of the ``requested_fits``. Any successfully fit positions will be returned in a tuple along with the matching id. As these are simply passed back to calling code they can be a string, tuple or whatever. In particular, boxsize is in pixel coordinates as in fit_to_point, not in sky coordinates. Returns: list: A list of successful fits. If ``ids`` is None, returns a single list of :class:`tkp.sourcefinder.extract.Detection` s. Otherwise, returns a tuple of two matched lists: ([detections], [matching_ids]). 
""" if ids is not None: assert len(ids)==len(positions) successful_fits = [] successful_ids = [] for idx, posn in enumerate(positions): try: x, y, = self.wcs.s2p((posn[0], posn[1])) except RuntimeError, e: if (str(e).startswith("wcsp2s error: 8:") or str(e).startswith("wcsp2s error: 9:")): logger.warning("Input coordinates (%.2f, %.2f) invalid: ", posn[0], posn[1]) else: raise else: try: fit_results = self.fit_to_point(x, y, boxsize=boxsize, threshold=threshold, fixed=fixed) if not fit_results: # We were unable to get a good fit continue if ( fit_results.ra.error == float('inf') or fit_results.dec.error == float('inf')): logging.warning("position errors extend outside image") else: successful_fits.append(fit_results) if ids: successful_ids.append(ids[idx]) except IndexError as e: logger.warning("Input pixel coordinates (%.2f, %.2f) " "could not be fit because: " + e.message, posn[0], posn[1]) if ids: return successful_fits, successful_ids return successful_fits def label_islands(self, detectionthresholdmap, analysisthresholdmap): """ Return a lablled array of pixels for fitting. Args: detectionthresholdmap (numpy.ndarray): analysisthresholdmap (numpy.ndarray): Returns: list of valid islands (list of int) labelled islands (numpy.ndarray) """ # If there is no usable data, we return an empty set of islands. if not len(self.rmsmap.compressed()): logging.warning("RMS map masked; sourcefinding skipped") return [], numpy.zeros(self.data_bgsubbed.shape, dtype=numpy.int) # At this point, we select all the data which is eligible for # sourcefitting. We are actually using three separate filters, which # exclude: # # 1. Anything which has been masked before we reach this point; # 2. Any pixels which fall below the analysis threshold at that pixel # position; # 3. Any pixels corresponding to a position where the RMS noise is # less than RMS_FILTER (default 0.001) times the median RMS across # the whole image. # # The third filter attempts to exclude those regions of the image # which contain no usable data; for example, the parts of the image # falling outside the circular region produced by awimager. RMS_FILTER = 0.001 clipped_data = numpy.ma.where( (self.data_bgsubbed > analysisthresholdmap) & (self.rmsmap >= (RMS_FILTER * numpy.ma.median(self.rmsmap))), 1, 0 ).filled(fill_value=0) labelled_data, num_labels = ndimage.label(clipped_data, STRUCTURING_ELEMENT) labels_below_det_thr, labels_above_det_thr = [], [] if num_labels > 0: # Select the labels of the islands above the analysis threshold # that have maximum values values above the detection threshold. # Like above we make sure not to select anything where either # the data or the noise map are masked. # We fill these pixels in above_det_thr with -1 to make sure # its labels will not be in labels_above_det_thr. # NB data_bgsubbed, and hence above_det_thr, is a masked array; # filled() sets all mased values equal to -1. above_det_thr = ( self.data_bgsubbed - detectionthresholdmap ).filled(fill_value=-1) # Note that we avoid label 0 (the background). maximum_values = ndimage.maximum( above_det_thr, labelled_data, numpy.arange(1, num_labels + 1) ) # If there's only one island, ndimage.maximum will return a float, # rather than a list. The rest of this function assumes that it's # always a list, so we need to convert it. 
if isinstance(maximum_values, float): maximum_values = [maximum_values] # We'll filter out the insignificant islands for i, x in enumerate(maximum_values, 1): if x < 0: labels_below_det_thr.append(i) else: labels_above_det_thr.append(i) # Set to zero all labelled islands that are below det_thr: labelled_data = numpy.where( numpy.in1d(labelled_data.ravel(), labels_above_det_thr).reshape(labelled_data.shape), labelled_data, 0 ) return labels_above_det_thr, labelled_data def _pyse( self, detectionthresholdmap, analysisthresholdmap, deblend_nthresh, force_beam, labelled_data=None, labels=[] ): """ Run Python-based source extraction on this image. Args: detectionthresholdmap (numpy.ndarray): analysisthresholdmap (numpy.ndarray): deblend_nthresh (int): number of subthresholds for deblending. 0 disables. force_beam (bool): force all extractions to have major/minor axes equal to the restoring beam labelled_data (numpy.ndarray): labelled island map (output of numpy.ndimage.label()). Will be calculated automatically if not provided. labels (list): list of labels in the island map to use for fitting. Returns: (..utility.containers.ExtractionResults): This is described in detail in the "Source Extraction System" document by John Swinbank, available from TKP svn. """ # Map our chunks onto a list of islands. island_list = [] if labelled_data is None: labels, labelled_data = self.label_islands( detectionthresholdmap, analysisthresholdmap ) # Get a bounding box for each island: # NB Slices ordered by label value (1...N,) # 'None' returned for missing label indices. slices = ndimage.find_objects(labelled_data) for label in labels: chunk = slices[label-1] analysis_threshold = (analysisthresholdmap[chunk] / self.rmsmap[chunk]).max() # In selected_data only the pixels with the "correct" # (see above) labels are retained. Other pixel values are # set to -(bignum). # In this way, disconnected pixels within (rectangular) # slices around islands (particularly the large ones) do # not affect the source measurements. selected_data = numpy.ma.where( labelled_data[chunk] == label, self.data_bgsubbed[chunk].data, -extract.BIGNUM ).filled(fill_value=-extract.BIGNUM) island_list.append( extract.Island( selected_data, self.rmsmap[chunk], chunk, analysis_threshold, detectionthresholdmap[chunk], self.beam, deblend_nthresh, DEBLEND_MINCONT, STRUCTURING_ELEMENT ) ) # If required, we can save the 'left overs' from the deblending and # fitting processes for later analysis. This needs setting up here: if self.residuals: self.residuals_from_gauss_fitting = numpy.zeros(self.data.shape) self.residuals_from_deblending = numpy.zeros(self.data.shape) for island in island_list: self.residuals_from_deblending[island.chunk] += ( island.data.filled(fill_value=0.)) # Deblend each of the islands to its consituent parts, if necessary if deblend_nthresh: deblended_list = map(lambda x: x.deblend(), island_list) #deblended_list = [x.deblend() for x in island_list] island_list = list(utils.flatten(deblended_list)) # Iterate over the list of islands and measure the source in each, # appending it to the results list. results = containers.ExtractionResults() for island in island_list: if force_beam: fixed = {'semimajor': self.beam[0], 'semiminor': self.beam[1], 'theta': self.beam[2]} else: fixed = None fit_results = island.fit(fixed=fixed) if fit_results: measurement, residual = fit_results else: # Failed to fit; drop this island and go to the next. 
continue try: det = extract.Detection(measurement, self, chunk=island.chunk) if (det.ra.error == float('inf') or det.dec.error == float('inf')): logger.warn('Bad fit from blind extraction at pixel coords:' '%f %f - measurement discarded' '(increase fitting margin?)', det.x, det.y ) else: results.append(det) except RuntimeError as e: logger.error("Island not processed; unphysical?") if self.residuals: self.residuals_from_deblending[island.chunk] -= ( island.data.filled(fill_value=0.)) self.residuals_from_gauss_fitting[island.chunk] += residual def is_usable(det): # Check that both ends of each axis are usable; that is, that they # fall within an unmasked part of the image. # The axis will not likely fall exactly on a pixel number, so # check all the surroundings. def check_point(x, y): x = (numpy.floor(x), numpy.ceil(x)) y = (numpy.floor(y), numpy.ceil(y)) for position in itertools.product(x, y): try: if self.data.mask[position[0], position[1]]: # Point falls in mask return False except IndexError: # Point falls completely outside image return False # Point is ok return True for point in ( (det.start_smaj_x, det.start_smaj_y), (det.start_smin_x, det.start_smin_y), (det.end_smaj_x, det.end_smaj_y), (det.end_smin_x, det.end_smin_y) ): if not check_point(*point): logger.debug("Unphysical source at pixel %f, %f" % (det.x.value, det.y.value)) return False return True # Filter will return a list; ensure we return an ExtractionResults. return containers.ExtractionResults(filter(is_usable, results))
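

# Usage sketch (not part of the original module): one way calling code might
# drive fit_fixed_positions() above. The ``image`` argument is assumed to be
# an instance of the surrounding sourcefinder image class; the catalogue
# entries and the helper name are illustrative only.
def fit_catalogue_positions(image, catalogue, boxsize=10):
    """Fit every (identifier, RA, Dec) catalogue entry against ``image``.

    Returns a dict mapping identifier -> Detection for the successful fits;
    positions that could not be fit are simply absent from the result.
    """
    if not catalogue:
        return {}
    ids = [entry[0] for entry in catalogue]
    positions = [(entry[1], entry[2]) for entry in catalogue]
    detections, matched_ids = image.fit_fixed_positions(
        positions, boxsize=boxsize, threshold=None,
        fixed='position+shape', ids=ids)
    return dict(zip(matched_ids, detections))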
import functools import json import os import re from argparse import _SubParsersAction, Namespace from collections import OrderedDict from http import HTTPStatus from typing import Callable, Any, List from cloudfoundry_client.client import CloudFoundryClient from cloudfoundry_client.errors import InvalidStatusCode from cloudfoundry_client.json_object import JsonObject class Command(object): def __init__( self, entry: str, generate_parser: Callable[[_SubParsersAction], Any], execute: Callable[[CloudFoundryClient, Namespace], Any], ): self.entry = entry self.generate_parser = generate_parser self.execute = execute class CommandDomain(object): def __init__( self, display_name: str, entity_name: str, filter_list_parameters: list, api_version: str = "v2", name_property: str = "name", allow_retrieve_by_name: bool = False, allow_creation: bool = False, allow_deletion: bool = False, extra_methods: list = None, ): self.display_name = display_name self.client_domain = self.plural(entity_name) self.api_version = api_version self.entity_name = entity_name self.filter_list_parameters = filter_list_parameters self.name_property = name_property self.allow_retrieve_by_name = allow_retrieve_by_name self.allow_creation = allow_creation self.allow_deletion = allow_deletion self.commands = OrderedDict() self.commands[self._list_entry()] = self.list() self.commands[self._get_entry()] = self.get() if self.allow_creation: self.commands[self._create_entry()] = self.create() if self.allow_deletion: self.commands[self._delete_entry()] = self.delete() self.extra_description = OrderedDict() if extra_methods is not None: for command in extra_methods: self.commands[command[0].entry] = command[0] self.extra_description[command[0].entry] = command[1] def description(self) -> List[str]: description = [ " %s" % self.display_name, " %s : List %ss" % (self._list_entry(), self.entity_name), " %s : Get a %s by %s" % (self._get_entry(), self.entity_name, "UUID or name (first found then)" if self.allow_retrieve_by_name else "UUID"), ] if self.allow_creation: description.append(" %s : Create a %s" % (self._create_entry(), self.entity_name)) if self.allow_deletion: description.append(" %s : Delete a %s" % (self._delete_entry(), self.entity_name)) description.extend([" %s : %s" % (k, v) for k, v in self.extra_description.items()]) return description def generate_parser(self, parser: _SubParsersAction): for command in self.commands.values(): command.generate_parser(parser) def is_handled(self, action: str) -> bool: return action in self.commands def execute(self, client: CloudFoundryClient, action: str, arguments: Namespace): return self.commands[action].execute(client, arguments) def _get_client_domain(self, client: CloudFoundryClient) -> Any: return getattr(getattr(client, self.api_version), self.client_domain) @staticmethod def plural(entity_name: str) -> str: if entity_name.endswith("y") and not (re.match(r".+[aeiou]y", entity_name)): return "%sies" % entity_name[: len(entity_name) - 1] else: return "%ss" % entity_name @staticmethod def is_guid(s: str) -> bool: return re.match(r"[\d|a-z]{8}-[\d|a-z]{4}-[\d|a-z]{4}-[\d|a-z]{4}-[\d|a-z]{12}", s.lower()) is not None def id(self, entity: JsonObject) -> str: if self.api_version == "v2": return entity["metadata"]["guid"] elif self.api_version == "v3": return entity["guid"] def resolve_id(self, argument: str, get_by_name: Callable[[str], JsonObject]) -> str: if CommandDomain.is_guid(argument): return argument elif self.allow_retrieve_by_name: result = get_by_name(argument) if result 
is not None: if self.api_version == "v2": return result["metadata"]["guid"] elif self.api_version == "v3": return result["guid"] else: raise InvalidStatusCode(HTTPStatus.NOT_FOUND, "%s with name %s" % (self.client_domain, argument)) else: raise ValueError("id: %s: does not allow search by name" % self.client_domain) def name(self, entity: JsonObject) -> str: if self.api_version == "v2": return entity["entity"][self.name_property] elif self.api_version == "v3": return entity[self.name_property] def find_by_name(self, client: CloudFoundryClient, name: str) -> JsonObject: return self._get_client_domain(client).get_first(**{self.name_property: name}) def create(self) -> Command: entry = self._create_entry() def execute(client: CloudFoundryClient, arguments: Namespace): data = None if os.path.isfile(arguments.entity[0]): with open(arguments.entity[0], "r") as f: try: data = json.load(f) except ValueError: raise ValueError("entity: file %s does not contain valid json data" % arguments.entity[0]) else: try: data = json.loads(arguments.entity[0]) except ValueError: raise ValueError("entity: must be either a valid json file path or a json object") print(self._get_client_domain(client)._create(data).json()) def generate_parser(parser: _SubParsersAction): create_parser = parser.add_parser(entry) create_parser.add_argument( "entity", metavar="entities", type=str, nargs=1, help="Either a path of the json file containing the %s or a json object or the json %s object" % (self.client_domain, self.client_domain), ) return Command(entry, generate_parser, execute) def delete(self) -> Command: entry = self._delete_entry() def execute(client: CloudFoundryClient, arguments: Namespace): if self.is_guid(arguments.id[0]): self._get_client_domain(client)._remove(arguments.id[0]) elif self.allow_retrieve_by_name: entity = self.find_by_name(client, arguments.id[0]) if entity is None: raise InvalidStatusCode(HTTPStatus.NOT_FOUND, "%s with name %s" % (self.client_domain, arguments.id[0])) else: self._get_client_domain(client)._remove(self.id(entity)) else: raise ValueError("id: %s: does not allow search by name" % self.client_domain) def generate_parser(parser: _SubParsersAction): delete_parser = parser.add_parser(entry) delete_parser.add_argument( "id", metavar="ids", type=str, nargs=1, help="The id. Can be UUID or name (first found then)" if self.allow_retrieve_by_name else "The id (UUID)", ) return Command(entry, generate_parser, execute) def get(self) -> Command: entry = self._get_entry() def execute(client: CloudFoundryClient, arguments: Namespace): resource_id = self.resolve_id(arguments.id[0], functools.partial(self.find_by_name, client)) print(self._get_client_domain(client).get(resource_id).json(indent=1)) def generate_parser(parser: _SubParsersAction): get_parser = parser.add_parser(entry) get_parser.add_argument( "id", metavar="ids", type=str, nargs=1, help="The id. 
Can be UUID or name (first found then)" if self.allow_retrieve_by_name else "The id (UUID)", ) return Command(entry, generate_parser, execute) def list(self) -> Command: entry = self._list_entry() def execute(client: CloudFoundryClient, arguments: Namespace): filter_list = dict() for filter_parameter in self.filter_list_parameters: filter_value = getattr(arguments, filter_parameter) if filter_value is not None: filter_list[filter_parameter] = filter_value for entity in self._get_client_domain(client).list(**filter_list): if self.name_property is not None: print("%s - %s" % (self.id(entity), self.name(entity))) else: print(self.id(entity)) def generate_parser(parser: _SubParsersAction): list_parser = parser.add_parser(entry) for filter_parameter in self.filter_list_parameters: list_parser.add_argument( "-%s" % filter_parameter, action="store", dest=filter_parameter, type=str, default=None, help="Filter with %s" % filter_parameter, ) return Command(entry, generate_parser, execute) def _list_entry(self) -> str: return "list_%s" % self.plural(self.entity_name) def _create_entry(self) -> str: return "create_%s" % self.entity_name def _delete_entry(self) -> str: return "delete_%s" % self.entity_name def _get_entry(self) -> str: return "get_%s" % self.entity_name
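

# Usage sketch (not part of the original module): wiring a CommandDomain into
# an argparse-based CLI. The "organization" domain, the helper names and the
# chosen filter parameters are illustrative assumptions only.
def build_organization_cli():
    import argparse

    domain = CommandDomain(
        display_name="Organizations",
        entity_name="organization",
        filter_list_parameters=["name"],
        allow_retrieve_by_name=True,
        allow_deletion=True,
    )
    parser = argparse.ArgumentParser(description="\n".join(domain.description()))
    subparsers = parser.add_subparsers(dest="action")
    domain.generate_parser(subparsers)
    return domain, parser


def run_organization_cli(client: CloudFoundryClient, argv=None):
    domain, parser = build_organization_cli()
    arguments = parser.parse_args(argv)
    if domain.is_handled(arguments.action):
        domain.execute(client, arguments.action, arguments)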
import struct import numpy as np from datetime import datetime from dataquick.loaders import Loader, register_loaders from dataquick.plugins.structures.powderdiffraction import PowderDiffraction def raw_check_version(f): """ Parameters ---------- f : file object Returns ------- version : string or None """ f.seek(0) head = read_str(f, 4) version = None if head == "RAW ": version = "ver. 1" elif head == "RAW2": version = "ver. 2" elif head == "RAW1" and read_str(f, 3) == ".01": version = "ver. 3" return version def load_raw(fname, name=None): """ .raw file output from Bruker XRD. Tested with files from Bruker D8 Ported from xylib\bruker_raw.cpp If multiple ranges exist in the RAW file a list of PowderDiffraction DataFrame objects are returned. If only one a single object is returned Parameters ---------- fname : str filename name : str measurement identifier Returns ------- df_xrd : single PowderDiffraction or list of them PowderDiffraction DataFrame based on XRD data """ with open(fname, 'rb') as f: version = raw_check_version(f) if version == 'ver. 3': dfs = load_raw_version1_01(f, name=name) else: return None if isinstance(dfs, PowderDiffraction) or type(dfs) in (list, tuple): if len(dfs) == 0: raise IOError('Unable to read scan from file') elif len(dfs) == 1: return dfs[0] else: return dfs else: raise TypeError("Unrecognized DataFrame type: {:}".format(type(dfs))) def read_int(f): return struct.unpack('<i', f.read(4))[0] def read_dbl(f): return struct.unpack('<d', f.read(8))[0] def read_flt(f): return struct.unpack('<f', f.read(4))[0] def read_str(f, len): return f.read(len).decode('utf-8').rstrip('\x00') def load_raw_version1_01(f, name=None): """ Read a file object pointing to a Bruker RAW file of version 1.01 Since RAW files can contain more than one scan (or range) a list of PowderDiffraction dataframes are returned Parameters ---------- f : file object Returns ------- dfs : list of PowderDiffraction objects """ meta = {} meta["format version"] = "3" f.seek(8) # jump to after version line file_status = read_int(f) # address 8 if file_status == 1: meta["file status"] = "done" elif file_status == 2: meta["file status"] = "active" elif file_status == 3: meta["file status"] = "aborted" elif file_status == 4: meta["file status"] = "interrupted" range_cnt = read_int(f) # address 12 meta["MEASURE_DATE"] = read_str(f, 10) # address 16 meta["MEASURE_TIME"] = read_str(f, 10) # address 26 meta["USER"] = read_str(f, 72) # address 36 meta["SITE"] = read_str(f, 218) # address 108 meta["SAMPLE_ID"] = read_str(f, 60) # address 326 meta["COMMENT"] = read_str(f, 160) # address 386 f.read(2) # apparently there is a bug in docs, 386+160 != 548 f.read(4) # goniometer code # address 548 f.read(4) # goniometer stage code # address 552 f.read(4) # sample loader code # address 556 f.read(4) # goniometer controller code # address 560 f.read(4) # (R4) goniometer radius # address 564 f.read(4) # (R4) fixed divergence...# address 568 f.read(4) # (R4) fixed sample slit...# address 572 f.read(4) # primary Soller slit # address 576 f.read(4) # primary monochromator # address 580 f.read(4) # (R4) fixed antiscatter...# address 584 f.read(4) # (R4) fixed detector slit...# address 588 f.read(4) # secondary Soller slit # address 592 f.read(4) # fixed thin film attachment # address 596 f.read(4) # beta filter # address 600 f.read(4) # secondary monochromator # address 604 meta["ANODE_MATERIAL"] = read_str(f, 4) # address 608 f.read(4) # unused # address 612 meta["ALPHA_AVERAGE"] = read_dbl(f) # address 616 
meta["ALPHA1"] = read_dbl(f) # address 624 meta["ALPHA2"] = read_dbl(f) # address 632 meta["BETA"] = read_dbl(f) # address 640 meta["ALPHA_RATIO"] = read_dbl(f) # address 648 f.read(4) # (C4) unit name # address 656 f.read(4) # (R4) intensity beta:a1 # address 660 meta["measurement time"] = read_flt(f) # address 664 f.read(43) # unused # address 668 f.read(1) # hardware dependency... # address 711 # assert (f.tellg() == 712); # Expected meta # Convert to datetime timestamp = datetime.strptime('{} {}'.format(meta["MEASURE_DATE"], meta["MEASURE_TIME"]), '%m/%d/%y %H:%M:%S') meta['date'] = timestamp.isoformat() # range header dfs = [] for cur_range in range(range_cnt): if name is None: meta["name"] = '{}-{}'.format(meta["SAMPLE_ID"], cur_range) else: meta["name"] = '{}-{}'.format(name, cur_range) # Take off the number of only one if range_cnt == 1: meta["name"] = meta["name"][:-2] # Block * blk = new Block; header_len = read_int(f) # address 0 assert header_len == 304 steps = read_int(f) # address 4 meta["STEPS"] = steps start_theta = read_dbl(f) # address 8 meta["START_THETA"] = start_theta start_2theta = read_dbl(f) # address 16 meta["START_2THETA"] = start_2theta f.read(8) # Chi drive start # address 24 f.read(8) # Phi drive start # address 32 f.read(8) # x drive start # address 40 f.read(8) # y drive start # address 48 f.read(8) # z drive start # address 56 f.read(8) # address 64 f.read(6) # address 72 f.read(2) # unused # address 78 f.read(8) # (R8) variable antiscat.# address 80 f.read(6) # address 88 f.read(2) # unused # address 94 f.read(4) # detector code # address 96 meta["HIGH_VOLTAGE"] = read_flt(f) # address 100 meta["AMPLIFIER_GAIN"] = read_flt(f) # 104 meta["DISCRIMINATOR_1_LOWER_LEVEL"] = read_flt(f) # 108 f.read(4) # address 112 f.read(4) # address 116 f.read(8) # address 120 f.read(4) # address 128 f.read(4) # address 132 f.read(5) # address 136 f.read(3) # unused # address 141 f.read(8) # address 144 f.read(8) # address 152 f.read(8) # address 160 f.read(4) # address 168 f.read(4) # unused # address 172 step_size = read_dbl(f) # address 176 meta["STEP_SIZE"] = step_size f.read(8) # address 184 meta["TIME_PER_STEP"] = read_flt(f) # 192 f.read(4) # address 196 f.read(4) # address 200 f.read(4) # address 204 meta["ROTATION_SPEED [rpm]"] = read_flt(f) # 208 f.read(4) # address 212 f.read(4) # address 216 f.read(4) # address 220 meta["GENERATOR_VOLTAGE"] = read_int(f) # 224 meta["GENERATOR_CURRENT"] = read_int(f) # 228 f.read(4) # address 232 f.read(4) # unused # address 236 meta["USED_LAMBDA"] = read_dbl(f) # 240 f.read(4) # address 248 f.read(4) # address 252 supplementary_headers_size = read_int(f) # address 256 f.read(4) # address 260 f.read(4) # address 264 f.read(4) # unused # address 268 f.read(8) # address 272 f.read(24) # unused # address 280 # assert (f.tellg() == 712 + (cur_range + 1) * header_len); if supplementary_headers_size > 0: f.read(supplementary_headers_size) # StepColumn * xcol = new StepColumn(start_2theta, step_size); # blk->add_column(xcol); # VecColumn * ycol = new VecColumn; xcol = np.array(range(steps)) * step_size + start_2theta ycol = [] for i in range(steps): ycol.append(read_flt(f)) ycol = np.array(ycol) data = np.c_[xcol, ycol] df_meta = meta.copy() if meta["ANODE_MATERIAL"].startswith('Cu'): source = 'CuKa' else: raise ValueError("Unimplemented Anode Material {}".format(meta["ANODE_MATERIAL"])) df_xrd = PowderDiffraction(data=data, columns=['twotheta', 'intensity'], wavelength=df_meta['ALPHA_AVERAGE'], source=source, xunit="deg", yunit="counts", 
name=df_meta["name"], date=df_meta["date"], metadata=df_meta) dfs.append(df_xrd) return dfs bruker_raw_loader = Loader(load_raw, PowderDiffraction, [".raw"], "Bruker RAW XRD") register_loaders( bruker_raw_loader, )
#!/usr/bin/env python # A tool to parse ASTMatchers.h and update the documentation in # ../LibASTMatchersReference.html automatically. Run from the # directory in which this file is located to update the docs. import collections import re import urllib2 MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h' # Each matcher is documented in one row of the form: # result | name | argA # The subsequent row contains the documentation and is hidden by default, # becoming visible via javascript when the user clicks the matcher name. TD_TEMPLATE=""" <tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr> <tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr> """ # We categorize the matchers into these three categories in the reference: node_matchers = {} narrowing_matchers = {} traversal_matchers = {} # We output multiple rows per matcher if the matcher can be used on multiple # node types. Thus, we need a new id per row to control the documentation # pop-up. ids[name] keeps track of those ids. ids = collections.defaultdict(int) # Cache for doxygen urls we have already verified. doxygen_probes = {} def esc(text): """Escape any html in the given text.""" text = re.sub(r'&', '&amp;', text) text = re.sub(r'<', '&lt;', text) text = re.sub(r'>', '&gt;', text) def link_if_exists(m): name = m.group(1) url = 'http://clang.llvm.org/doxygen/classclang_1_1%s.html' % name if url not in doxygen_probes: try: print 'Probing %s...' % url urllib2.urlopen(url) doxygen_probes[url] = True except: doxygen_probes[url] = False if doxygen_probes[url]: return r'Matcher&lt<a href="%s">%s</a>&gt;' % (url, name) else: return m.group(0) text = re.sub( r'Matcher&lt;([^\*&]+)&gt;', link_if_exists, text) return text def extract_result_types(comment): """Extracts a list of result types from the given comment. We allow annotations in the comment of the matcher to specify what nodes a matcher can match on. Those comments have the form: Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]]) Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...]. Returns the empty list if no 'Usable as' specification could be parsed. """ result_types = [] m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S) if m: return ['*'] while True: m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S) if not m: if re.search(r'Usable as:\s*$', comment): return result_types else: return None result_types += [m.group(2)] comment = m.group(1) def strip_doxygen(comment): """Returns the given comment without \-escaped words.""" # If there is only a doxygen keyword in the line, delete the whole line. comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M) # Delete the doxygen command and the following whitespace. comment = re.sub(r'\\[^\s]+\s+', r'', comment) return comment def unify_arguments(args): """Gets rid of anything the user doesn't care about in the argument list.""" args = re.sub(r'internal::', r'', args) args = re.sub(r'const\s+', r'', args) args = re.sub(r'&', r' ', args) args = re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', args) return args def add_matcher(result_type, name, args, comment, is_dyncast=False): """Adds a matcher to one of our categories.""" if name == 'id': # FIXME: Figure out whether we want to support the 'id' matcher. 
return matcher_id = '%s%d' % (name, ids[name]) ids[name] += 1 args = unify_arguments(args) matcher_html = TD_TEMPLATE % { 'result': esc('Matcher<%s>' % result_type), 'name': name, 'args': esc(args), 'comment': esc(strip_doxygen(comment)), 'id': matcher_id, } if is_dyncast: node_matchers[result_type + name] = matcher_html # Use a heuristic to figure out whether a matcher is a narrowing or # traversal matcher. By default, matchers that take other matchers as # arguments (and are not node matchers) do traversal. We specifically # exclude known narrowing matchers that also take other matchers as # arguments. elif ('Matcher<' not in args or name in ['allOf', 'anyOf', 'anything', 'unless']): narrowing_matchers[result_type + name + esc(args)] = matcher_html else: traversal_matchers[result_type + name + esc(args)] = matcher_html def act_on_decl(declaration, comment, allowed_types): """Parse the matcher out of the given declaration and comment. If 'allowed_types' is set, it contains a list of node types the matcher can match on, as extracted from the static type asserts in the matcher definition. """ if declaration.strip(): # Node matchers are defined by writing: # VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name; m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*< \s*([^\s,]+)\s*(?:, \s*([^\s>]+)\s*)?> \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X) if m: result, inner, name = m.groups() if not inner: inner = result add_matcher(result, name, 'Matcher<%s>...' % inner, comment, is_dyncast=True) return # Parse the various matcher definition macros. m = re.match(""".*AST_TYPE_MATCHER\( \s*([^\s,]+\s*), \s*([^\s,]+\s*) \)\s*;\s*$""", declaration, flags=re.X) if m: inner, name = m.groups() add_matcher('Type', name, 'Matcher<%s>...' % inner, comment, is_dyncast=True) # FIXME: re-enable once we have implemented casting on the TypeLoc # hierarchy. # add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner, # comment, is_dyncast=True) return m = re.match(""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER\( \s*([^\s,]+\s*), \s*(?:[^\s,]+\s*), \s*AST_POLYMORPHIC_SUPPORTED_TYPES_([^(]*)\(([^)]*)\) \)\s*;\s*$""", declaration, flags=re.X) if m: loc, name, n_results, results = m.groups()[0:4] result_types = [r.strip() for r in results.split(',')] comment_result_types = extract_result_types(comment) if (comment_result_types and sorted(result_types) != sorted(comment_result_types)): raise Exception('Inconsistent documentation for: %s' % name) for result_type in result_types: add_matcher(result_type, name, 'Matcher<Type>', comment) if loc: add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>', comment) return m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\( \s*([^\s,]+)\s*, \s*AST_POLYMORPHIC_SUPPORTED_TYPES_([^(]*)\(([^)]*)\) (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*\d+\s*)? \)\s*{\s*$""", declaration, flags=re.X) if m: p, n, name, n_results, results = m.groups()[0:5] args = m.groups()[5:] result_types = [r.strip() for r in results.split(',')] if allowed_types and allowed_types != result_types: raise Exception('Inconsistent documentation for: %s' % name) if n not in ['', '2']: raise Exception('Cannot parse "%s"' % declaration) args = ', '.join('%s %s' % (args[i], args[i+1]) for i in range(0, len(args), 2) if args[i]) for result_type in result_types: add_matcher(result_type, name, args, comment) return m = re.match(r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\( (?:\s*([^\s,]+)\s*,)? 
\s*([^\s,]+)\s* (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*\d+\s*)? \)\s*{\s*$""", declaration, flags=re.X) if m: p, n, result, name = m.groups()[0:4] args = m.groups()[4:] if n not in ['', '2']: raise Exception('Cannot parse "%s"' % declaration) args = ', '.join('%s %s' % (args[i], args[i+1]) for i in range(0, len(args), 2) if args[i]) add_matcher(result, name, args, comment) return m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\( (?:\s*([^\s,]+)\s*,)? \s*([^\s,]+)\s* (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*\d+\s*)? \)\s*{\s*$""", declaration, flags=re.X) if m: p, n, result, name = m.groups()[0:4] args = m.groups()[4:] if not result: if not allowed_types: raise Exception('Did not find allowed result types for: %s' % name) result_types = allowed_types else: result_types = [result] if n not in ['', '2']: raise Exception('Cannot parse "%s"' % declaration) args = ', '.join('%s %s' % (args[i], args[i+1]) for i in range(0, len(args), 2) if args[i]) for result_type in result_types: add_matcher(result_type, name, args, comment) return # Parse ArgumentAdapting matchers. m = re.match( r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*(?:LLVM_ATTRIBUTE_UNUSED\s*) ([a-zA-Z]*)\s*=\s*{};$""", declaration, flags=re.X) if m: name = m.groups()[0] add_matcher('*', name, 'Matcher<*>', comment) return # Parse Variadic operator matchers. m = re.match( r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s>]+)\s*>\s* ([a-zA-Z]*)\s*=\s*{.*};$""", declaration, flags=re.X) if m: min_args, max_args, name = m.groups()[:3] if max_args == '1': add_matcher('*', name, 'Matcher<*>', comment) return elif max_args == 'UINT_MAX': add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment) return # Parse free standing matcher functions, like: # Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) { m = re.match(r"""^\s*(.*)\s+ ([^\s\(]+)\s*\( (.*) \)\s*{""", declaration, re.X) if m: result, name, args = m.groups() args = ', '.join(p.strip() for p in args.split(',')) m = re.match(r'.*\s+internal::(Bindable)?Matcher<([^>]+)>$', result) if m: result_types = [m.group(2)] else: result_types = extract_result_types(comment) if not result_types: if not comment: # Only overloads don't have their own doxygen comments; ignore those. print 'Ignoring "%s"' % name else: print 'Cannot determine result type for "%s"' % name else: for result_type in result_types: add_matcher(result_type, name, args, comment) else: print '*** Unparsable: "' + declaration + '" ***' def sort_table(matcher_type, matcher_map): """Returns the sorted html table for the given row map.""" table = '' for key in sorted(matcher_map.keys()): table += matcher_map[key] + '\n' return ('<!-- START_%(type)s_MATCHERS -->\n' + '%(table)s' + '<!--END_%(type)s_MATCHERS -->') % { 'type': matcher_type, 'table': table, } # Parse the ast matchers. # We alternate between two modes: # body = True: We parse the definition of a matcher. We need # to parse the full definition before adding a matcher, as the # definition might contain static asserts that specify the result # type. # body = False: We parse the comments and declaration of the matcher. 
comment = ''
declaration = ''
allowed_types = []
body = False
for line in open(MATCHERS_FILE).read().splitlines():
  if body:
    if line.strip() and line[0] == '}':
      if declaration:
        act_on_decl(declaration, comment, allowed_types)
        comment = ''
        declaration = ''
        allowed_types = []
      body = False
    else:
      m = re.search(r'is_base_of<([^,]+), NodeType>', line)
      if m and m.group(1):
        allowed_types += [m.group(1)]
    continue
  if line.strip() and line.lstrip()[0] == '/':
    comment += re.sub(r'/+\s?', '', line) + '\n'
  else:
    declaration += ' ' + line
    if ((not line.strip()) or line.rstrip()[-1] == ';' or
        (line.rstrip()[-1] == '{' and line.rstrip()[-3:] != '= {')):
      if line.strip() and line.rstrip()[-1] == '{':
        body = True
      else:
        act_on_decl(declaration, comment, allowed_types)
        comment = ''
        declaration = ''
        allowed_types = []

node_matcher_table = sort_table('DECL', node_matchers)
narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)

reference = open('../LibASTMatchersReference.html').read()
reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
                   '%s', reference, flags=re.S) % node_matcher_table
reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
                   '%s', reference, flags=re.S) % narrowing_matcher_table
reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
                   '%s', reference, flags=re.S) % traversal_matcher_table

with open('../LibASTMatchersReference.html', 'w') as output:
  output.write(reference)
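
# Usage sketch (not part of the original script, and never invoked by it): the
# 'Usable as:' annotation format understood by extract_result_types() above.
# The sample comments are illustrative; note that result types are collected
# from the last annotation backwards.
def _extract_result_types_examples():
  assert extract_result_types('Matches anything.\nUsable as: Any Matcher\n') == ['*']
  assert extract_result_types(
      'Matches if both given matchers match.\n'
      'Usable as: Matcher<Decl>, Matcher<Stmt>\n') == ['Stmt', 'Decl']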
# tr_mime.py chromatic universe 2017 william k. johnson from datetime import datetime from io import StringIO import sys import time import json import os import pprint import sys from io import StringIO import tornado from tornado.locks import Semaphore from tornado.concurrent import run_on_executor from concurrent.futures import ThreadPoolExecutor from tornado.escape import json_decode, json_encode from tornado import gen from bson import SON , ObjectId from bson import json_util import string from copy import deepcopy #chilkat mime import chilkat #cci from cci_mta_trinity.streams.tr_mongo_rest import _logger # body_part_specs_set = { 'HEADER' , 'HEADER.FIELDS' , 'HEADER.FIELDS.NOT' , 'MIME' , 'TEXT' , '' } thread_pool = ThreadPoolExecutor( 10 ) # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def call_blocking( func , args ) : yield thread_pool.submit( func , args ) # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def stage_one_mime_container( mime , mime_bytes ) : """ :param mime_bytest: :return ckmime object: """ _logger.info( '...stage_one_mime_container...' ) try : mime.LoadMime( mime_bytes ) except Exception as e : _logger.error( '..stage_one_mime_container.. %s' , str( e ) ) # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_nested_body( mime , part ) : """ :param mime: :param part: :return: """ parts = part.split( '.' ) # get top level mime_local = None dw = mime.get_NumParts() for level in parts : mime_local = mime.GetPart( int( level ) - 1 ) print( 'foo' ) return mime_local.getEntireBody() # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_rfc822text( retr_mime , **kwargs ) : """ :param retr_mime: :return: """ _logger.info( '...retr_rfc822text...' ) return retr_mime.getEntireBody() # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_rfc822( retr_mime , **kwargs ) : """ :param retr_mime: :return: """ _logger.info( '...retr_rfc822...' ) return retr_mime.getMime() # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_body_header( retr_mime , **kwargs ) : """ :param mime: :return: """ return retr_mime.getEntireHead() # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_body_text( mime , **kwargs ) : """ :param mime: :return: """ body = kwargs['body'] # mime body spec must be qualified with section identifier if len( body['section_part_atom'] ) is 0 : return None return mime.getEntireBody() # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_body_part_mime( mime , **kwargs ) : """ :param mime: :param kwargs: :return: """ _logger.info( '...retr_body_part_mime...' ) body = kwargs['body'] if len( body['section_part_atom'] ) is 0 : _logger.info( '...imvalid specification...<MIME> spec requires section id,,' ) return None mime_local = None parts = body['section_part_atom'].split( '.' 
) # get top level mime_local = mime for level in parts : mime_local = mime_local.GetPart( int( level ) - 1 ) return mime_local.getEntireHead() # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def retr_body_headers_fields( mime , **kwargs ) : """ :param mime: :param diff: :return: """ body = kwargs['body'] diff = kwargs['diff'] subset_field_names = set( body['header_fields'] ) subset_field_names = { x.lower() for x in subset_field_names } print( subset_field_names ) field_names = set() idx = 0 fld = chilkat.CkString() b_ret = True # header fields list while b_ret is True : mime.GetHeaderFieldName( idx , fld ) if len( fld.getString() ) is 0 : break field_names.add( fld.getString().lower() ) idx += 1 print( field_names ) out_flds = set() if not diff : # fields out_flds = subset_field_names.intersection( field_names ) else : out_flds = field_names.difference( subset_field_names ) out_str = str() for atom in out_flds : out_str += string.capwords( atom ) out_str += ': ' out_str += mime.getHeaderField( atom ) out_str += '\r\n' return out_str # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def truncate_on_partial_fetch( tbody , payload_sz ) : """ :param payload: :return start , end , literal: """ tstart = tbody['body_section_partial']['start'] tend = tbody['body_section_partial']['end'] literal = 0 # if both endpoints are 0 , or end > start; # there is no partial; if start is beyond eof return # -1 as literal denoting an empty string if tstart > tend : # end > start; return 0 , 0 , 0 elif tstart > payload_sz : # -1 as literal denoting an empty string return 0 , 0 , -1 else : literal = tend - tstart return tstart , tend , literal # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def dispatch_body_nested_spec( mime , body , **kargs ) : """ :param mime: :param body: :param kargs: :return: """ pass # ------------------------------------------------------------------------------------------------- @tornado.gen.coroutine def dispatch_body_spec( mime , body , **kargs ) : """ :param mime: :param body: :param ksrgs: :return: """ _logger.info( '...dispatch_body_spec...' ) partials = None if 'partials' in kargs : partials = kargs['partials'] if body['section_part_specifier'] not in body_part_specs_set : _logger.error( '...bad body spec in fetch...' ) return None spec_dispatch = { 'HEADER' : ( retr_body_header , {} ) , 'HEADER.FIELDS' : ( retr_body_headers_fields , { 'body' : body , 'diff' : False } ) , 'HEADER.FIELDS.NOT' : ( retr_body_headers_fields , { 'body' : body , 'diff' : True } ) , 'MIME' : ( retr_body_part_mime , { 'body' : body } ) , 'TEXT' : ( retr_rfc822text , {} ) , '' : ( retr_rfc822 , {} ) , } val = yield spec_dispatch[body['section_part_specifier']][0] ( mime , **spec_dispatch[body['section_part_specifier']][1]) # partial fetch partial = () if body['body_section_partial']['partial'] is True : partial = yield truncate_on_partial_fetch( body ,len( val ) ) _logger.info( 'truncate->start: %d end: %d literal %d' % ( partial[0] , partial[1] , partial[2] ) ) #i inclusive in both endpoints? 
        # rfc3501 is somewhat ambiguous on this
        # todo clarify
        # start is past eof, return empty string
        load = None
        if partial[2] == -1 :
            load = ""
        # start-end
        elif partial[2] != 0 :
            load = val[partial[0]:partial[1] + 1]
            partials[body['section_part_specifier']] = partial[0]
        return load

    return val


# --------------------------------------------------------------------------------------
if __name__ == "__main__" :

    _logger.info( '...tr_mime main...' )
    mime_instance = chilkat.CkMime()
    b_ret = mime_instance.UnlockComponent( "cci" )
    print( mime_instance.lastErrorText() )
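

# -------------------------------------------------------------------------------------------------
# Usage sketch (not part of the original module): the shape of the ``body``
# mapping consumed by dispatch_body_spec() above, as inferred from the
# handlers in this file. The concrete values are illustrative; the real
# mapping is built by the FETCH parser elsewhere in cci_mta_trinity.
def example_fetch_body_spec() :
    return { 'section_part_specifier' : 'HEADER.FIELDS' ,   # one of body_part_specs_set
             'section_part_atom' : '1.2' ,                  # dotted mime part path, '' for top level
             'header_fields' : [ 'From' , 'Subject' , 'Date' ] ,
             'body_section_partial' : { 'partial' : False , 'start' : 0 , 'end' : 0 } }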
import re import logging from twisted.conch.telnet import TelnetTransport, TelnetProtocol from twisted.internet import reactor, endpoints, task from twisted.internet.protocol import ServerFactory import game_loop logger = logging.getLogger(__name__) reset = "\x1B[0m" bold = "\x1B[1m" dim = "\x1B[2m" under = "\x1B[4m" reverse = "\x1B[7m" hide = "\x1B[8m" clearscreen = "\x1B[2J" clearline = "\x1B[2K" black = "\x1B[30m" red = "\x1B[31m" green = "\x1B[32m" yellow = "\x1B[33m" blue = "\x1B[34m" magenta = "\x1B[35m" cyan = "\x1B[36m" white = "\x1B[37m" bblack = "\x1B[40m" bred = "\x1B[41m" bgreen = "\x1B[42m" byellow = "\x1B[43m" bblue = "\x1B[44m" bmagenta = "\x1B[45m" bcyan = "\x1B[46m" bwhite = "\x1B[47m" concealed = "\x1B[8m" newline = "\r\n" carriage_return = "\r" TELNET_REPLACEMENTS = dict(reset=reset, bold=bold, dim=dim, under=under, reverse=reverse, hide=hide, clearscreen=clearscreen, clearline=clearline, black=black, red=red, green=green, yellow=yellow, blue=blue, magenta=magenta, cyan=cyan, white=white, bblack=bblack, bred=bred, bgreen=bgreen, byellow=byellow, bblue=bblue, bmagenta=bmagenta, bcyan=bcyan, bwhite=bwhite, concealed=concealed, newline=newline, carriage_return=carriage_return) ######################################################################## def tf(text): """ Replace the telnet tags with the telnet escape values. """ text = str(text) for key, value in TELNET_REPLACEMENTS.items(): text = text.replace("<" + key + ">", value) return text ######################################################################## class MudTelnetHandler(object): #################################################################### def __init__(self, protocol): self.protocol = protocol self.data_handlers = [] #################################################################### def send(self, data): self.protocol.send(data) #################################################################### def get_remote_address(self): if self.protocol: return self.protocol.get_remote_address() else: return "<unknown address>" #################################################################### def leave(self): logger.info("%s - leave called in %s", self.get_remote_address(), self.__class__.__name__) #################################################################### def enter(self): logger.info("%s - enter called in %s", self.get_remote_address(), self.__class__.__name__) #################################################################### def hung_up(self): logger.warn("%s - hung up in %s", self.get_remote_address(), self.__class__.__name__) #################################################################### def flooded(self): logger.warn("%s - flooded in %s", self.get_remote_address(), self.__class__.__name__) #################################################################### def handle(self, data): raise NotImplementedError ######################################################################## class BaseStateDispatchHandler(MudTelnetHandler): initial_state = None state = None #################################################################### def handle(self, data): method_name = ("handle_%s" % self.state.name).lower() logger.info("Received '%s', passing it to handler %s", data, method_name) getattr(self, method_name)(data) ######################################################################## class BaseCommandDispatchHandler(MudTelnetHandler): last_command = None #################################################################### def _register_data_handler(self, predicate, handler): 
self.data_handlers.append((predicate, handler)) #################################################################### def handle(self, data): if data: split = data.split(None, 1) first_word = data.split(None, 1)[0] if len(split) > 1: rest = split[1] else: rest = "" for predicate, handler in self.data_handlers: if self.check_predicate(predicate, first_word): handler(data, first_word, rest) if predicate != "/": self.last_command = data return #################################################################### def check_predicate(self, predicate, data): if isinstance(predicate, basestring): return predicate == data elif isinstance(predicate, list) or isinstance(predicate, tuple): return data in predicate elif isinstance(predicate, bool): return predicate elif isinstance(predicate, re._pattern_type): if predicate.match(data): return True else: return False else: raise ValueError("I don't know how to use the predicate '%s' of type '%s'" % predicate, type(predicate)) ######################################################################## class MudTelnetProtocol(TelnetProtocol): handler_class = None closed = False #################################################################### def __init__(self): self.handlers = [] self.handlers.append(self.handler_class(self)) #################################################################### @classmethod def set_handler_class(cls, handler_class): cls.handler_class = handler_class #################################################################### @property def handler(self): if self.handlers: return self.handlers[-1] #################################################################### def get_remote_address(self): if self.transport: return self.transport.getPeer() else: return "<unknown address>" #################################################################### def add_handler(self, handler): if self.handler: self.handler.leave() self.handlers.append(handler) self.handler.enter() #################################################################### def drop_connection(self): self.transport.loseConnection() #################################################################### def remove_handler(self): if self.handler: self.handler.leave() self.handlers.pop(-1) if self.handler: self.handler.enter() #################################################################### def clear_handlers(self): if self.handler: self.handler.leave() self.handlers = [] #################################################################### def enableRemote(self, option): return False #################################################################### def disableRemote(self, option): pass #################################################################### def enableLocal(self, option): return False #################################################################### def disableLocal(self, option): pass #################################################################### def send(self, data): if not data.endswith("<concealed>"): data += "<reset>" data = tf(data) self.transport.write(data) #################################################################### def dataReceived(self, data): data = data.strip() self.handler.handle(data) #################################################################### def connectionMade(self): self.handler.enter() #################################################################### def connectionLost(self, reason=None): self.closed = True ######################################################################## def 
initialize_logger(logging_level=logging.INFO):
    logging.basicConfig(level=logging_level,
                        format="[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
                        datefmt="%d/%b/%Y %H:%M:%S")


########################################################################
def stop():
    logger.info("Shutting down the reactor...")
    reactor.stop()


########################################################################
def run():
    initialize_logger(logging.INFO)
    factory = ServerFactory()
    factory.protocol = lambda: TelnetTransport(MudTelnetProtocol)
    endpoints.serverFromString(reactor, "tcp:8023").listen(factory)

    round_timer = task.LoopingCall(game_loop.perform_round)
    round_timer.start(game_loop.ROUND_TIME)
    db_timer = task.LoopingCall(game_loop.save_databases)
    db_timer.start(game_loop.DB_SAVE_TIME)
    heal_timer = task.LoopingCall(game_loop.perform_heal)
    heal_timer.start(game_loop.HEAL_TIME)
    regen_timer = task.LoopingCall(game_loop.perform_regen)
    regen_timer.start(game_loop.REGEN_TIME)

    print "running on port 8023..."
    reactor.run()
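

########################################################################
# Usage sketch (not part of the original module): a minimal handler wired into
# the protocol. EchoHandler is illustrative; a real game would more likely
# subclass BaseCommandDispatchHandler or BaseStateDispatchHandler instead.
class EchoHandler(MudTelnetHandler):
    ####################################################################
    def enter(self):
        MudTelnetHandler.enter(self)
        self.send("<bold><green>Welcome! Type something and I will echo it.<newline>")

    ####################################################################
    def handle(self, data):
        self.send("<white>You said: %s<newline>" % data)


# To serve EchoHandler, register it before starting the reactor:
#     MudTelnetProtocol.set_handler_class(EchoHandler)
#     run()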
# # spyne - Copyright (C) Spyne contributors. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # # # FIXME: Supports e.g. # MULTIPOINT (10 40, 40 30, 20 20, 30 10) # # but not: # MULTIPOINT ((10 40), (40 30), (20 20), (30 10)) # from spyne.model import SimpleModel from spyne.model.primitive.string import Unicode FLOAT_PATTERN = r'-?[0-9]+\.?[0-9]*(e-?[0-9]+)?' _rinse_and_repeat = r'\s*\(%s\s*(,\s*%s)*\)\s*' def _get_one_point_pattern(dim): return ' +'.join([FLOAT_PATTERN] * dim) def _get_point_pattern(dim): return r'POINT\s*\(%s\)' % _get_one_point_pattern(dim) def _get_one_multipoint_pattern(dim): one_point = _get_one_point_pattern(dim) return _rinse_and_repeat % (one_point, one_point) def _get_multipoint_pattern(dim): return r'MULTIPOINT\s*%s' % _get_one_multipoint_pattern(dim) def _get_one_line_pattern(dim): one_point = _get_one_point_pattern(dim) return _rinse_and_repeat % (one_point, one_point) def _get_linestring_pattern(dim): return r'LINESTRING\s*%s' % _get_one_line_pattern(dim) def _get_one_multilinestring_pattern(dim): one_line = _get_one_line_pattern(dim) return _rinse_and_repeat % (one_line, one_line) def _get_multilinestring_pattern(dim): return r'MULTILINESTRING\s*%s' % _get_one_multilinestring_pattern(dim) def _get_one_polygon_pattern(dim): one_line = _get_one_line_pattern(dim) return _rinse_and_repeat % (one_line, one_line) def _get_polygon_pattern(dim): return r'POLYGON\s*%s' % _get_one_polygon_pattern(dim) def _get_one_multipolygon_pattern(dim): one_line = _get_one_polygon_pattern(dim) return _rinse_and_repeat % (one_line, one_line) def _get_multipolygon_pattern(dim): return r'MULTIPOLYGON\s*%s' % _get_one_multipolygon_pattern(dim) class Point(Unicode): """A point type whose native format is a WKT string. You can use :func:`shapely.wkt.loads` to get a proper point type. It's a subclass of the :class:`Unicode` type, so regular Unicode constraints apply. The only additional parameter is the number of dimensions. :param dim: Number of dimensons. """ __type_name__ = None class Attributes(Unicode.Attributes): dim = None @staticmethod def Value(x, y, prec=15): return ('POINT(%%3.%(prec)sf %%3.%(prec)sf)' % {'prec': prec}) % (x,y) def __new__(cls, dim=None, **kwargs): assert dim in (None, 2, 3) if dim is not None: kwargs['dim'] = dim kwargs['pattern'] = _get_point_pattern(dim) kwargs['type_name'] = 'point%dd' % dim retval = SimpleModel.__new__(cls, **kwargs) retval.__namespace__ = 'http://spyne.io/schema' retval.__extends__ = Unicode retval.__orig__ = Unicode return retval class Line(Unicode): """A line type whose native format is a WKT string. You can use :func:`shapely.wkt.loads` to get a proper line type. It's a subclass of the :class:`Unicode` type, so regular Unicode constraints apply. The only additional parameter is the number of dimensions. :param dim: Number of dimensons. 
""" __type_name__ = None class Attributes(Unicode.Attributes): dim = None def __new__(cls, dim=None, **kwargs): assert dim in (None, 2, 3) if dim is not None: kwargs['dim'] = dim kwargs['pattern'] = _get_linestring_pattern(dim) kwargs['type_name'] = 'line%dd' % dim retval = SimpleModel.__new__(cls, **kwargs) retval.__namespace__ = 'http://spyne.io/schema' retval.__extends__ = Unicode retval.__orig__ = Unicode return retval LineString = Line class Polygon(Unicode): """A polygon type whose native format is a WKT string. You can use :func:`shapely.wkt.loads` to get a proper polygon type. It's a subclass of the :class:`Unicode` type, so regular Unicode constraints apply. The only additional parameter is the number of dimensions. :param dim: Number of dimensons. """ __type_name__ = None class Attributes(Unicode.Attributes): dim = None def __new__(cls, dim=None, **kwargs): assert dim in (None, 2, 3) if dim is not None: kwargs['dim'] = dim kwargs['pattern'] = _get_polygon_pattern(dim) kwargs['type_name'] = 'polygon%dd' % dim retval = SimpleModel.__new__(cls, **kwargs) retval.__namespace__ = 'http://spyne.io/schema' retval.__extends__ = Unicode retval.__orig__ = Unicode return retval class MultiPoint(Unicode): """A MultiPoint type whose native format is a WKT string. You can use :func:`shapely.wkt.loads` to get a proper MultiPoint type. It's a subclass of the :class:`Unicode` type, so regular Unicode constraints apply. The only additional parameter is the number of dimensions. :param dim: Number of dimensons. """ __type_name__ = None class Attributes(Unicode.Attributes): dim = None def __new__(cls, dim=None, **kwargs): assert dim in (None, 2, 3) if dim is not None: kwargs['dim'] = dim kwargs['pattern'] = _get_multipoint_pattern(dim) kwargs['type_name'] = 'multiPoint%dd' % dim retval = SimpleModel.__new__(cls, **kwargs) retval.__namespace__ = 'http://spyne.io/schema' retval.__extends__ = Unicode retval.__orig__ = Unicode return retval class MultiLine(Unicode): """A MultiLine type whose native format is a WKT string. You can use :func:`shapely.wkt.loads` to get a proper MultiLine type. It's a subclass of the :class:`Unicode` type, so regular Unicode constraints apply. The only additional parameter is the number of dimensions. :param dim: Number of dimensons. """ __type_name__ = None class Attributes(Unicode.Attributes): dim = None def __new__(cls, dim=None, **kwargs): assert dim in (None, 2, 3) if dim is not None: kwargs['dim'] = dim kwargs['pattern'] = _get_multilinestring_pattern(dim) kwargs['type_name'] = 'multiLine%dd' % dim retval = SimpleModel.__new__(cls, **kwargs) retval.__namespace__ = 'http://spyne.io/schema' retval.__extends__ = Unicode retval.__orig__ = Unicode return retval MultiLineString = MultiLine class MultiPolygon(Unicode): """A MultiPolygon type whose native format is a WKT string. You can use :func:`shapely.wkt.loads` to get a proper MultiPolygon type. It's a subclass of the :class:`Unicode` type, so regular Unicode constraints apply. The only additional parameter is the number of dimensions. :param dim: Number of dimensons. """ __type_name__ = None class Attributes(Unicode.Attributes): dim = None def __new__(cls, dim=None, **kwargs): assert dim in (None, 2, 3) if dim is not None: kwargs['dim'] = dim kwargs['pattern'] = _get_multipolygon_pattern(dim) kwargs['type_name'] = 'multipolygon%dd' % dim retval = SimpleModel.__new__(cls, **kwargs) retval.__namespace__ = 'http://spyne.io/schema' retval.__extends__ = Unicode retval.__orig__ = Unicode return retval
from __future__ import division from __future__ import unicode_literals __author__ = "Joseph Gomes" __copyright__ = "Copyright 2017, Stanford University" __license__ = "MIT" import sys from deepchem.models.tensorgraph.layers import Layer, Feature, Label, AtomicConvolution, L2Loss, ReduceMean from deepchem.models import TensorGraph import numpy as np import tensorflow as tf import itertools def InitializeWeightsBiases(prev_layer_size, size, weights=None, biases=None, name=None): """Initializes weights and biases to be used in a fully-connected layer. Parameters ---------- prev_layer_size: int Number of features in previous layer. size: int Number of nodes in this layer. weights: tf.Tensor, optional (Default None) Weight tensor. biases: tf.Tensor, optional (Default None) Bias tensor. name: str Name for this op, optional (Defaults to 'fully_connected' if None) Returns ------- weights: tf.Variable Initialized weights. biases: tf.Variable Initialized biases. """ if weights is None: weights = tf.truncated_normal([prev_layer_size, size], stddev=0.01) if biases is None: biases = tf.zeros([size]) with tf.name_scope(name, 'fully_connected', [weights, biases]): w = tf.Variable(weights, name='w') b = tf.Variable(biases, name='b') return w, b class AtomicConvScore(Layer): def __init__(self, atom_types, layer_sizes, **kwargs): self.atom_types = atom_types self.layer_sizes = layer_sizes super(AtomicConvScore, self).__init__(**kwargs) def create_tensor(self, in_layers=None, set_tensors=True, **kwargs): frag1_layer = self.in_layers[0].out_tensor frag2_layer = self.in_layers[1].out_tensor complex_layer = self.in_layers[2].out_tensor frag1_z = self.in_layers[3].out_tensor frag2_z = self.in_layers[4].out_tensor complex_z = self.in_layers[5].out_tensor atom_types = self.atom_types layer_sizes = self.layer_sizes num_layers = len(layer_sizes) weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes] bias_init_consts = [0.0] * num_layers weights = [] biases = [] output_weights = [] output_biases = [] n_features = int(frag1_layer.get_shape()[-1]) for ind, atomtype in enumerate(atom_types): prev_layer_size = n_features weights.append([]) biases.append([]) output_weights.append([]) output_biases.append([]) for i in range(num_layers): weight, bias = InitializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.truncated_normal( shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i], shape=[layer_sizes[i]])) weights[ind].append(weight) biases[ind].append(bias) prev_layer_size = layer_sizes[i] weight, bias = InitializeWeightsBiases(prev_layer_size, 1) output_weights[ind].append(weight) output_biases[ind].append(bias) def atomnet(current_input, atomtype): prev_layer = current_input for i in range(num_layers): layer = tf.nn.xw_plus_b(prev_layer, weights[atomtype][i], biases[atomtype][i]) layer = tf.nn.relu(layer) prev_layer = layer output_layer = tf.squeeze( tf.nn.xw_plus_b(prev_layer, output_weights[atomtype][0], output_biases[atomtype][0])) return output_layer frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32) frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy = [] frag2_atomtype_energy = [] complex_atomtype_energy = [] for ind, atomtype in enumerate(atom_types): frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer) frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer) complex_outputs = tf.map_fn(lambda x: 
atomnet(x, ind), complex_layer) cond = tf.equal(frag1_z, atomtype) frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros)) cond = tf.equal(frag2_z, atomtype) frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros)) cond = tf.equal(complex_z, atomtype) complex_atomtype_energy.append( tf.where(cond, complex_outputs, complex_zeros)) frag1_outputs = tf.add_n(frag1_atomtype_energy) frag2_outputs = tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy = tf.reduce_sum(frag2_outputs, 1) complex_energy = tf.reduce_sum(complex_outputs, 1) binding_energy = complex_energy - (frag1_energy + frag2_energy) self.out_tensor = tf.expand_dims(binding_energy, axis=1) return self.out_tensor class AtomicConvModel(TensorGraph): def __init__(self, frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35., 53., -1. ], radial=[[ 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0 ], [0.0, 4.0, 8.0], [0.4]], layer_sizes=[32, 32, 16], learning_rate=0.001, **kwargs): """Implements an Atomic Convolution Model. Implements the atomic convolutional networks as introduced in https://arxiv.org/abs/1703.10603. The atomic convolutional networks function as a variant of graph convolutions. The difference is that the "graph" here is the nearest neighbors graph in 3D space. The AtomicConvModel leverages these connections in 3D space to train models that learn to predict energetic state starting from the spatial geometry of the model. Params ------ frag1_num_atoms: int Number of atoms in first fragment frag2_num_atoms: int Number of atoms in sec max_num_neighbors: int Maximum number of neighbors possible for an atom. Recall neighbors are spatial neighbors. atom_types: list List of atoms recognized by model. Atoms are indicated by their nuclear numbers. radial: list TODO: add description layer_sizes: list TODO: add description learning_rate: float Learning rate for the model. """ # TODO: Turning off queue for now. Safe to re-activate? 
super(AtomicConvModel, self).__init__(use_queue=False, **kwargs) self.complex_num_atoms = complex_num_atoms self.frag1_num_atoms = frag1_num_atoms self.frag2_num_atoms = frag2_num_atoms self.max_num_neighbors = max_num_neighbors self.batch_size = batch_size self.atom_types = atom_types rp = [x for x in itertools.product(*radial)] self.frag1_X = Feature(shape=(batch_size, frag1_num_atoms, 3)) self.frag1_nbrs = Feature( shape=(batch_size, frag1_num_atoms, max_num_neighbors)) self.frag1_nbrs_z = Feature( shape=(batch_size, frag1_num_atoms, max_num_neighbors)) self.frag1_z = Feature(shape=(batch_size, frag1_num_atoms)) self.frag2_X = Feature(shape=(batch_size, frag2_num_atoms, 3)) self.frag2_nbrs = Feature( shape=(batch_size, frag2_num_atoms, max_num_neighbors)) self.frag2_nbrs_z = Feature( shape=(batch_size, frag2_num_atoms, max_num_neighbors)) self.frag2_z = Feature(shape=(batch_size, frag2_num_atoms)) self.complex_X = Feature(shape=(batch_size, complex_num_atoms, 3)) self.complex_nbrs = Feature( shape=(batch_size, complex_num_atoms, max_num_neighbors)) self.complex_nbrs_z = Feature( shape=(batch_size, complex_num_atoms, max_num_neighbors)) self.complex_z = Feature(shape=(batch_size, complex_num_atoms)) frag1_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None, in_layers=[self.frag1_X, self.frag1_nbrs, self.frag1_nbrs_z]) frag2_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None, in_layers=[self.frag2_X, self.frag2_nbrs, self.frag2_nbrs_z]) complex_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None, in_layers=[self.complex_X, self.complex_nbrs, self.complex_nbrs_z]) score = AtomicConvScore( self.atom_types, layer_sizes, in_layers=[ frag1_conv, frag2_conv, complex_conv, self.frag1_z, self.frag2_z, self.complex_z ]) self.label = Label(shape=(None, 1)) loss = ReduceMean(in_layers=L2Loss(in_layers=[score, self.label])) self.add_output(score) self.set_loss(loss) def default_generator(self, dataset, epochs=1, predict=False, deterministic=True, pad_batches=True): complex_num_atoms = self.complex_num_atoms frag1_num_atoms = self.frag1_num_atoms frag2_num_atoms = self.frag2_num_atoms max_num_neighbors = self.max_num_neighbors batch_size = self.batch_size def replace_atom_types(z): def place_holder(i): if i in self.atom_types: return i return -1 return np.array([place_holder(x) for x in z]) for epoch in range(epochs): for ind, (F_b, y_b, w_b, ids_b) in enumerate( dataset.iterbatches( batch_size, deterministic=True, pad_batches=pad_batches)): N = complex_num_atoms N_1 = frag1_num_atoms N_2 = frag2_num_atoms M = max_num_neighbors orig_dict = {} batch_size = F_b.shape[0] num_features = F_b[0][0].shape[1] frag1_X_b = np.zeros((batch_size, N_1, num_features)) for i in range(batch_size): frag1_X_b[i] = F_b[i][0] orig_dict[self.frag1_X] = frag1_X_b frag2_X_b = np.zeros((batch_size, N_2, num_features)) for i in range(batch_size): frag2_X_b[i] = F_b[i][3] orig_dict[self.frag2_X] = frag2_X_b complex_X_b = np.zeros((batch_size, N, num_features)) for i in range(batch_size): complex_X_b[i] = F_b[i][6] orig_dict[self.complex_X] = complex_X_b frag1_Nbrs = np.zeros((batch_size, N_1, M)) frag1_Z_b = np.zeros((batch_size, N_1)) for i in range(batch_size): z = replace_atom_types(F_b[i][2]) frag1_Z_b[i] = z frag1_Nbrs_Z = np.zeros((batch_size, N_1, M)) for atom in range(N_1): for i in range(batch_size): atom_nbrs = F_b[i][1].get(atom, "") frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in 
enumerate(atom_nbrs): frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j] orig_dict[self.frag1_nbrs] = frag1_Nbrs orig_dict[self.frag1_nbrs_z] = frag1_Nbrs_Z orig_dict[self.frag1_z] = frag1_Z_b frag2_Nbrs = np.zeros((batch_size, N_2, M)) frag2_Z_b = np.zeros((batch_size, N_2)) for i in range(batch_size): z = replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z frag2_Nbrs_Z = np.zeros((batch_size, N_2, M)) for atom in range(N_2): for i in range(batch_size): atom_nbrs = F_b[i][4].get(atom, "") frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j] orig_dict[self.frag2_nbrs] = frag2_Nbrs orig_dict[self.frag2_nbrs_z] = frag2_Nbrs_Z orig_dict[self.frag2_z] = frag2_Z_b complex_Nbrs = np.zeros((batch_size, N, M)) complex_Z_b = np.zeros((batch_size, N)) for i in range(batch_size): z = replace_atom_types(F_b[i][8]) complex_Z_b[i] = z complex_Nbrs_Z = np.zeros((batch_size, N, M)) for atom in range(N): for i in range(batch_size): atom_nbrs = F_b[i][7].get(atom, "") complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j] orig_dict[self.complex_nbrs] = complex_Nbrs orig_dict[self.complex_nbrs_z] = complex_Nbrs_Z orig_dict[self.complex_z] = complex_Z_b orig_dict[self.label] = np.reshape(y_b, newshape=(batch_size, 1)) yield orig_dict
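# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of how the AtomicConvModel defined above might be
# trained. It assumes `dataset` is a deepchem Dataset whose per-sample features
# follow the nine-element layout unpacked in default_generator (frag1 coords /
# neighbor dict / Z, frag2 ..., complex ...); building such a dataset is outside
# the scope of this sketch, and the `nb_epoch` keyword is assumed to match other
# TensorGraph-based deepchem models. `train_atomic_conv` is a hypothetical name.
def train_atomic_conv(dataset, epochs=10):
    model = AtomicConvModel(batch_size=24,
                            layer_sizes=[32, 32, 16],
                            learning_rate=0.001)
    model.fit(dataset, nb_epoch=epochs)  # TensorGraph training entry point (assumed signature)
    return model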
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Abstract tests against the interfaces of the base layer of RPC Framework.""" import threading import time from grpc.framework.base import interfaces from grpc.framework.base import util from grpc.framework.foundation import stream from grpc.framework.foundation import stream_testing from grpc.framework.foundation import stream_util TICK = 0.1 SMALL_TIMEOUT = TICK * 50 STREAM_LENGTH = 100 SYNCHRONOUS_ECHO = 'synchronous echo' ASYNCHRONOUS_ECHO = 'asynchronous echo' IMMEDIATE_FAILURE = 'immediate failure' TRIGGERED_FAILURE = 'triggered failure' WAIT_ON_CONDITION = 'wait on condition' EMPTY_OUTCOME_DICT = { interfaces.Outcome.COMPLETED: 0, interfaces.Outcome.CANCELLED: 0, interfaces.Outcome.EXPIRED: 0, interfaces.Outcome.RECEPTION_FAILURE: 0, interfaces.Outcome.TRANSMISSION_FAILURE: 0, interfaces.Outcome.SERVICER_FAILURE: 0, interfaces.Outcome.SERVICED_FAILURE: 0, } def _synchronous_echo(output_consumer): return stream_util.TransformingConsumer(lambda x: x, output_consumer) class AsynchronousEcho(stream.Consumer): """A stream.Consumer that echoes its input to another stream.Consumer.""" def __init__(self, output_consumer, pool): self._lock = threading.Lock() self._output_consumer = output_consumer self._pool = pool self._queue = [] self._spinning = False def _spin(self, value, complete): while True: if value: if complete: self._output_consumer.consume_and_terminate(value) else: self._output_consumer.consume(value) elif complete: self._output_consumer.terminate() with self._lock: if self._queue: value, complete = self._queue.pop(0) else: self._spinning = False return def consume(self, value): with self._lock: if self._spinning: self._queue.append((value, False)) else: self._spinning = True self._pool.submit(self._spin, value, False) def terminate(self): with self._lock: if self._spinning: self._queue.append((None, True)) else: self._spinning = True self._pool.submit(self._spin, None, True) def consume_and_terminate(self, value): with self._lock: if self._spinning: self._queue.append((value, True)) else: 
self._spinning = True self._pool.submit(self._spin, value, True) class TestServicer(interfaces.Servicer): """An interfaces.Servicer with instrumented for testing.""" def __init__(self, pool): self._pool = pool self.condition = threading.Condition() self._released = False def service(self, name, context, output_consumer): if name == SYNCHRONOUS_ECHO: return _synchronous_echo(output_consumer) elif name == ASYNCHRONOUS_ECHO: return AsynchronousEcho(output_consumer, self._pool) elif name == IMMEDIATE_FAILURE: raise ValueError() elif name == TRIGGERED_FAILURE: raise NotImplementedError elif name == WAIT_ON_CONDITION: with self.condition: while not self._released: self.condition.wait() return _synchronous_echo(output_consumer) else: raise NotImplementedError() def release(self): with self.condition: self._released = True self.condition.notify_all() class EasyServicedIngestor(interfaces.ServicedIngestor): """A trivial implementation of interfaces.ServicedIngestor.""" def __init__(self, consumer): self._consumer = consumer def consumer(self, operation_context): """See interfaces.ServicedIngestor.consumer for specification.""" return self._consumer class FrontAndBackTest(object): """A test suite usable against any joined Front and Back.""" # Pylint doesn't know that this is a unittest.TestCase mix-in. # pylint: disable=invalid-name def testSimplestCall(self): """Tests the absolute simplest call - a one-ticket fire-and-forget.""" self.front.operate( SYNCHRONOUS_ECHO, None, True, SMALL_TIMEOUT, util.none_serviced_subscription(), 'test trace ID') util.wait_for_idle(self.front) self.assertEqual( 1, self.front.operation_stats()[interfaces.Outcome.COMPLETED]) # Assuming nothing really pathological (such as pauses on the order of # SMALL_TIMEOUT interfering with this test) there are a two different ways # the back could have experienced execution up to this point: # (1) The ticket is still either in the front waiting to be transmitted # or is somewhere on the link between the front and the back. The back has # no idea that this test is even happening. Calling wait_for_idle on it # would do no good because in this case the back is idle and the call would # return with the ticket bound for it still in the front or on the link. back_operation_stats = self.back.operation_stats() first_back_possibility = EMPTY_OUTCOME_DICT # (2) The ticket arrived at the back and the back completed the operation. second_back_possibility = dict(EMPTY_OUTCOME_DICT) second_back_possibility[interfaces.Outcome.COMPLETED] = 1 self.assertIn( back_operation_stats, (first_back_possibility, second_back_possibility)) # It's true that if the ticket had arrived at the back and the back had # begun processing that wait_for_idle could hold test execution until the # back completed the operation, but that doesn't really collapse the # possibility space down to one solution. 
def testEntireEcho(self): """Tests a very simple one-ticket-each-way round-trip.""" test_payload = 'test payload' test_consumer = stream_testing.TestConsumer() subscription = util.full_serviced_subscription( EasyServicedIngestor(test_consumer)) self.front.operate( ASYNCHRONOUS_ECHO, test_payload, True, SMALL_TIMEOUT, subscription, 'test trace ID') util.wait_for_idle(self.front) util.wait_for_idle(self.back) self.assertEqual( 1, self.front.operation_stats()[interfaces.Outcome.COMPLETED]) self.assertEqual( 1, self.back.operation_stats()[interfaces.Outcome.COMPLETED]) self.assertListEqual([(test_payload, True)], test_consumer.calls) def testBidirectionalStreamingEcho(self): """Tests sending multiple tickets each way.""" test_payload_template = 'test_payload: %03d' test_payloads = [test_payload_template % i for i in range(STREAM_LENGTH)] test_consumer = stream_testing.TestConsumer() subscription = util.full_serviced_subscription( EasyServicedIngestor(test_consumer)) operation = self.front.operate( SYNCHRONOUS_ECHO, None, False, SMALL_TIMEOUT, subscription, 'test trace ID') for test_payload in test_payloads: operation.consumer.consume(test_payload) operation.consumer.terminate() util.wait_for_idle(self.front) util.wait_for_idle(self.back) self.assertEqual( 1, self.front.operation_stats()[interfaces.Outcome.COMPLETED]) self.assertEqual( 1, self.back.operation_stats()[interfaces.Outcome.COMPLETED]) self.assertListEqual(test_payloads, test_consumer.values()) def testCancellation(self): """Tests cancelling a long-lived operation.""" test_consumer = stream_testing.TestConsumer() subscription = util.full_serviced_subscription( EasyServicedIngestor(test_consumer)) operation = self.front.operate( ASYNCHRONOUS_ECHO, None, False, SMALL_TIMEOUT, subscription, 'test trace ID') operation.cancel() util.wait_for_idle(self.front) self.assertEqual( 1, self.front.operation_stats()[interfaces.Outcome.CANCELLED]) util.wait_for_idle(self.back) self.assertListEqual([], test_consumer.calls) # Assuming nothing really pathological (such as pauses on the order of # SMALL_TIMEOUT interfering with this test) there are a two different ways # the back could have experienced execution up to this point: # (1) Both tickets are still either in the front waiting to be transmitted # or are somewhere on the link between the front and the back. The back has # no idea that this test is even happening. Calling wait_for_idle on it # would do no good because in this case the back is idle and the call would # return with the tickets bound for it still in the front or on the link. back_operation_stats = self.back.operation_stats() first_back_possibility = EMPTY_OUTCOME_DICT # (2) Both tickets arrived within SMALL_TIMEOUT of one another at the back. # The back started processing based on the first ticket and then stopped # upon receiving the cancellation ticket. 
        second_back_possibility = dict(EMPTY_OUTCOME_DICT)
        second_back_possibility[interfaces.Outcome.CANCELLED] = 1
        self.assertIn(
            back_operation_stats,
            (first_back_possibility, second_back_possibility))

    def testExpiration(self):
        """Tests that operations time out."""
        timeout = TICK * 2
        allowance = TICK  # How much extra time to allow beyond the timeout.
        condition = threading.Condition()
        test_payload = 'test payload'
        subscription = util.termination_only_serviced_subscription()
        start_time = time.time()
        outcome_cell = [None]
        termination_time_cell = [None]

        def termination_action(outcome):
            with condition:
                outcome_cell[0] = outcome
                termination_time_cell[0] = time.time()
                condition.notify()

        with condition:
            operation = self.front.operate(
                SYNCHRONOUS_ECHO, test_payload, False, timeout, subscription,
                'test trace ID')
            operation.context.add_termination_callback(termination_action)
            while outcome_cell[0] is None:
                condition.wait()

        duration = termination_time_cell[0] - start_time
        self.assertLessEqual(timeout, duration)
        self.assertLess(duration, timeout + allowance)
        self.assertEqual(interfaces.Outcome.EXPIRED, outcome_cell[0])
        util.wait_for_idle(self.front)
        self.assertEqual(
            1, self.front.operation_stats()[interfaces.Outcome.EXPIRED])
        util.wait_for_idle(self.back)
        self.assertLessEqual(
            1, self.back.operation_stats()[interfaces.Outcome.EXPIRED])
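# --- Illustration (not part of the original test module) --------------------
# The echo servicers above hand values back through objects implementing the
# stream.Consumer interface (consume, terminate, consume_and_terminate). The
# class below is a hypothetical, minimal consumer of that kind, similar in
# spirit to the TestConsumer used by these tests; `stream` is the
# grpc.framework.foundation.stream module imported at the top of this file.
class CollectingConsumer(stream.Consumer):
    """Collects every consumed value and records whether the stream ended."""

    def __init__(self):
        self.values = []
        self.terminated = False

    def consume(self, value):
        self.values.append(value)

    def terminate(self):
        self.terminated = True

    def consume_and_terminate(self, value):
        self.values.append(value)
        self.terminated = True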
# Josh Berry
# CodeWatch
# August 2014
#

import argparse
import xml.etree.ElementTree as ET
import re
import requests
import string
import random
import os

from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
from requests_ntlm import HttpNtlmAuth

# Get all the arguments for the tool
parser = argparse.ArgumentParser(prog='jnlpdownloader.py',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='Download JAR files associated with a JNLP file',
    epilog='Example: jnlpdownloader.py --link https://www.example.com/java/jnlp/sample.jnlp')
parser.add_argument('--link', required=True,
    help='the full URL to the JNLP file (must include http(s)://')
parser.add_argument('--ntlmuser', default=None,
    help='use NTLM authentication with this username (format of domain \\ username)')
parser.add_argument('--ntlmpass', default=None,
    help='use NTLM authentication with this password')
parser.add_argument('--basicuser', default=None,
    help='use BASIC authentication with this username')
parser.add_argument('--basicpass', default=None,
    help='use BASIC authentication with this password')
parser.add_argument('--digestuser', default=None,
    help='use DIGEST authentication with this username')
parser.add_argument('--digestpass', default=None,
    help='use DIGEST authentication with this password')
parser.add_argument('--cookie', default=None,
    help='use a previously established session cookie')

# Stick arguments in a variable and then create a session
args = vars(parser.parse_args())
r = ''
session = requests.Session()

# Random value for directory creation
randDir = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(10))

# Check to see if BASIC/DIGEST/NTLM/Cookie authentication is being performed
# If so, pass credentials to session, if not, just connect to JNLP URL
if args['ntlmuser'] is not None and args['ntlmpass'] is not None:
    session.auth = HttpNtlmAuth(args['ntlmuser'], args['ntlmpass'], session)
    r = session.get(args['link'], verify=False)
elif args['basicuser'] is not None and args['basicpass'] is not None:
    session.auth = HTTPBasicAuth(args['basicuser'], args['basicpass'])
    r = session.get(args['link'], verify=False)
elif args['digestuser'] is not None and args['digestpass'] is not None:
    session.auth = HTTPDigestAuth(args['digestuser'], args['digestpass'])
    r = session.get(args['link'], verify=False)
elif args['cookie'] is not None:
    cookies = {}

    # Check to see if the cookie has a semicolon, if so there might be multiple cookies
    if re.search(';', args['cookie']):
        cookielist = args['cookie'].split(';')

        # Loop through list of cookies
        for jnlpcookies in cookielist:
            # If there isn't an equal and some sort of content, then it isn't a valid cookie,
            # otherwise add to list of cookies
            if re.search('[a-zA-Z0-9]', jnlpcookies) and re.search('[=]', jnlpcookies):
                cookieparts = jnlpcookies.split('=')
                cookies[cookieparts[0]] = cookieparts[1]
    else:
        # Check to see if cookie has =, if not it is malformed and send dummy cookie
        # If so, split at the = into correct name/value pairs
        if re.search('=', args['cookie']):
            cookielist = args['cookie'].split('=')
            cookies[cookielist[0]] = cookielist[1]
        else:
            cookies['jnlp'] = 'jnlpdownloader'

    r = session.get(args['link'], cookies=cookies, verify=False)
else:
    r = session.get(args['link'], verify=False)

# If the status code is not 200, the file was likely inaccessible so we exit
if r.status_code != 200:
    print '[*] Link was inaccessible, exiting.'
exit(0) xmltree = '' xmlroot = '' jnlpurl = '' # Attempt to read the JNLP XML, if this fails then exit try: xmltree = ET.ElementTree(ET.fromstring(r.content)) except: print '[*] JNLP file was misformed, exiting.' exit(0) # Get the XML document structure and pull out the main link try: xmlroot = xmltree.getroot() jnlpurl = xmlroot.attrib['codebase']+'/' except: print '[*] JNLP file was misformed, exiting.' exit(0) # If the JNLP file was good, create directory to store JARs # First get the path delimeter for the OS path_delim = '' if 'posix' in os.name: path_delim = '/' else: path_delim = '\\' # Next, try to create the directory or default to current try: if not os.path.exists(os.getcwd() + path_delim + randDir): os.mkdir(os.getcwd() + path_delim + randDir) else: print '[*] Random directory already exists, defaulting to current.' randDir = '.' except: print '[*] Failed to create random directory, defaulting to current.' randDir = '.' jnlplinks = [] i = 0 # Loop through each JAR listed in the JNLP file for jars in xmlroot.iter('jar'): # Get the file, path, and URI jnlpfile = jars.get('href').rsplit('/')[1] jnlppath = jars.get('href').rsplit('/')[0] + '/' jnlpuri = jars.get('href') # If the JAR has version info, then store it as we might need to use it if jars.get('version') is None: jnlpalt = None jnlpver = None altfile = None else: jnlpalt = jnlppath + jnlpfile.rsplit('.jar')[0] + '__V' + jars.get('version') + '.jar' altfile = jnlpfile.rsplit('.jar')[0] + '__V' + jars.get('version') + '.jar' jnlpver = jnlpuri + '?version-id=' + jars.get('version') # Add each JAR URI, Version, Filename, Alternate URI, and alternate filename to a list # These alternates are based on behavior I have seen where Java uses the version # information in the file name jnlplinks.append([jnlpuri, jnlpver, jnlpfile, jnlpalt, altfile]) i+=1 # Loop through each Native Library listed in the JNLP file for nativelibs in xmlroot.iter('nativelib'): # Get the file, path, and URI jnlpfile = nativelibs.get('href').rsplit('/')[1] jnlppath = nativelibs.get('href').rsplit('/')[0] + '/' jnlpuri = nativelibs.get('href') # If the Native Library has version info, then store it as we might need to use it if nativelibs.get('version') is None: jnlpalt = None jnlpver = None altfile = None else: jnlpalt = jnlppath + jnlpfile.rsplit('.jar')[0] + '__V' + nativelibs.get('version') + '.jar' altfile = jnlpfile.rsplit('.jar')[0] + '__V' + nativelibs.get('version') + '.jar' jnlpver = jnlpuri + '?version-id=' + nativelibs.get('version') # Add each JAR URI, Version, Filename, Alternate URI, and alternate filename to a list # These alternates are based on behavior I have seen where Java uses the version # information in the file name jnlplinks.append([jnlpuri, jnlpver, jnlpfile, jnlpalt, altfile]) i+=1 # Loop through the list of lists with all the URI, version, etc info for link in jnlplinks: # Make a request for the file print '[+] Attempting to download: '+jnlpurl+link[0] jnlpresp = session.get(jnlpurl + link[0]) # If the request succeeded, then write the JAR to disk if jnlpresp.status_code == 200: print '[-] Saving file: '+link[2]+' to '+randDir output = open(randDir+'/'+link[2], 'wb') output.write(jnlpresp.content) output.close() else: # If the straight request didn't succeed, try to download with version info if link[1] is not None: # Make a request for the file print '[+] Attempting to download: '+jnlpurl+link[1] jnlpresp = session.get(jnlpurl + link[1]) # If the request succeeded, then write the JAR to disk if jnlpresp.status_code == 200: print 
'[-] Saving file: '+link[2]+' to '+randDir output = open(randDir+'/'+link[2], 'wb') output.write(jnlpresp.content) output.close() # If the straight request didn't succeed, try to download with alternate name if link[3] is not None and link[4] is not None: # Make a request for the file print '[+] Attempting to download: '+jnlpurl+link[3] jnlpresp = session.get(jnlpurl + link[3]) # If the request succeeded, then write the JAR to disk if jnlpresp.status_code == 200: print '[-] Saving file: '+link[4]+' to '+randDir output = open(randDir+'/'+link[4], 'wb') output.write(jnlpresp.content) output.close()
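# --- Helper sketch (not part of the original script) ------------------------
# The download loop above tries up to three URL forms per <jar>/<nativelib>
# entry: the plain href, the href with ?version-id=, and a __V<version> file
# name variant. The hypothetical helper below just makes those candidates
# explicit; like the original, it assumes the href contains a directory
# component and ends in .jar.
def candidate_urls(codebase, href, version=None):
    candidates = [codebase + href]
    if version is not None:
        candidates.append(codebase + href + '?version-id=' + version)
        path, name = href.rsplit('/', 1)
        candidates.append(codebase + path + '/' + name.rsplit('.jar')[0] + '__V' + version + '.jar')
    return candidates

# candidate_urls('https://www.example.com/java/jnlp/', 'lib/app.jar', '1.2')
# -> ['https://www.example.com/java/jnlp/lib/app.jar',
#     'https://www.example.com/java/jnlp/lib/app.jar?version-id=1.2',
#     'https://www.example.com/java/jnlp/lib/app__V1.2.jar']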
import cStringIO from functools import partial import operator as op import antlr3 from antlr3.tree import CommonTree as AST from grammar.JavaLexer import JavaLexer as Lexer from grammar.JavaParser import JavaParser as Parser from lib.typecheck import * import lib.const as C from lib.enum import enum import lib.visit as v from .. import util from expression import Expression, parse_e, gen_E_id, gen_E_bop # s ::= e | assume e | assert e | return e? | e := e | if e s s? | while e s # | repeat e s # syntactic sugar borrowed from sketch # | minrepeat s # syntactic sugar borrowed from sketch # | for e e s | break | try s (catch e s)* (finally s)? # | s; s # represented as a list C.S = enum("EXP", "ASSUME", "ASSERT", "RETURN", "ASSIGN", "IF", "WHILE", "REPEAT", "MINREPEAT", "FOR", "BREAK", "TRY") class Statement(v.BaseNode): def __init__(self, k, **kwargs): self._kind = k for key in kwargs: setattr(self, key, kwargs[key]) @property def kind(self): return self._kind def __str__(self, e_printer=str): curried = lambda st: st.__str__(e_printer) buf = cStringIO.StringIO() if self._kind == C.S.EXP: buf.write(e_printer(self.e) + ';') elif self._kind == C.S.ASSUME: buf.write("assume " + e_printer(self.e) + ';') elif self._kind == C.S.ASSERT: buf.write("assert " + e_printer(self.e) + ';') elif self._kind == C.S.RETURN: buf.write("return") if hasattr(self, "e"): buf.write(' '+ e_printer(self.e)) buf.write(';') elif self._kind == C.S.ASSIGN: buf.write(e_printer(self.le) + " = " + e_printer(self.re) + ';') elif self._kind == C.S.IF: e = e_printer(self.e) t = '\n'.join(map(curried, self.t)) f = '\n'.join(map(curried, self.f)) buf.write("if (" + e + ") {\n" + t + "\n}") if f: buf.write("\nelse {\n" + f + "\n}") elif self._kind == C.S.WHILE: e = e_printer(self.e) b = '\n'.join(map(curried, self.b)) buf.write("while (" + e + ") {\n" + b + "\n}") elif self._kind == C.S.REPEAT: e = e_printer(self.e) b = '\n'.join(map(curried, self.b)) buf.write("repeat (" + e + ") {\n" + b + "\n}") elif self._kind == C.S.MINREPEAT: b = '\n'.join(map(curried, self.b)) buf.write("minrepeat {\n" + b + "\n}") elif self._kind == C.S.FOR: e_def = e_printer(self.i) e_iter = e_printer(self.init) b = '\n'.join(map(curried, self.b)) buf.write("for (" + e_def + " : " + e_iter + ") {\n" + b + "\n}") elif self._kind == C.S.BREAK: buf.write("break;") elif self._kind == C.S.TRY: b = '\n'.join(map(curried, self.b)) buf.write("try {\n" + b + "\n}") for (e, cs) in self.catches: e = e_printer(e) cs = '\n'.join(map(curried, cs)) buf.write("catch (" + e + ") {\n" + cs + "\n}") fs = '\n'.join(map(curried, self.fs)) buf.write("finally {\n" + fs + "\n}") return buf.getvalue() def __repr__(self): return self.__str__() def accept(self, visitor): f = op.methodcaller("accept", visitor) if self._kind in [C.S.EXP, C.S.ASSERT, C.S.RETURN]: if hasattr(self, "e"): v_e = f(self.e) if type(v_e) is list: return v_e else: self.e = v_e elif self._kind == C.S.ASSIGN: self.le = f(self.le) self.re = f(self.re) elif self._kind == C.S.IF: self.e = f(self.e) self.t = util.flatten(map(f, self.t)) self.f = util.flatten(map(f, self.f)) elif self._kind in [C.S.WHILE, C.S.REPEAT]: self.e = f(self.e) self.b = util.flatten(map(f, self.b)) elif self._kind == C.S.MINREPEAT: self.b = util.flatten(map(f, self.b)) elif self._kind == C.S.FOR: self.i = f(self.i) self.init = f(self.init) self.b = util.flatten(map(f, self.b)) elif self._kind == C.S.TRY: self.b = util.flatten(map(f, self.b)) self.catches = [ ( f(e), util.flatten(map(f, cs)) ) for (e, cs) in self.catches ] self.fs = 
util.flatten(map(f, self.fs)) return visitor.visit(self) def exists(self, pred): if pred(self): return True f = op.methodcaller("exists", pred) if self._kind == C.S.IF: return util.exists(f, self.t + self.f) elif self._kind in [C.S.WHILE, C.S.REPEAT, C.S.MINREPEAT, C.S.FOR]: return util.exists(f, self.b) elif self._kind == C.S.TRY: # TODO: catches return util.exists(f, self.b + self.fs) else: return False @property def has_return(self): f = lambda s: s.kind == C.S.RETURN return self.exists(f) # e -> S(EXP, e) @takes(Expression) @returns(Statement) def gen_S_e(e): return Statement(C.S.EXP, e=e) # e -> S(ASSUME, e) @takes(Expression) @returns(Statement) def gen_S_assume(e): return Statement(C.S.ASSUME, e=e) # e -> S(ASSERT, e) @takes(Expression) @returns(Statement) def gen_S_assert(e): return Statement(C.S.ASSERT, e=e) # e -> S(RETURN, e) @takes(optional(Expression)) @returns(Statement) def gen_S_rtn(e=None): if e: return Statement(C.S.RETURN, e=e) else: return Statement(C.S.RETURN) # le, re -> S(ASSIGN, le, re) @takes(Expression, Expression) @returns(Statement) def gen_S_assign(le, re): return Statement(C.S.ASSIGN, le=le, re=re) # e, ts, fs -> S(IF, e, ts, fs) @takes(Expression, list_of(Statement), list_of(Statement)) @returns(Statement) def gen_S_if(e, ts, fs): return Statement(C.S.IF, e=e, t=ts, f=fs) # e, ss -> S(WHILE, e, ss) @takes(Expression, list_of(Statement)) @returns(Statement) def gen_S_while(e, ss): return Statement(C.S.WHILE, e=e, b=ss) # e, ss -> S(REPEAT, e, ss) @takes(Expression, list_of(Statement)) @returns(Statement) def gen_S_repeat(e, ss): return Statement(C.S.REPEAT, e=e, b=ss) # ss -> S(MINREPEAT, sS) @takes(list_of(Statement)) @returns(Statement) def gen_S_minrepeat(ss): return Statement(C.S.MINREPEAT, b=ss) # e_def, e_init, ss -> S(FOR, e_def, e_init, ss) @takes(Expression, Expression, list_of(Statement)) @returns(Statement) def gen_S_for(e_def, e_init, ss): return Statement(C.S.FOR, i=e_def, init=e_init, b=ss) # () -> S(BREAK) @takes(nothing) @returns(Statement) def gen_S_break(): return Statement(C.S.BREAK) # ss, catches, fs -> S(TRY, ss, catches, fs) @takes(list_of(Statement), list_of(anything), list_of(Statement)) @returns(Statement) def gen_S_try(ss, catches, fs): return Statement(C.S.TRY, b=ss, catches=catches, fs=fs) # { (S... ) ; } => (S... ) ; @takes(list_of(AST)) @returns(list_of(AST)) def rm_braces(nodes): if len(nodes) < 2: return nodes if nodes[0].getText() == '{' and nodes[-1].getText() == '}': return nodes[1:-1] else: return nodes # parse a statement # (STATEMENT ...) @takes("Method", AST) @returns(Statement) def parse_s(mtd, node): curried_s = partial(parse_s, mtd) curried_e = lambda n: parse_e(n, mtd.clazz) _node = node.getChild(0) kind = _node.getText() _nodes = node.getChildren() # (S... (E... ) ';') if kind == C.T.EXP: _nodes = _node.getChildren() # (S... (E... var = (E... )) ';') if len(_nodes) >= 3 and _nodes[-2].getText() == '=': # var can be a list of nodes, e.g., x . y var_node = util.mk_v_node_w_children(_nodes[:-2]) le = curried_e(var_node) re = curried_e(_nodes[-1]) s = gen_S_assign(le, re) else: s = gen_S_e(curried_e(_node)) # (S... assume (E... ) ';') elif kind == "assume": e = curried_e(node.getChild(1)) s = gen_S_assume(e) # (S... assert (E... ) ';') elif kind == "assert": e = curried_e(node.getChild(1)) s = gen_S_assert(e) # (S... return (E... )? ';') elif kind == "return": if node.getChildCount() > 2: s = gen_S_rtn(curried_e(node.getChild(1))) else: s = gen_S_rtn() # (S... if (E... ) { (S... ) } (else { (S... ) })?) 
elif kind == "if": e = curried_e(node.getChild(1)) ss = _nodes[2:] # exclude first two nodes: S... if i = next((i for i, n in enumerate(ss) if n.getText() == "else"), -1) if i == -1: # no else branch t_s = rm_braces(ss) f_s = [] else: t_s = rm_braces(ss[:i]) f_s = rm_braces(ss[i+1:]) ts = map(curried_s, t_s) fs = map(curried_s, f_s) s = gen_S_if(e, ts, fs) # (S... switch (E cond) { (case (E case1) { (S1...) } ) ... (default { Sd } ) } # => # desugaring at this parsing phase # if (cond == case1) { S1 } else if ... else { Sd } elif kind == "switch": def parse_cases(case_node): _case_nodes = case_node.getChildren() label = case_node.getText() if label == "case": e_case = curried_e(_case_nodes[0]) ss_case = map(curried_s, rm_braces(_case_nodes[1:])) elif label == "default": e_case = None ss_case = map(curried_s, rm_braces(_case_nodes)) else: raise Exception("irregular grammar", node.toStringTree()) return (label, e_case, ss_case) e_cond = curried_e(node.getChild(1)) ss = _nodes[2:] # exclude first two nodes: S... switch cases = map(parse_cases, rm_braces(ss)) _cases, _default = util.partition(lambda (l,c,ss): l == "case", cases) def rm_break(ss): return filter(lambda s: s.kind != C.S.BREAK, ss) def desugar(acc, (label, e_case, ss)): if label != "case": return acc # double-check e = gen_E_bop(u"==", e_cond, e_case) return [gen_S_if(e, rm_break(ss), acc)] default_ss = rm_break(_default[0][2]) if _default else [] s = reduce(desugar, _cases, default_ss)[0] # (S... while (E... ) { (S... ) }) elif kind == "while": e = curried_e(node.getChild(1)) ss = _nodes[2:] # exclude first two nodes: while (E... ) b = map(curried_s, rm_braces(ss)) s = gen_S_while(e, b) # (S... repeat (E... ) { (S... ) }) elif kind == "repeat": e = curried_e(node.getChild(1)) ss = _nodes[2:] # exclude first two nodes: repeat (E... ) b = map(curried_s, rm_braces(ss)) s = gen_S_repeat(e, b) # (S... minrepeat { (S... ) }) elif kind == "minrepeat": ss = _nodes[1:] # exclude the first node: minrepeat b = map(curried_s, rm_braces(ss)) s = gen_S_minrepeat(b) # (S... for (FOR_CTRL typ var : (E... ) ) { (S... ) }) elif kind == "for": ctrl = node.getChild(1) ty = ctrl.getChild(0).getText() i = ctrl.getChild(1).getText() mtd.locals[i] = ty # NOTE: incorrect scope, in fact. e_def = gen_E_id(i, ty) e_iter = curried_e(ctrl.getChildren()[-1]) ss = _nodes[2:] # exclude first two nodes: for (... ) b = map(curried_s, rm_braces(ss)) s = gen_S_for(e_def, e_iter, b) # (S... break Id? ';') elif kind == "break": # TODO: break identifier s = gen_S_break() # (S... try { (S... ) } # (catch (PARAMS typ var) { (S... ) })* # (finally { (S... ) })? ) elif kind == "try": catches = [] fs = [] idx = -1 while abs(idx) <= node.getChildCount(): __node = node.getChild(idx) __kind = __node.getText() if __kind == "finally": fs = map(curried_s, rm_braces(__node.getChildren())) elif __kind == "catch": ty = __node.getChild(0).getChild(0).getText() ex = __node.getChild(0).getChild(1).getText() e = gen_E_id(ex, ty) cs = map(curried_s, rm_braces(__node.getChildren()[1:])) catches.append( (e, cs) ) elif __kind == '}': break idx = idx - 1 b = map(curried_s, rm_braces(_nodes[1:idx+1])) s = gen_S_try(b, catches, fs) # (S... typ var (= (E... 
)) ';') elif _nodes[-2].getText() == '=': var = _nodes[-3].getText() # type can be a list of nodes, e.g., List < T > ty_node = util.mk_v_node_w_children(_nodes[:-3]) ty = util.implode_id(ty_node) mtd.locals[var] = ty le = gen_E_id(var, ty) re = curried_e(_nodes[-2].getChild(0)) s = gen_S_assign(le, re) # (DECL typ var ';') # local variable declaration elif node.getText() == C.T.DECL: # type can be a list of nodes, e.g., Class < ? extends Activity > ty_node = util.mk_v_node_w_children(_nodes[:-2]) ty = util.implode_id(ty_node) var = _nodes[-2].getText() mtd.locals[var] = ty e_decl = gen_E_id(var, ty) s = gen_S_e(e_decl) else: raise Exception("unhandled statement", node.toStringTree()) return s # parse a method body # body ::= ';' | { s* } @takes("Method", list_of(AST)) @returns(list_of(Statement)) def parse(mtd, nodes): if len(nodes) < 1: raise SyntaxError if len(nodes) == 1 and nodes[0].toStringTree() == ';': return [] return map(partial(parse_s, mtd), rm_braces(nodes)) # parse and generate statements @takes("Method", unicode) @returns(list_of(Statement)) def to_statements(mtd, s): s_stream = antlr3.StringStream('{' + s + '}') lexer = Lexer(s_stream) t_stream = antlr3.CommonTokenStream(lexer) parser = Parser(t_stream) try: ast = parser.block() return parse(mtd, ast.tree.getChildren()) except antlr3.RecognitionException: traceback.print_stack()
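# --- Illustration (not part of the module above) ----------------------------
# The switch-handling branch of parse_s desugars a switch statement into nested
# ifs by folding over the cases with the default branch as the seed. The sketch
# below mirrors that reduce() using plain strings as stand-ins for the
# Expression and Statement objects the real code builds with gen_E_bop/gen_S_if;
# the function name `desugar_switch` is purely illustrative.
from functools import reduce  # builtin in Python 2, needed explicitly on Python 3

def desugar_switch(cond, cases, default_body):
    def wrap(acc, case):
        label, body = case
        return "if (%s == %s) { %s } else { %s }" % (cond, label, body, acc)
    # Folding left-to-right with the default as the seed means later cases end
    # up as the outer tests, matching the reduce over _cases in parse_s.
    return reduce(wrap, cases, default_body)

# desugar_switch("x", [("1", "a();"), ("2", "b();")], "d();")
# -> 'if (x == 2) { b(); } else { if (x == 1) { a(); } else { d(); } }'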
# -*- coding: utf-8 -*- """ Sahana Eden Menu Structure and Layout @copyright: 2011-2014 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3MainMenu", "S3OptionsMenu", ) import re from gluon import * from gluon.storage import Storage from s3 import * from s3layouts import * # ============================================================================= class S3MainMenu(object): """ The default configurations for the main application menu """ # ------------------------------------------------------------------------- @classmethod def menu(cls): main_menu = MM()( # Modules-menu, align-left cls.menu_modules(), # Service menus, align-right # Note: always define right-hand items in reverse order! cls.menu_help(right=True), cls.menu_lang(right=True), cls.menu_gis(right=True), cls.menu_auth(right=True), cls.menu_admin(right=True), ) return main_menu # ------------------------------------------------------------------------- @classmethod def menu_modules(cls): # --------------------------------------------------------------------- # Modules Menu # @todo: this is very ugly - cleanup or make a better solution # @todo: probably define the menu explicitly? 
# menu_modules = [] all_modules = current.deployment_settings.modules # Home always 1st module = all_modules["default"] menu_modules.append(MM(module.name_nice, c="default", f="index")) # Modules to hide due to insufficient permissions hidden_modules = current.auth.permission.hidden_modules() # The Modules to display at the top level (in order) for module_type in [1, 2, 3, 4, 5, 6, 7, 8, 9]: for module in all_modules: if module in hidden_modules: continue _module = all_modules[module] if (_module.module_type == module_type): if not _module.access: menu_modules.append(MM(_module.name_nice, c=module, f="index")) else: groups = re.split("\|", _module.access)[1:-1] menu_modules.append(MM(_module.name_nice, c=module, f="index", restrict=groups)) # Modules to display off the 'more' menu modules_submenu = [] for module in all_modules: if module in hidden_modules: continue _module = all_modules[module] if (_module.module_type == 10): if not _module.access: modules_submenu.append(MM(_module.name_nice, c=module, f="index")) else: groups = re.split("\|", _module.access)[1:-1] modules_submenu.append(MM(_module.name_nice, c=module, f="index", restrict=groups)) if modules_submenu: # Only show the 'more' menu if there are entries in the list module_more_menu = MM("more", link=False)(modules_submenu) menu_modules.append(module_more_menu) return menu_modules # ------------------------------------------------------------------------- @classmethod def menu_lang(cls, **attr): """ Language menu """ settings = current.deployment_settings if not settings.get_L10n_display_toolbar(): return None languages = current.response.s3.l10n_languages request = current.request menu_lang = MM("Language", **attr) for language in languages: menu_lang.append(MM(languages[language], r=request, translate=False, selectable=False, vars={"_language":language}, ltr=True )) return menu_lang # ------------------------------------------------------------------------- @classmethod def menu_help(cls, **attr): """ Help Menu """ menu_help = MM("Help", c="default", f="help", **attr)( MM("Contact us", f="contact"), MM("About", f="about") ) # ------------------------------------------------------------------- # Now add the available guided tours to the help menu # check that a guided_tour is enabled if current.deployment_settings.get_base_guided_tour(): # load the guided tour configuration from the database table = current.s3db.tour_config logged_in = current.auth.is_logged_in() if logged_in: query = (table.deleted == False) &\ (table.role != "") else: query = (table.deleted == False) &\ (table.role == "") tours = current.db(query).select(table.id, table.name, table.controller, table.function, table.role, ) if len(tours) > 0: menu_help.append(SEP()) for row in tours: menu_help.append(MM(row.name, c=row.controller, f=row.function, vars={"tour":row.id}, restrict=row.role ) ) return menu_help # ------------------------------------------------------------------------- @classmethod def menu_auth(cls, **attr): """ Auth Menu """ auth = current.auth logged_in = auth.is_logged_in() self_registration = current.deployment_settings.get_security_self_registration() if not logged_in: request = current.request login_next = URL(args=request.args, vars=request.vars) if request.controller == "default" and \ request.function == "user" and \ "_next" in request.get_vars: login_next = request.get_vars["_next"] menu_auth = MM("Login", c="default", f="user", m="login", _id="auth_menu_login", vars=dict(_next=login_next), **attr)( MM("Login", m="login", 
vars=dict(_next=login_next)), MM("Register", m="register", vars=dict(_next=login_next), check=self_registration), MM("Lost Password", m="retrieve_password") ) else: # Logged-in menu_auth = MM(auth.user.email, c="default", f="user", translate=False, link=False, _id="auth_menu_email", **attr)( MM("Logout", m="logout", _id="auth_menu_logout"), MM("User Profile", m="profile"), MM("Personal Data", c="default", f="person", m="update"), MM("Contact Details", c="pr", f="person", args="contact", vars={"person.pe_id" : auth.user.pe_id}), #MM("Subscriptions", c="pr", f="person", #args="pe_subscription", #vars={"person.pe_id" : auth.user.pe_id}), MM("Change Password", m="change_password"), SEP(), MM({"name": current.T("Rapid Data Entry"), "id": "rapid_toggle", "value": current.session.s3.rapid_data_entry is True}, f="rapid"), ) return menu_auth # ------------------------------------------------------------------------- @classmethod def menu_admin(cls, **attr): """ Administrator Menu """ s3_has_role = current.auth.s3_has_role settings = current.deployment_settings name_nice = settings.modules["admin"].name_nice if s3_has_role("ADMIN"): translate = settings.has_module("translate") menu_admin = MM(name_nice, c="admin", **attr)( MM("Settings", f="setting"), MM("Users", f="user"), MM("Person Registry", c="pr"), MM("Database", c="appadmin", f="index"), MM("Error Tickets", f="errors"), MM("Synchronization", c="sync", f="index"), MM("Translation", c="admin", f="translate", check=translate), MM("Test Results", f="result"), ) elif s3_has_role("ORG_ADMIN"): menu_admin = MM(name_nice, c="admin", f="user", **attr)() else: menu_admin = None return menu_admin # ------------------------------------------------------------------------- @classmethod def menu_gis(cls, **attr): """ GIS Config Menu """ settings = current.deployment_settings if not settings.get_gis_menu(): return None T = current.T db = current.db auth = current.auth s3db = current.s3db request = current.request s3 = current.session.s3 _config = s3.gis_config_id # See if we need to switch config before we decide which # config item to mark as active: if "_config" in request.get_vars: # The user has just selected a config from the GIS menu try: config = int(request.get_vars._config) except ValueError: # Manually-crafted URL? pass else: if _config is None or _config != config: # Set this as the current config s3.gis_config_id = config cfg = current.gis.get_config() s3.location_filter = cfg.region_location_id if settings.has_module("event"): # See if this config is associated with an Incident table = s3db.event_config query = (table.config_id == config) incident = db(query).select(table.incident_id, limitby=(0, 1)).first() if incident: s3.incident = incident.incident_id else: s3.incident = None # Don't use the outdated cache for this call cache = None else: cache = s3db.cache # Check if there are multiple GIS Configs for the user to switch between table = s3db.gis_menu ctable = s3db.gis_config query = (table.pe_id == None) if auth.is_logged_in(): # @ToDo: Search for OUs too (API call) query |= (table.pe_id == auth.user.pe_id) query &= (table.config_id == ctable.id) configs = db(query).select(ctable.id, ctable.name, cache=cache) gis_menu = MM(settings.get_gis_menu(), c=request.controller, f=request.function, **attr) args = request.args if len(configs): # Use short names for the site and personal configs else they'll wrap. 
# Provide checkboxes to select between pages gis_menu( MM({"name": T("Default"), "id": "gis_menu_id_0", # @ToDo: Show when default item is selected without having # to do a DB query to read the value #"value": _config is 0, "request_type": "load" }, args=args, vars={"_config": 0} ) ) for config in configs: gis_menu( MM({"name": config.name, "id": "gis_menu_id_%s" % config.id, "value": _config == config.id, "request_type": "load" }, args=args, vars={"_config": config.id} ) ) return gis_menu # ============================================================================= class S3OptionsMenu(object): """ The default configurations for options menus Define one function per controller with the controller prefix as function name and with "self" as its only argument (must be an instance method!), and let it return the controller menu definition as an instance of the layout (=an S3NavigationItem subclass, standard: M). In the standard layout, the main item in a controller menu does not have a label. If you want to re-use a menu for multiple controllers, do *not* define a controller setting (c="xxx") in the main item. """ def __init__(self, name): """ Constructor """ try: self.menu = getattr(self, name)() except: self.menu = None # ------------------------------------------------------------------------- def admin(self): """ ADMIN menu """ ADMIN = current.session.s3.system_roles.ADMIN settings_messaging = self.settings_messaging() translate = current.deployment_settings.has_module("translate") # NB: Do not specify a controller for the main menu to allow # re-use of this menu by other controllers return M(restrict=[ADMIN])( M("Settings", c="admin", f="setting")( settings_messaging, ), M("User Management", c="admin", f="user")( M("Create User", m="create"), M("List All Users"), M("Import Users", m="import"), M("List All Roles", f="role"), M("List All Organization Approvers & Whitelists", f="organisation"), #M("Roles", f="group"), #M("Membership", f="membership"), ), M("Database", c="appadmin", f="index")( M("Raw Database access", c="appadmin", f="index") ), M("Error Tickets", c="admin", f="errors"), M("Synchronization", c="sync", f="index")( M("Settings", f="config", args=[1], m="update"), M("Repositories", f="repository"), M("Log", f="log"), ), #M("Edit Application", a="admin", c="default", f="design", #args=[request.application]), M("Translation", c="admin", f="translate", check=translate)( M("Select Modules for translation", c="admin", f="translate", m="create", vars=dict(opt="1")), M("Upload translated files", c="admin", f="translate", m="create", vars=dict(opt="2")), M("View Translation Percentage", c="admin", f="translate", m="create", vars=dict(opt="3")), M("Add strings manually", c="admin", f="translate", m="create", vars=dict(opt="4")) ), M("View Test Result Reports", c="admin", f="result"), M("Portable App", c="admin", f="portable") ) # ------------------------------------------------------------------------- @staticmethod def assess(): """ ASSESS Menu """ #ADMIN = current.session.s3.system_roles.ADMIN return M(c="assess")( M("Building Assessments", f="building")( M("Create", m="create"), M("Map", m="map"), ), M("Canvassing", f="canvass")( M("Create", m="create"), M("Map", m="map"), ), #M("Rapid Assessments", f="rat")( # M("Create", m="create"), #), #M("Impact Assessments", f="assess")( # #M("Create", m="create"), # M("Create", f="basic_assess", p="create"), # #M("Search"), # M("Mobile", f="mobile_basic_assess"), #), ##M("Baseline Data")( # #M("Population", f="population"), ##), #M("Edit 
Options", restrict=ADMIN)( # M("List / Add Baseline Types", f="baseline_type"), # M("List / Add Impact Types", f="impact_type"), #) ) # ------------------------------------------------------------------------- @staticmethod def asset(): """ ASSET Controller """ ADMIN = current.session.s3.system_roles.ADMIN return M(c="asset")( M("Assets", f="asset", m="summary")( M("Create", m="create"), #M("Map", m="map"), M("Import", m="import", p="create"), ), #M("Brands", f="brand", # restrict=[ADMIN])( # M("Create", m="create"), #), M("Items", f="item", m="summary")( M("Create", m="create"), M("Import", f="catalog_item", m="import", p="create"), ), M("Item Categories", f="item_category", restrict=[ADMIN])( M("Create", m="create"), ), M("Catalogs", f="catalog", restrict=[ADMIN])( M("Create", m="create"), ), M("Suppliers", f="supplier")( M("Create", m="create"), M("Import", m="import", p="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def budget(): """ BUDGET Controller """ return M(c="budget")( M("Budgets", f="budget")( M("Create", m="create"), ), M("Staff Types", f="staff")( M("Create", m="create"), ), M("Projects", f="project")( M("Create", m="create"), ), M("Locations", f="location")( M("Create", m="create"), ), M("Bundles", f="bundle")( M("Create", m="create"), ), M("Kits", f="kit")( M("Create", m="create"), ), M("Items", f="item")( M("Create", m="create"), ), M("Parameters", f="parameter"), ) # ------------------------------------------------------------------------- @staticmethod def building(): """ BUILDING Controller """ return M(c="building")( M("NZSEE Level 1", f="nzseel1")( M("Submit New (triage)", m="create", vars={"triage":1}), M("Submit New (full form)", m="create"), ), M("NZSEE Level 2", f="nzseel2")( M("Submit New", m="create"), ), M("Report", f="index")( M("Snapshot", f="report"), M("Assessment timeline", f="timeline"), M("Assessment admin level", f="adminLevel"), ), ) # ------------------------------------------------------------------------- @staticmethod def cap(): """ CAP menu """ return M(c="cap")( M("Alerts", f="alert")( M("Create", m="create"), M("Import from CSV", m="import", p="create"), M("Import from Feed URL", m="import_feed", p="create"), ), M("Templates", f="template")( M("Create", m="create"), ), M("RSS Channels", c="msg", f="rss_channel")( M("Create", m="create"), ), #M("CAP Profile", f="profile")( # M("Edit profile", f="profile") #) ) # ------------------------------------------------------------------------- @staticmethod def cr(): """ CR / Shelter Registry """ ADMIN = current.session.s3.system_roles.ADMIN if current.deployment_settings.get_ui_label_camp(): shelter = "Camps" types = "Camp Settings" else: shelter = "Shelters" types = "Shelter Settings" return M(c="cr")( M(shelter, f="shelter")( M("Create", m="create"), M("Map", m="map"), M("Report", m="report"), M("Import", m="import", p="create"), ), M(types, restrict=[ADMIN])( M("Types", f="shelter_type"), M("Services", f="shelter_service"), ) ) # ------------------------------------------------------------------------- @staticmethod def cms(): """ CMS / Content Management System """ return M(c="cms")( M("Series", f="series")( M("Create", m="create"), M("View as Pages", f="blog"), ), M("Posts", f="post")( M("Create", m="create"), M("View as Pages", f="page"), ), ) # ------------------------------------------------------------------------- @staticmethod def delphi(): """ DELPHI / Delphi Decision Maker """ ADMIN = current.session.s3.system_roles.ADMIN return 
M(c="delphi")( M("Active Problems", f="problem")( M("Create", m="create"), ), M("Groups", f="group")( M("Create", m="create"), ), #M("Solutions", f="solution"), #M("Administration", restrict=[ADMIN])( #M("Groups", f="group"), #M("Group Memberships", f="membership"), #M("Problems", f="problem"), #) ) # ------------------------------------------------------------------------- @staticmethod def deploy(): """ Deployments """ return M()(M("Missions", c="deploy", f="mission", m="summary")( M("Create", m="create"), M("Active Missions", m="summary", vars={"~.status__belongs": "2"}), ), M("Alerts", c="deploy", f="alert")( M("Create", m="create"), M("InBox", c="deploy", f="email_inbox", ), M("Settings", c="deploy", f="email_channel", p="update", t="msg_email_channel", ), ), M("Assignments", c="deploy", f="assignment", m="summary" ), M("Job Titles", c="deploy", f="job_title" ), M("Human Resources", c="deploy", f="human_resource", m="summary")( M("Add Deployables", c="deploy", f="application", m="select", p="create", t="deploy_application", ), M("Import Human Resources", c="deploy", f="person", m="import"), ), ) # ------------------------------------------------------------------------- @staticmethod def disease(): """ Disease Case Tracking and Contact Tracing """ return M(c="disease")( M("Cases", c="disease", f="case", m="summary")( M("Create", m="create"), M("Watch List", m="summary", vars={"~.monitoring_level__belongs": "OBSERVATION,DIAGNOSTICS"}), ), M("Contact Tracing", c="disease", f="tracing")( M("Create", m="create"), ), M("Diseases", c="disease", f="disease")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def doc(): """ DOC Menu """ return M(c="doc")( M("Documents", f="document")( M("Create", m="create"), ), M("Photos", f="image")( M("Create", m="create"), #M("Bulk Uploader", f="bulk_upload"), ) ) # ------------------------------------------------------------------------- @staticmethod def dvi(): """ DVI / Disaster Victim Identification """ return M(c="dvi")( #M("Home", f="index"), M("Recovery Requests", f="recreq")( M("New Request", m="create"), M("List Current", vars={"recreq.status":"1,2,3"}), ), M("Dead Bodies", f="body")( M("Add", m="create"), M("List unidentified", vars={"identification.status": "None"}), M("Report by Age/Gender", m="report", vars=dict(rows="age_group", cols="gender", fact="pe_label", aggregate="count")), ), M("Missing Persons", f="person")( M("List all"), ), M("Morgues", f="morgue")( M("Create", m="create"), ), M("Dashboard", f="index"), ) # ------------------------------------------------------------------------- @staticmethod def dvr(): """ DVR Menu """ return M(c="dvr")( M("Cases", f="case")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def event(): """ EVENT / Event Module """ return M()( M("Scenarios", c="scenario", f="scenario")( M("Create", m="create"), M("Import", m="import", p="create"), ), M("Events", c="event", f="event")( M("Create", m="create"), ), M("Event Types", c="event", f="event_type")( M("Create", m="create"), M("Import", m="import", p="create"), ), M("Incidents", c="event", f="incident")( M("Create", m="create"), ), M("Incident Reports", c="event", f="incident_report")( M("Create", m="create"), ), M("Incident Types", c="event", f="incident_type")( M("Create", m="create"), M("Import", m="import", p="create"), ), ) # ------------------------------------------------------------------------- @staticmethod 
def fire(): """ FIRE """ return M(c="fire")( M("Fire Stations", f="station")( M("Create", m="create"), M("Map", m="map"), M("Import Stations", m="import"), M("Import Vehicles", f="station_vehicle", m="import"), ), M("Fire Zones", f="zone")( M("Create", m="create"), #M("Map", m="map"), #M("Import", m="import"), ), M("Zone Types", f="zone_type")( M("Create", m="create"), #M("Map", m="map"), #M("Import", m="import"), ), M("Water Sources", f="water_source")( M("Create", m="create"), M("Map", m="map"), M("Import", m="import"), ), M("Hazard Points", f="hazard_point")( M("Create", m="create"), M("Import", m="import"), ) ) # ------------------------------------------------------------------------- @staticmethod def gis(): """ GIS / GIS Controllers """ MAP_ADMIN = current.session.s3.system_roles.MAP_ADMIN settings = current.deployment_settings gis_menu = settings.get_gis_menu() def pois(i): poi_resources = settings.get_gis_poi_create_resources() if not poi_resources: return False for res in poi_resources: if res["table"] == "gis_poi": return True return False def config_menu(i): auth = current.auth if not auth.is_logged_in(): # Anonymous users can never cofnigure the Map return False s3db = current.s3db if auth.s3_has_permission("create", s3db.gis_config): # If users can create configs then they can see the menu item return True # Look for this user's config table = s3db.gis_config query = (table.pe_id == auth.user.pe_id) config = current.db(query).select(table.id, limitby=(0, 1), cache=s3db.cache).first() if config: return True def config_args(): auth = current.auth if not auth.user: # Won't show anyway due to check return [] if auth.s3_has_role(MAP_ADMIN): # Full List return [] # Look for this user's config s3db = current.s3db table = s3db.gis_config query = (table.pe_id == auth.user.pe_id) config = current.db(query).select(table.id, limitby=(0, 1), cache=s3db.cache).first() if config: # Link direct to the User's config return [config.id, "layer_entity"] # Link to the Create form return ["create"] return M(c="gis")( M("Fullscreen Map", c="gis", f="map_viewing_client"), # Currently not got geocoding support #M("Bulk Uploader", c="doc", f="bulk_upload"), M("Locations", c="gis", f="location")( M("Create Location", m="create"), #M("Create Location Group", m="create", vars={"group": 1}), M("Import from CSV", m="import", restrict=[MAP_ADMIN]), M("Import from OpenStreetMap", m="import_poi", restrict=[MAP_ADMIN]), #M("Geocode", f="geocode_manual"), ), M("PoIs", c="gis", f="poi", check=pois)(), #M("Population Report", f="location", m="report", # vars=dict(rows="name", # fact="population", # aggregate="sum")), M("Configuration", c="gis", f="config", args=config_args(), _id="gis_menu_config", check=config_menu), M("Admin", c="gis", restrict=[MAP_ADMIN])( M("Hierarchy", f="hierarchy"), M("Layers", f="catalog"), M("Markers", f="marker"), M("Menu", f="menu", check=[gis_menu]), M("PoI Types", f="poi_type", check=[pois]), M("Projections", f="projection"), M("Styles", f="style"), ) ) # ------------------------------------------------------------------------- @staticmethod def hms(): """ HMS / Hospital Status Assessment and Request Management """ #s3 = current.response.s3 return M(c="hms")( M("Hospitals", f="hospital")( M("Create", m="create"), M("Map", m="map"), M("Report", m="report"), M("Import", m="import", p="create"), #SEP(), #M("Show Map", c="gis", f="map_viewing_client", #vars={"kml_feed" : "%s/hms/hospital.kml" % #s3.base_url, "kml_name" : "Hospitals_"}) ) ) # 
------------------------------------------------------------------------- @staticmethod def hrm(): """ HRM / Human Resources Management """ s3 = current.session.s3 ADMIN = s3.system_roles.ADMIN # Custom conditions for the check-hook, as lambdas in order # to have them checked only immediately before rendering: manager_mode = lambda i: s3.hrm.mode is None personal_mode = lambda i: s3.hrm.mode is not None is_org_admin = lambda i: s3.hrm.orgs and True or \ ADMIN in s3.roles settings = current.deployment_settings teams = settings.get_hrm_teams() use_teams = lambda i: teams vol_enabled = lambda i: settings.has_module("vol") return M(c="hrm")( M(settings.get_hrm_staff_label(), f="staff", m="summary", check=manager_mode)( M("Create", m="create"), M("Search by Skills", f="competency"), M("Import", f="person", m="import", vars={"group":"staff"}, p="create"), ), M("Staff & Volunteers (Combined)", c="hrm", f="human_resource", m="summary", check=[manager_mode, vol_enabled]), M(teams, f="group", check=[manager_mode, use_teams])( M("Create", m="create"), M("Search Members", f="group_membership"), M("Import", f="group_membership", m="import"), ), M("Department Catalog", f="department", check=manager_mode)( M("Create", m="create"), ), M("Job Title Catalog", f="job_title", check=manager_mode)( M("Create", m="create"), ), M("Skill Catalog", f="skill", check=manager_mode)( M("Create", m="create"), #M("Skill Provisions", f="skill_provision"), ), M("Training Events", f="training_event", check=manager_mode)( M("Create", m="create"), M("Search Training Participants", f="training"), M("Import Participant List", f="training", m="import"), ), M("Training Course Catalog", f="course", check=manager_mode)( M("Create", m="create"), #M("Course Certificates", f="course_certificate"), ), M("Certificate Catalog", f="certificate", check=manager_mode)( M("Create", m="create"), #M("Skill Equivalence", f="certificate_skill"), ), M("Reports", f="staff", m="report", check=manager_mode)( M("Staff Report", m="report"), M("Expiring Staff Contracts Report", vars=dict(expiring=1)), M("Training Report", f="training", m="report"), ), M("Personal Profile", f="person", check=personal_mode, vars=dict(mode="personal")), # This provides the link to switch to the manager mode: M("Staff Management", f="index", check=[personal_mode, is_org_admin]), # This provides the link to switch to the personal mode: M("Personal Profile", f="person", check=manager_mode, vars=dict(mode="personal")) ) # ------------------------------------------------------------------------- @staticmethod def vol(): """ Volunteer Management """ s3 = current.session.s3 ADMIN = s3.system_roles.ADMIN # Custom conditions for the check-hook, as lambdas in order # to have them checked only immediately before rendering: manager_mode = lambda i: s3.hrm.mode is None personal_mode = lambda i: s3.hrm.mode is not None is_org_admin = lambda i: s3.hrm.orgs and True or \ ADMIN in s3.roles settings = current.deployment_settings show_programmes = lambda i: settings.get_hrm_vol_experience() == "programme" show_tasks = lambda i: settings.has_module("project") and \ settings.get_project_mode_task() teams = settings.get_hrm_teams() use_teams = lambda i: teams show_staff = lambda i: settings.get_hrm_show_staff() return M(c="vol")( M("Volunteers", f="volunteer", m="summary", check=[manager_mode])( M("Create", m="create"), M("Search by skills", f="competency"), M("Import", f="person", m="import", vars={"group":"volunteer"}, p="create"), ), M("Staff & Volunteers (Combined)", c="vol", 
f="human_resource", m="summary", check=[manager_mode, show_staff]), M(teams, f="group", check=[manager_mode, use_teams])( M("Create", m="create"), M("Search Members", f="group_membership"), M("Import", f="group_membership", m="import"), ), M("Department Catalog", f="department", check=manager_mode)( M("Create", m="create"), ), M("Volunteer Role Catalog", f="job_title", check=manager_mode)( M("Create", m="create"), ), M("Skill Catalog", f="skill", check=manager_mode)( M("Create", m="create"), #M("Skill Provisions", f="skill_provision"), ), M("Training Events", f="training_event", check=manager_mode)( M("Create", m="create"), M("Search Training Participants", f="training"), M("Import Participant List", f="training", m="import"), ), M("Training Course Catalog", f="course", check=manager_mode)( M("Create", m="create"), #M("Course Certificates", f="course_certificate"), ), M("Certificate Catalog", f="certificate", check=manager_mode)( M("Create", m="create"), #M("Skill Equivalence", f="certificate_skill"), ), M("Programs", f="programme", check=[manager_mode, show_programmes])( M("Create", m="create"), M("Import Hours", f="programme_hours", m="import"), ), M("Reports", f="volunteer", m="report", check=manager_mode)( M("Volunteer Report", m="report"), M("Hours by Role Report", f="programme_hours", m="report", vars=Storage(rows="job_title_id", cols="month", fact="sum(hours)"), check=show_programmes), M("Hours by Program Report", f="programme_hours", m="report", vars=Storage(rows="programme_id", cols="month", fact="sum(hours)"), check=show_programmes), M("Training Report", f="training", m="report"), ), M("My Profile", f="person", check=personal_mode, vars=dict(mode="personal")), M("My Tasks", f="task", check=[personal_mode, show_tasks], vars=dict(mode="personal", mine=1)), # This provides the link to switch to the manager mode: M("Volunteer Management", f="index", check=[personal_mode, is_org_admin]), # This provides the link to switch to the personal mode: M("Personal Profile", f="person", check=manager_mode, vars=dict(mode="personal")) ) # ------------------------------------------------------------------------- @staticmethod def inv(): """ INV / Inventory """ ADMIN = current.session.s3.system_roles.ADMIN current.s3db.inv_recv_crud_strings() inv_recv_list = current.response.s3.crud_strings.inv_recv.title_list settings = current.deployment_settings use_adjust = lambda i: not settings.get_inv_direct_stock_edits() use_commit = lambda i: settings.get_req_use_commit() return M()( #M("Home", f="index"), M("Warehouses", c="inv", f="warehouse")( M("Create", m="create"), M("Import", m="import", p="create"), ), M("Warehouse Stock", c="inv", f="inv_item")( M("Adjust Stock Levels", f="adj", check=use_adjust), M("Kitting", f="kit"), M("Import", f="inv_item", m="import", p="create"), ), M("Reports", c="inv", f="inv_item")( M("Warehouse Stock", f="inv_item", m="report"), M("Expiration Report", c="inv", f="track_item", vars=dict(report="exp")), M("Monetization Report", c="inv", f="inv_item", vars=dict(report="mon")), M("Utilization Report", c="inv", f="track_item", vars=dict(report="util")), M("Summary of Incoming Supplies", c="inv", f="track_item", vars=dict(report="inc")), M("Summary of Releases", c="inv", f="track_item", vars=dict(report="rel")), ), M(inv_recv_list, c="inv", f="recv", translate=False)( # Already T() M("Create", m="create"), M("Timeline", args="timeline"), ), M("Sent Shipments", c="inv", f="send")( M("Create", m="create"), M("Search Shipped Items", f="track_item"), M("Timeline", 
args="timeline"), ), M("Items", c="supply", f="item", m="summary")( M("Create", m="create"), M("Import", f="catalog_item", m="import", p="create"), ), # Catalog Items moved to be next to the Item Categories #M("Catalog Items", c="supply", f="catalog_item")( #M("Create", m="create"), #), #M("Brands", c="supply", f="brand", # restrict=[ADMIN])( # M("Create", m="create"), #), M("Catalogs", c="supply", f="catalog")( M("Create", m="create"), ), M("Item Categories", c="supply", f="item_category", restrict=[ADMIN])( M("Create", m="create"), ), M("Suppliers", c="inv", f="supplier")( M("Create", m="create"), M("Import", m="import", p="create"), ), M("Facilities", c="inv", f="facility")( M("Create", m="create", t="org_facility"), ), M("Facility Types", c="inv", f="facility_type", restrict=[ADMIN])( M("Create", m="create"), ), M("Requests", c="req", f="req")( M("Create", m="create"), M("Requested Items", f="req_item"), ), M("Commitments", c="req", f="commit", check=use_commit)( ), ) # ------------------------------------------------------------------------- @staticmethod def irs(): """ IRS / Incident Report System """ ADMIN = current.session.s3.system_roles.ADMIN return M(c="irs")( M("Incident Reports", f="ireport")( M("Create Incident Report", m="create"), M("Open Incidents", vars={"open":1}), M("Map", m="map"), M("Timeline", args="timeline"), M("Import", m="import"), M("Report", m="report") ), M("Incident Categories", f="icategory", restrict=[ADMIN])( M("Create", m="create"), ), M("Ushahidi Import", f="ireport", restrict=[ADMIN], args="ushahidi") ) # ------------------------------------------------------------------------- @staticmethod def security(): """ Security Management System """ ADMIN = current.session.s3.system_roles.ADMIN return M(c="security")( M("Incident Reports", c="irs", f="ireport")( M("Create", m="create"), M("Open Incidents", vars={"open":1}), M("Map", m="map"), M("Timeline", args="timeline"), M("Import", m="import"), M("Report", m="report", vars=dict(rows="L1", cols="category", fact="datetime", aggregate="count")) ), M("Incident Categories", c="irs", f="icategory", restrict=[ADMIN])( M("Create", m="create"), ), M("Facilities", c="org", f="facility", m="summary")( M("Create", m="create"), ), M("Facility Types", c="org", f="facility_type", restrict=[ADMIN])( M("Create", m="create"), ), M("Zones", f="zone")( M("Create", m="create"), ), M("Zone Types", f="zone_type", restrict=[ADMIN])( M("Create", m="create"), ), M("Personnel", f="staff")( M("Create", m="create"), M("List All Security-related Staff"), M("List All Essential Staff", f="essential"), ), M("Security Staff Types", f="staff_type", restrict=[ADMIN])( M("Create", m="create"), ), #M("Ushahidi Import", c="irs", f="ireport", restrict=[ADMIN], # args="ushahidi") ) # ------------------------------------------------------------------------- def scenario(self): """ SCENARIO """ # Use EVENT menu return self.event() # ------------------------------------------------------------------------- def supply(self): """ SUPPLY """ # Use INV menu return self.inv() # ------------------------------------------------------------------------- @staticmethod def survey(): """ SURVEY / Survey """ ADMIN = current.session.s3.system_roles.ADMIN # Do we have a series_id? 
series_id = False get_vars = Storage() try: series_id = int(current.request.args[0]) except: try: (dummy, series_id) = current.request.get_vars["viewing"].split(".") series_id = int(series_id) except: pass if series_id: get_vars.viewing = "survey_complete.%s" % series_id return M(c="survey")( M("Assessment Templates", f="template")( M("Create", m="create"), ), #M("Section", f="section")( # M("Create", args="create"), #), M("Disaster Assessments", f="series")( M("Create", m="create"), ), M("Administration", f="admin", restrict=[ADMIN])( M("Import Templates", f="question_list", m="import", p="create"), M("Import Template Layout", f="formatter", m="import", p="create"), M("Import Completed Assessment Forms", f="complete", m="import", p="create", vars=get_vars, check=series_id), ), ) # ------------------------------------------------------------------------- @staticmethod def member(): """ Membership Management """ return M(c="member")( M("Members", f="membership", m="summary")( M("Create Member", m="create"), #M("Report", m="report"), M("Import", f="person", m="import"), ), M("Membership Types", f="membership_type")( M("Create Membership Type", m="create"), #M("Import", m="import"), ), ) # ------------------------------------------------------------------------- @staticmethod def mpr(): """ MPR / Missing Person Registry """ return M(c="mpr")( M("Missing Persons", f="person")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- def msg(self): """ MSG / Messaging """ ADMIN = current.session.s3.system_roles.ADMIN if current.request.function in ("sms_outbound_gateway", "email_channel", "facebook_channel", "sms_modem_channel", "sms_smtp_channel", "sms_webapi_channel", "tropo_channel", "twitter_channel"): return self.admin() settings_messaging = self.settings_messaging() return M(c="msg")( M("Compose", f="compose"), M("InBox", f="inbox")( M("Email", f="email_inbox"), #M("Facebook", f="facebook_inbox"), M("RSS", f="rss"), M("SMS", f="sms_inbox"), M("Twitter", f="twitter_inbox"), ), M("Outbox", f="outbox")( M("Email", f="email_outbox"), M("Facebook", f="facebook_outbox"), M("SMS", f="sms_outbox"), M("Twitter", f="twitter_outbox"), ), M("Message Log", f="message"), M("Distribution groups", f="group")( M("Group Memberships", f="group_membership"), ), M("Twitter Search", f="twitter_result")( M("Search Queries", f="twitter_search"), M("Results", f="twitter_result"), # @ToDo KeyGraph Results ), M("Administration", restrict=[ADMIN])(settings_messaging) ) # ------------------------------------------------------------------------- @staticmethod def org(): """ ORG / Organization Registry """ ADMIN = current.session.s3.system_roles.ADMIN SECTORS = "Clusters" if current.deployment_settings.get_ui_label_cluster() \ else "Sectors" return M(c="org")( M("Organizations", f="organisation")( M("Create Organization", m="create"), M("Import", m="import") ), M("Offices", f="office")( M("Create", m="create"), M("Map", m="map"), M("Import", m="import") ), M("Facilities", f="facility", m="summary")( M("Create", m="create"), M("Import", m="import") ), M("Organization Types", f="organisation_type", restrict=[ADMIN])( M("Create", m="create"), ), M("Office Types", f="office_type", restrict=[ADMIN])( M("Create", m="create"), ), M("Facility Types", f="facility_type", restrict=[ADMIN])( M("Create", m="create"), ), M(SECTORS, f="sector", restrict=[ADMIN])( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def 
patient(): """ PATIENT / Patient Tracking """ return M(c="patient")( M("Patients", f="patient")( M("Create", m="create"), ) ) # ------------------------------------------------------------------------- @staticmethod def pr(): """ PR / Person Registry """ ADMIN = current.session.s3.system_roles.ADMIN return M(c="pr", restrict=ADMIN)( M("Persons", f="person")( M("Create", m="create"), ), M("Groups", f="group")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def proc(): """ PROC / Procurement """ return M(c="proc")( M("Procurement Plans", f="plan")( M("Create", m="create"), ), M("Suppliers", f="supplier")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def project(): """ PROJECT / Project Tracking & Management """ settings = current.deployment_settings #activities = lambda i: settings.get_project_activities() activity_types = lambda i: settings.get_project_activity_types() community = settings.get_project_community() if community: IMPORT = "Import Project Communities" else: IMPORT = "Import Project Locations" hazards = lambda i: settings.get_project_hazards() sectors = lambda i: settings.get_project_sectors() stats = lambda i: settings.has_module("stats") themes = lambda i: settings.get_project_themes() menu = M(c="project") if settings.get_project_mode_3w(): if community: menu( M("Projects", f="project")( M("Create", m="create"), ), M("Communities", f="location")( # Better created from tab (otherwise Activity Type filter won't work) #M("Create", m="create"), M("Map", m="map"), M("List Community Contacts", f="location_contact"), ), ) else: menu( M("Projects", f="project")( M("Create", m="create"), M("Map", f="location", m="map"), ) ) menu( M("Reports", f="location", m="report")( M("3W", f="location", m="report"), M("Beneficiaries", f="beneficiary", m="report", check = stats, ), M("Funding", f="organisation", m="report"), ), M("Import", f="project", m="import", p="create")( M("Import Projects", m="import", p="create"), M("Import Project Organizations", f="organisation", m="import", p="create"), M(IMPORT, f="location", m="import", p="create"), ), M("Partner Organizations", f="partners")( M("Create", m="create"), M("Import", m="import", p="create"), ), M("Activity Types", f="activity_type", check=activity_types)( M("Create", m="create"), ), M("Beneficiary Types", f="beneficiary_type", check=stats)( M("Create", m="create"), ), M("Demographics", f="demographic", check=stats)( M("Create", m="create"), ), M("Hazards", f="hazard", check=hazards)( M("Create", m="create"), ), M("Sectors", f="sector", check=sectors)( M("Create", m="create"), ), M("Themes", f="theme", check=themes)( M("Create", m="create"), ), ) elif settings.get_project_mode_task(): menu( M("Projects", f="project")( M("Create", m="create"), M("Open Tasks for Project", vars={"tasks":1}), ), M("Tasks", f="task")( M("Create", m="create"), ), ) if current.auth.s3_has_role("STAFF"): ADMIN = current.session.s3.system_roles.ADMIN menu( M("Daily Work", f="time")( M("My Logged Hours", vars={"mine":1}), M("My Open Tasks", f="task", vars={"mine":1}), ), M("Admin", restrict=[ADMIN])( M("Activity Types", f="activity_type"), M("Import Tasks", f="task", m="import", p="create"), ), M("Reports", f="report")( M("Activity Report", f="activity", m="report"), M("Last Week's Work", f="time", m="report", vars=Storage(rows="person_id", cols="day", fact="sum(hours)", week=1)), M("Last Month's Work", f="time", 
m="report", vars=Storage(rows="person_id", cols="week", fact="sum(hours)", month=1)), M("Project Time Report", f="time", m="report"), ), ) else: menu( M("Projects", f="project")( M("Create", m="create"), M("Import", m="import", p="create"), ), ) return menu # ------------------------------------------------------------------------- @staticmethod def req(): """ REQ / Request Management """ ADMIN = current.session.s3.system_roles.ADMIN settings = current.deployment_settings use_commit = lambda i: settings.get_req_use_commit() req_items = lambda i: "Stock" in settings.get_req_req_type() req_skills = lambda i: "People" in settings.get_req_req_type() return M(c="req")( M("Requests", f="req")( M("Create", m="create"), M("List Recurring Requests", f="req_template"), M("Map", m="map"), M("Report", m="report"), M("Search All Requested Items", f="req_item", check=req_items), M("Search All Requested Skills", f="req_skill", check=req_skills), ), M("Commitments", f="commit", check=use_commit)( ), M("Items", c="supply", f="item")( M("Create", m="create"), M("Report", m="report"), M("Import", m="import", p="create"), ), # Catalog Items moved to be next to the Item Categories #M("Catalog Items", c="supply", f="catalog_item")( #M("Create", m="create"), #), M("Catalogs", c="supply", f="catalog")( M("Create", m="create"), ), M("Item Categories", c="supply", f="item_category", restrict=[ADMIN])( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def stats(): """ Statistics """ return M(c="stats")( M("Demographics", f="demographic")( M("Create", m="create"), ), M("Demographic Data", f="demographic_data", args="summary")( M("Create", m="create"), M("Time Plot", m="timeplot"), M("Import", m="import"), ), ) # ------------------------------------------------------------------------- def sync(self): """ SYNC menu """ # Use admin menu return self.admin() # ------------------------------------------------------------------------- @staticmethod def tour(): """ Guided Tour """ ADMIN = current.session.s3.system_roles.ADMIN return M(c="tour")( M("Configuration", f="config", restrict=[ADMIN])( M("Import", m="import", restrict=[ADMIN]), ), M("Detail", f="details", restrict=[ADMIN]), M("User", f="user", restrict=[ADMIN]), ) # ------------------------------------------------------------------------- @staticmethod def transport(): """ TRANSPORT """ ADMIN = current.session.s3.system_roles.ADMIN return M(c="transport")( M("Airports", f="airport")( M("Create", m="create"), M("Map", m="map"), M("Import", m="import", restrict=[ADMIN]), ), M("Heliports", f="heliport")( M("Create", m="create"), M("Map", m="map"), M("Import", m="import", restrict=[ADMIN]), ), M("Seaports", f="seaport")( M("Create", m="create"), M("Map", m="map"), M("Import", m="import", restrict=[ADMIN]), ), ) # ------------------------------------------------------------------------- @staticmethod def vehicle(): """ VEHICLE / Vehicle Tracking """ return M(c="vehicle")( M("Vehicles", f="vehicle")( M("Create", m="create"), M("Import", m="import", p="create"), M("Map", m="map"), ), M("Vehicle Types", f="vehicle_type")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def vulnerability(): """ Vulnerability """ return M(c="vulnerability")( M("Indicators", f="indicator")( M("Create", m="create"), ), M("Data", f="data")( M("Create", m="create"), M("Import", m="import"), ), ) # 
------------------------------------------------------------------------- @staticmethod def water(): """ Water: Floods, etc """ return M(c="water")( M("Gauges", f="gauge")( M("Create", m="create"), M("Map", m="map"), M("Import", m="import"), ), M("Rivers", f="river")( M("Create", m="create"), M("Map", m="map"), #M("Import", m="import"), ), M("Zones", f="zone")( M("Create", m="create"), M("Map", m="map"), #M("Import", m="import"), ), M("Zone Types", f="zone_type")( M("Create", m="create"), M("Map", m="map"), #M("Import", m="import"), ), ) # ------------------------------------------------------------------------- @classmethod def settings_messaging(cls): """ Messaging settings menu items: These items are used in multiple menus, but each item instance can always only belong to one parent, so we need to re-instantiate with the same parameters, and therefore this is defined as a function here. """ return [ M("Email Channels (Inbound)", c="msg", f="email_channel"), M("Facebook Channels", c="msg", f="facebook_channel"), M("RSS Channels", c="msg", f="rss_channel"), M("SMS Outbound Gateways", c="msg", f="sms_outbound_gateway")( M("SMS Modem Channels", c="msg", f="sms_modem_channel"), M("SMS SMTP Channels", c="msg", f="sms_smtp_channel"), M("SMS WebAPI Channels", c="msg", f="sms_webapi_channel"), ), M("Mobile Commons Channels", c="msg", f="mcommons_channel"), M("Twilio Channels", c="msg", f="twilio_channel"), M("Parsers", c="msg", f="parser"), M("Twitter Settings", c="msg", f="twitter_channel", args=[1], m="update") ] # ------------------------------------------------------------------------- @classmethod def breadcrumbs(cls): """ Breadcrumbs from the current options menu """ # Configure the layout: layout = S3BreadcrumbsLayout request = current.request controller = request.controller function = request.function all_modules = current.deployment_settings.modules # Start with a link to the homepage - always: breadcrumbs = layout()( layout(all_modules["default"].name_nice) ) # Append the current module's homepage - always: # @note: this may give a breadcrumb for which there's no menu item # and should therefore perhaps be replaced by a real path-check in # the main menu? if controller != "default": try: breadcrumbs( layout(all_modules[controller].name_nice, c=controller) ) except: # Module not defined pass # This checks the path in the options menu, omitting the top-level item # (because that's the menu itself which doesn't have a linked label): menu = current.menu.options if menu and function != "index": branch = menu.branch() if branch: path = branch.path() if len(path) > 1: for item in path[1:]: breadcrumbs( layout(item.label, c=item.get("controller"), f=item.get("function"), args=item.args, # Should we retain the request vars in case # the item has no vars? Or shall we merge them # in any case? Didn't see the use-case yet # anywhere... vars=item.vars)) return breadcrumbs # END =========================================================================
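# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original menus): the general shape of an
# options-menu method in this class. The module prefix "example" and its
# functions are hypothetical; the keywords (c=controller, f=function, m=method,
# p=permission, restrict=roles) are the ones already used by the methods above.
#
#    @staticmethod
#    def example():
#        """ EXAMPLE / Hypothetical module """
#        ADMIN = current.session.s3.system_roles.ADMIN
#        return M(c="example")(
#            M("Records", f="record", m="summary")(
#                M("Create", m="create"),
#                M("Import", m="import", p="create", restrict=[ADMIN]),
#            ),
#            M("Record Types", f="record_type", restrict=[ADMIN])(
#                M("Create", m="create"),
#            ),
#        )
# -----------------------------------------------------------------------------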
import uuid from collections import OrderedDict, defaultdict from collections.abc import Sequence from django import forms from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.forms.utils import ErrorList from django.template.loader import render_to_string from django.utils.html import format_html_join from django.utils.safestring import mark_safe from django.utils.translation import gettext as _ from wagtail.admin.staticfiles import versioned_static from wagtail.core.utils import escape_script from .base import Block, BoundBlock, DeclarativeSubBlocksMetaclass from .utils import indent, js_dict __all__ = ['BaseStreamBlock', 'StreamBlock', 'StreamValue', 'StreamBlockValidationError'] class StreamBlockValidationError(ValidationError): def __init__(self, block_errors=None, non_block_errors=None): params = {} if block_errors: params.update(block_errors) if non_block_errors: params[NON_FIELD_ERRORS] = non_block_errors super().__init__( 'Validation error in StreamBlock', params=params) class BaseStreamBlock(Block): def __init__(self, local_blocks=None, **kwargs): self._constructor_kwargs = kwargs super().__init__(**kwargs) # create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks self.child_blocks = self.base_blocks.copy() if local_blocks: for name, block in local_blocks: block.set_name(name) self.child_blocks[name] = block self.dependencies = self.child_blocks.values() def get_default(self): """ Default values set on a StreamBlock should be a list of (type_name, value) tuples - we can't use StreamValue directly, because that would require a reference back to the StreamBlock that hasn't been built yet. For consistency, then, we need to convert it to a StreamValue here for StreamBlock to work with. """ return StreamValue(self, self.meta.default) def sorted_child_blocks(self): """Child blocks, sorted in to their groups.""" return sorted(self.child_blocks.values(), key=lambda child_block: child_block.meta.group) def render_list_member(self, block_type_name, value, prefix, index, errors=None, id=None): """ Render the HTML for a single list item. This consists of a container, hidden fields to manage ID/deleted state/type, delete/reorder buttons, and the child block's own HTML. 
""" child_block = self.child_blocks[block_type_name] child = child_block.bind(value, prefix="%s-value" % prefix, errors=errors) return render_to_string('wagtailadmin/block_forms/stream_member.html', { 'child_blocks': self.sorted_child_blocks(), 'block_type_name': block_type_name, 'child_block': child_block, 'prefix': prefix, 'child': child, 'index': index, 'block_id': id, }) def html_declarations(self): return format_html_join( '\n', '<script type="text/template" id="{0}-newmember-{1}">{2}</script>', [ ( self.definition_prefix, name, mark_safe(escape_script(self.render_list_member(name, child_block.get_default(), '__PREFIX__', ''))) ) for name, child_block in self.child_blocks.items() ] ) @property def media(self): return forms.Media(js=[ versioned_static('wagtailadmin/js/blocks/sequence.js'), versioned_static('wagtailadmin/js/blocks/stream.js') ]) def js_initializer(self): # compile a list of info dictionaries, one for each available block type child_blocks = [] for name, child_block in self.child_blocks.items(): # each info dictionary specifies at least a block name child_block_info = {'name': "'%s'" % name} # if the child defines a JS initializer function, include that in the info dict # along with the param that needs to be passed to it for initializing an empty/default block # of that type child_js_initializer = child_block.js_initializer() if child_js_initializer: child_block_info['initializer'] = child_js_initializer child_blocks.append(indent(js_dict(child_block_info))) opts = { 'definitionPrefix': "'%s'" % self.definition_prefix, 'childBlocks': '[\n%s\n]' % ',\n'.join(child_blocks), 'maxNumChildBlocks': 'Infinity' if self.meta.max_num is None else ('%s' % self.meta.max_num), } return "StreamBlock(%s)" % js_dict(opts) def render_form(self, value, prefix='', errors=None): error_dict = {} if errors: if len(errors) > 1: # We rely on StreamBlock.clean throwing a single # StreamBlockValidationError with a specially crafted 'params' # attribute that we can pull apart and distribute to the child # blocks raise TypeError('StreamBlock.render_form unexpectedly received multiple errors') error_dict = errors.as_data()[0].params # value can be None when the StreamField is in a formset if value is None: value = self.get_default() # drop any child values that are an unrecognised block type valid_children = [child for child in value if child.block_type in self.child_blocks] list_members_html = [ self.render_list_member(child.block_type, child.value, "%s-%d" % (prefix, i), i, errors=error_dict.get(i), id=child.id) for (i, child) in enumerate(valid_children) ] return render_to_string('wagtailadmin/block_forms/stream.html', { 'prefix': prefix, 'help_text': getattr(self.meta, 'help_text', None), 'list_members_html': list_members_html, 'child_blocks': self.sorted_child_blocks(), 'header_menu_prefix': '%s-before' % prefix, 'block_errors': error_dict.get(NON_FIELD_ERRORS), 'classname': getattr(self.meta, 'form_classname', None), }) def value_from_datadict(self, data, files, prefix): count = int(data['%s-count' % prefix]) values_with_indexes = [] for i in range(0, count): if data['%s-%d-deleted' % (prefix, i)]: continue block_type_name = data['%s-%d-type' % (prefix, i)] try: child_block = self.child_blocks[block_type_name] except KeyError: continue values_with_indexes.append( ( int(data['%s-%d-order' % (prefix, i)]), block_type_name, child_block.value_from_datadict(data, files, '%s-%d-value' % (prefix, i)), data.get('%s-%d-id' % (prefix, i)), ) ) values_with_indexes.sort() return StreamValue(self, [ 
(child_block_type_name, value, block_id) for (index, child_block_type_name, value, block_id) in values_with_indexes ]) def value_omitted_from_data(self, data, files, prefix): return ('%s-count' % prefix) not in data @property def required(self): return self.meta.required def clean(self, value): cleaned_data = [] errors = {} non_block_errors = ErrorList() for i, child in enumerate(value): # child is a StreamChild instance try: cleaned_data.append( (child.block.name, child.block.clean(child.value), child.id) ) except ValidationError as e: errors[i] = ErrorList([e]) if self.meta.min_num is not None and self.meta.min_num > len(value): non_block_errors.append(ValidationError( _('The minimum number of items is %d') % self.meta.min_num )) elif self.required and len(value) == 0: non_block_errors.append(ValidationError(_('This field is required.'))) if self.meta.max_num is not None and self.meta.max_num < len(value): non_block_errors.append(ValidationError( _('The maximum number of items is %d') % self.meta.max_num )) if self.meta.block_counts: block_counts = defaultdict(int) for item in value: block_counts[item.block_type] += 1 for block_name, min_max in self.meta.block_counts.items(): block = self.child_blocks[block_name] max_num = min_max.get('max_num', None) min_num = min_max.get('min_num', None) block_count = block_counts[block_name] if min_num is not None and min_num > block_count: non_block_errors.append(ValidationError( '{}: {}'.format(block.label, _('The minimum number of items is %d') % min_num) )) if max_num is not None and max_num < block_count: non_block_errors.append(ValidationError( '{}: {}'.format(block.label, _('The maximum number of items is %d') % max_num) )) if errors or non_block_errors: # The message here is arbitrary - outputting error messages is delegated to the child blocks, # which only involves the 'params' list raise StreamBlockValidationError(block_errors=errors, non_block_errors=non_block_errors) return StreamValue(self, cleaned_data) def to_python(self, value): # the incoming JSONish representation is a list of dicts, each with a 'type' and 'value' field # (and possibly an 'id' too). # This is passed to StreamValue to be expanded lazily - but first we reject any unrecognised # block types from the list return StreamValue(self, [ child_data for child_data in value if child_data['type'] in self.child_blocks ], is_lazy=True) def bulk_to_python(self, values): # 'values' is a list of streams, each stream being a list of dicts with 'type', 'value' and # optionally 'id'. 
# We will iterate over these streams, constructing: # 1) a set of per-child-block lists ('child_inputs'), to be sent to each child block's # bulk_to_python method in turn (giving us 'child_outputs') # 2) a 'block map' of each stream, telling us the type and id of each block and the index we # need to look up in the corresponding child_outputs list to obtain its final value child_inputs = defaultdict(list) block_maps = [] for stream in values: block_map = [] for block_dict in stream: block_type = block_dict['type'] if block_type not in self.child_blocks: # skip any blocks with an unrecognised type continue child_input_list = child_inputs[block_type] child_index = len(child_input_list) child_input_list.append(block_dict['value']) block_map.append( (block_type, block_dict.get('id'), child_index) ) block_maps.append(block_map) # run each list in child_inputs through the relevant block's bulk_to_python # to obtain child_outputs child_outputs = { block_type: self.child_blocks[block_type].bulk_to_python(child_input_list) for block_type, child_input_list in child_inputs.items() } # for each stream, go through the block map, picking out the appropriately-indexed # value from the relevant list in child_outputs return [ StreamValue( self, [ (block_type, child_outputs[block_type][child_index], id) for block_type, id, child_index in block_map ], is_lazy=False ) for block_map in block_maps ] def get_prep_value(self, value): if not value: # Falsy values (including None, empty string, empty list, and # empty StreamValue) become an empty stream return [] else: # value is a StreamValue - delegate to its get_prep_value() method # (which has special-case handling for lazy StreamValues to avoid useless # round-trips to the full data representation and back) return value.get_prep_value() def get_api_representation(self, value, context=None): if value is None: # treat None as identical to an empty stream return [] return [ { 'type': child.block.name, 'value': child.block.get_api_representation(child.value, context=context), 'id': child.id } for child in value # child is a StreamChild instance ] def render_basic(self, value, context=None): return format_html_join( '\n', '<div class="block-{1}">{0}</div>', [ (child.render(context=context), child.block_type) for child in value ] ) def get_searchable_content(self, value): content = [] for child in value: content.extend(child.block.get_searchable_content(child.value)) return content def deconstruct(self): """ Always deconstruct StreamBlock instances as if they were plain StreamBlocks with all of the field definitions passed to the constructor - even if in reality this is a subclass of StreamBlock with the fields defined declaratively, or some combination of the two. This ensures that the field definitions get frozen into migrations, rather than leaving a reference to a custom subclass in the user's models.py that may or may not stick around. """ path = 'wagtail.core.blocks.StreamBlock' args = [list(self.child_blocks.items())] kwargs = self._constructor_kwargs return (path, args, kwargs) def check(self, **kwargs): errors = super().check(**kwargs) for name, child_block in self.child_blocks.items(): errors.extend(child_block.check(**kwargs)) errors.extend(child_block._check_name(**kwargs)) return errors class Meta: # No icon specified here, because that depends on the purpose that the # block is being used for. 
Feel encouraged to specify an icon in your # descendant block type icon = "placeholder" default = [] required = True min_num = None max_num = None block_counts = {} class StreamBlock(BaseStreamBlock, metaclass=DeclarativeSubBlocksMetaclass): pass class StreamValue(Sequence): """ Custom type used to represent the value of a StreamBlock; behaves as a sequence of BoundBlocks (which keep track of block types in a way that the values alone wouldn't). """ class StreamChild(BoundBlock): """ Extends BoundBlock with methods that make logical sense in the context of children of StreamField, but not necessarily elsewhere that BoundBlock is used """ def __init__(self, *args, **kwargs): self.id = kwargs.pop('id') super(StreamValue.StreamChild, self).__init__(*args, **kwargs) @property def block_type(self): """ Syntactic sugar so that we can say child.block_type instead of child.block.name. (This doesn't belong on BoundBlock itself because the idea of block.name denoting the child's "type" ('heading', 'paragraph' etc) is unique to StreamBlock, and in the wider context people are liable to confuse it with the block class (CharBlock etc). """ return self.block.name def __init__(self, stream_block, stream_data, is_lazy=False, raw_text=None): """ Construct a StreamValue linked to the given StreamBlock, with child values given in stream_data. Passing is_lazy=True means that stream_data is raw JSONish data as stored in the database, and needs to be converted to native values (using block.to_python()) when accessed. In this mode, stream_data is a list of dicts, each containing 'type' and 'value' keys. Passing is_lazy=False means that stream_data consists of immediately usable native values. In this mode, stream_data is a list of (type_name, value) or (type_name, value, id) tuples. raw_text exists solely as a way of representing StreamField content that is not valid JSON; this may legitimately occur if an existing text field is migrated to a StreamField. In this situation we return a blank StreamValue with the raw text accessible under the `raw_text` attribute, so that migration code can be rewritten to convert it as desired. """ self.is_lazy = is_lazy self.stream_block = stream_block # the StreamBlock object that handles this value self.stream_data = stream_data # a list of dicts (when lazy) or tuples (when non-lazy) as outlined above self._bound_blocks = {} # populated lazily from stream_data as we access items through __getitem__ self.raw_text = raw_text def __getitem__(self, i): if i not in self._bound_blocks: if self.is_lazy: raw_value = self.stream_data[i] type_name = raw_value['type'] child_block = self.stream_block.child_blocks[type_name] self._prefetch_blocks(type_name, child_block) return self._bound_blocks[i] else: try: type_name, value, block_id = self.stream_data[i] except ValueError: type_name, value = self.stream_data[i] block_id = None child_block = self.stream_block.child_blocks[type_name] self._bound_blocks[i] = StreamValue.StreamChild(child_block, value, id=block_id) return self._bound_blocks[i] def _prefetch_blocks(self, type_name, child_block): """Prefetch all child blocks for the given `type_name` using the given `child_blocks`. This prevents n queries for n blocks of a specific type. 
""" # create a mapping of all the child blocks matching the given block type, # mapping (index within the stream) => (raw block value) raw_values = OrderedDict( (i, item['value']) for i, item in enumerate(self.stream_data) if item['type'] == type_name ) # pass the raw block values to bulk_to_python as a list converted_values = child_block.bulk_to_python(raw_values.values()) # reunite the converted values with their stream indexes for i, value in zip(raw_values.keys(), converted_values): # also pass the block ID to StreamChild, if one exists for this stream index block_id = self.stream_data[i].get('id') self._bound_blocks[i] = StreamValue.StreamChild(child_block, value, id=block_id) def get_prep_value(self): prep_value = [] for i, stream_data_item in enumerate(self.stream_data): if self.is_lazy and i not in self._bound_blocks: # This child has not been accessed as a bound block, so its raw JSONish # value (stream_data_item here) is still valid prep_value_item = stream_data_item # As this method is preparing this value to be saved to the database, # this is an appropriate place to ensure that each block has a unique id. prep_value_item['id'] = prep_value_item.get('id', str(uuid.uuid4())) else: # convert the bound block back into JSONish data child = self[i] # As this method is preparing this value to be saved to the database, # this is an appropriate place to ensure that each block has a unique id. child.id = child.id or str(uuid.uuid4()) prep_value_item = { 'type': child.block.name, 'value': child.block.get_prep_value(child.value), 'id': child.id, } prep_value.append(prep_value_item) return prep_value def __eq__(self, other): if not isinstance(other, StreamValue): return False return self.stream_data == other.stream_data def __len__(self): return len(self.stream_data) def __repr__(self): return repr(list(self)) def render_as_block(self, context=None): return self.stream_block.render(self, context=context) def __html__(self): return self.stream_block.render(self) def __str__(self): return self.__html__()
#-*- coding: utf-8 -*-
# pylint: disable=E0102, W0613
"""
Values generators for common Django Fields
"""
from django.contrib.contenttypes.models import ContentType
import re, os, random
from decimal import Decimal
from datetime import date, datetime, time
from string import ascii_letters, digits, hexdigits
from random import choice

from django.core.exceptions import ValidationError
from django.core import validators
from django.db import models, IntegrityError
from django.db.models import Q
from django.db.models.fields.files import FieldFile
from django.contrib.webdesign.lorem_ipsum import paragraphs
from django.core.validators import validate_ipv4_address
try:
    from django.core.validators import validate_ipv6_address, validate_ipv46_address
except ImportError:
    validate_ipv6_address = None
    validate_ipv46_address = None

from django_any import xunit
from django_any.functions import valid_choices, split_model_kwargs, ExtensionMethod

any_field = ExtensionMethod()
any_model = ExtensionMethod(by_instance=True)


@any_field.decorator
def any_field_blank(function):
    """
    Sometimes return None if field could be blank
    """
    def wrapper(field, **kwargs):
        if kwargs.get('isnull', False):
            return None
        # random.random must be called; leave roughly 10% of blank fields empty
        if field.blank and random.random() < 0.1:
            return None
        return function(field, **kwargs)
    return wrapper


@any_field.decorator
def any_field_choices(function):
    """
    Selection from field.choices

    >>> CHOICES = [('YNG', 'Child'), ('OLD', 'Parent')]
    >>> result = any_field(models.CharField(max_length=3, choices=CHOICES))
    >>> result in ['YNG', 'OLD']
    True
    """
    def wrapper(field, **kwargs):
        if field.choices:
            return random.choice(list(valid_choices(field.choices)))
        return function(field, **kwargs)
    return wrapper


@any_field.register(models.BigIntegerField)
def any_biginteger_field(field, **kwargs):
    """
    Return random value for BigIntegerField

    >>> result = any_field(models.BigIntegerField())
    >>> type(result)
    <type 'long'>
    """
    min_value = kwargs.get('min_value', 1)
    max_value = kwargs.get('max_value', 10**10)
    return long(xunit.any_int(min_value=min_value, max_value=max_value))


@any_field.register(models.BooleanField)
def any_boolean_field(field, **kwargs):
    """
    Return random value for BooleanField

    >>> result = any_field(models.BooleanField())
    >>> type(result)
    <type 'bool'>
    """
    return xunit.any_boolean()


@any_field.register(models.PositiveIntegerField)
def any_positiveinteger_field(field, **kwargs):
    """
    A positive integer

    >>> result = any_field(models.PositiveIntegerField())
    >>> type(result)
    <type 'int'>
    >>> result > 0
    True
    """
    min_value = kwargs.get('min_value', 1)
    max_value = kwargs.get('max_value', 9999)
    return xunit.any_int(min_value=min_value, max_value=max_value)


@any_field.register(models.CharField)
def any_char_field(field, **kwargs):
    """
    Return random value for CharField

    >>> result = any_field(models.CharField(max_length=10))
    >>> type(result)
    <type 'str'>
    """
    min_length = kwargs.get('min_length', 1)
    max_length = kwargs.get('max_length', field.max_length)
    return xunit.any_string(min_length=min_length, max_length=max_length)


@any_field.register(models.CommaSeparatedIntegerField)
def any_commaseparatedinteger_field(field, **kwargs):
    """
    Return random value for CommaSeparatedIntegerField

    >>> result = any_field(models.CommaSeparatedIntegerField(max_length=10))
    >>> type(result)
    <type 'str'>
    >>> [int(num) for num in result.split(',')] and 'OK'
    'OK'
    """
    nums_count = field.max_length/2
    nums = [str(xunit.any_int(min_value=0, max_value=9)) for _ in xrange(0, nums_count)]
    return ",".join(nums)


@any_field.register(models.DateField)
def 
any_date_field(field, **kwargs): """ Return random value for DateField, skips auto_now and auto_now_add fields >>> result = any_field(models.DateField()) >>> type(result) <type 'datetime.date'> """ if field.auto_now or field.auto_now_add: return None from_date = kwargs.get('from_date', date(1990, 1, 1)) to_date = kwargs.get('to_date', date.today()) return xunit.any_date(from_date=from_date, to_date=to_date) @any_field.register(models.DateTimeField) def any_datetime_field(field, **kwargs): """ Return random value for DateTimeField, skips auto_now and auto_now_add fields >>> result = any_field(models.DateTimeField()) >>> type(result) <type 'datetime.datetime'> """ from_date = kwargs.get('from_date', datetime(1990, 1, 1)) to_date = kwargs.get('to_date', datetime.today()) return xunit.any_datetime(from_date=from_date, to_date=to_date) @any_field.register(models.DecimalField) def any_decimal_field(field, **kwargs): """ Return random value for DecimalField >>> result = any_field(models.DecimalField(max_digits=5, decimal_places=2)) >>> type(result) <class 'decimal.Decimal'> """ min_value = kwargs.get('min_value', 0) max_value = kwargs.get('max_value', Decimal('%s.%s' % ('9'*(field.max_digits-field.decimal_places), '9'*field.decimal_places))) decimal_places = kwargs.get('decimal_places', field.decimal_places) return xunit.any_decimal(min_value=min_value, max_value=max_value, decimal_places = decimal_places) @any_field.register(models.EmailField) def any_email_field(field, **kwargs): """ Return random value for EmailField >>> result = any_field(models.EmailField()) >>> type(result) <type 'str'> >>> re.match(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", result, re.IGNORECASE) is not None True """ return "%s@%s.%s" % (xunit.any_string(max_length=10), xunit.any_string(max_length=10), xunit.any_string(min_length=2, max_length=3)) @any_field.register(models.FloatField) def any_float_field(field, **kwargs): """ Return random value for FloatField >>> result = any_field(models.FloatField()) >>> type(result) <type 'float'> """ min_value = kwargs.get('min_value', 1) max_value = kwargs.get('max_value', 100) precision = kwargs.get('precision', 3) return xunit.any_float(min_value=min_value, max_value=max_value, precision=precision) @any_field.register(models.FileField) @any_field.register(models.ImageField) def any_file_field(field, **kwargs): """ Lookup for nearest existing file """ def get_some_file(path): subdirs, files = field.storage.listdir(path) if files: result_file = random.choice(files) instance = field.storage.open("%s/%s" % (path, result_file)).file return FieldFile(instance, field, result_file) for subdir in subdirs: result = get_some_file("%s/%s" % (path, subdir)) if result: return result if callable(field.upload_to): generated_filepath = field.upload_to(None, xunit.any_string(ascii_letters, 10, 20)) upload_to = os.path.dirname(generated_filepath) else: upload_to = field.upload_to result = get_some_file(upload_to) if result is None and not field.null: raise TypeError("Can't found file in %s for non nullable FileField" % field.upload_to) return result @any_field.register(models.FilePathField) def any_filepath_field(field, **kwargs): """ Lookup for nearest existing file """ def get_some_file(path): subdirs, files = [], [] for entry in os.listdir(path): entry_path = os.path.join(path, entry) if os.path.isdir(entry_path): subdirs.append(entry_path) else: if not field.match or re.match(field.match,entry): files.append(entry_path) if files: return random.choice(files) if field.recursive: 
for subdir in subdirs: result = get_some_file(subdir) if result: return result result = get_some_file(field.path) if result is None and not field.null: raise TypeError("Can't found file in %s for non nullable FilePathField" % field.path) return result @any_field.register(models.IPAddressField) def any_ipaddress_field(field, **kwargs): """ Return random value for IPAddressField >>> result = any_field(models.IPAddressField()) >>> type(result) <type 'str'> >>> from django.core.validators import ipv4_re >>> re.match(ipv4_re, result) is not None True """ nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in xrange(0, 4)] return ".".join(nums) if validate_ipv6_address: @any_field.register(models.GenericIPAddressField) def any_genericipaddress_field(field, **kwargs): """ Return random value for GenericIPAddressField >>> ipv4_address = any_field(models.GenericIPAddressField(protocol='ipv4')) >>> type(ipv4_address) <type 'str'> >>> from django.core.validators import ipv4_re >>> re.match(ipv4_re, ipv4_address) is not None True >>> ipv6_address = any_field(models.GenericIPAddressField(protocol='ipv6')) >>> type(ipv6_address) <type 'str'> >>> from django.utils.ipv6 import is_valid_ipv6_address >>> is_valid_ipv6_address(ipv6_address) is True True >>> ipv46_address = any_field(models.GenericIPAddressField()) >>> type(ipv46_address) <type 'str'> >>> from django.core.validators import validate_ipv46_address >>> validate_ipv46_address(ipv46_address) is True False """ if field.default_validators == [validate_ipv46_address]: protocol = random.choice(('ipv4', 'ipv6')) elif field.default_validators == [validate_ipv4_address]: protocol = 'ipv4' elif field.default_validators == [validate_ipv6_address]: protocol = 'ipv6' else: raise Exception('Unexpected validators') if protocol == 'ipv4': return any_ipaddress_field(field) if protocol == 'ipv6': nums = [str(xunit.any_string(hexdigits, min_length=4, max_length=4)) for _ in xrange(0, 8)] return ":".join(nums) @any_field.register(models.NullBooleanField) def any_nullboolean_field(field, **kwargs): """ Return random value for NullBooleanField >>> result = any_field(models.NullBooleanField()) >>> result in [None, True, False] True """ return random.choice([None, True, False]) @any_field.register(models.PositiveSmallIntegerField) def any_positivesmallinteger_field(field, **kwargs): """ Return random value for PositiveSmallIntegerField >>> result = any_field(models.PositiveSmallIntegerField()) >>> type(result) <type 'int'> >>> result < 256, result > 0 (True, True) """ min_value = kwargs.get('min_value', 1) max_value = kwargs.get('max_value', 255) return xunit.any_int(min_value=min_value, max_value=max_value) @any_field.register(models.SlugField) def any_slug_field(field, **kwargs): """ Return random value for SlugField >>> result = any_field(models.SlugField()) >>> type(result) <type 'str'> >>> from django.core.validators import slug_re >>> re.match(slug_re, result) is not None True """ letters = ascii_letters + digits + '_-' return xunit.any_string(letters = letters, max_length = field.max_length) @any_field.register(models.SmallIntegerField) def any_smallinteger_field(field, **kwargs): """ Return random value for SmallIntegerValue >>> result = any_field(models.SmallIntegerField()) >>> type(result) <type 'int'> >>> result > -256, result < 256 (True, True) """ min_value = kwargs.get('min_value', -255) max_value = kwargs.get('max_value', 255) return xunit.any_int(min_value=min_value, max_value=max_value) @any_field.register(models.IntegerField) def 
any_integer_field(field, **kwargs): """ Return random value for IntegerField >>> result = any_field(models.IntegerField()) >>> type(result) <type 'int'> """ min_value = kwargs.get('min_value', -10000) max_value = kwargs.get('max_value', 10000) return xunit.any_int(min_value=min_value, max_value=max_value) @any_field.register(models.TextField) def any_text_field(field, **kwargs): """ Return random 'lorem ipsum' Latin text >>> result = any_field(models.TextField()) >>> type(result) <type 'str'> """ return str("\n".join(paragraphs(10))) @any_field.register(models.URLField) def any_url_field(field, **kwargs): """ Return random value for URLField >>> result = any_field(models.URLField()) >>> from django.core.validators import URLValidator >>> re.match(URLValidator.regex, result) is not None True """ url = kwargs.get('url') if not url: verified = [validator for validator in field.validators \ if isinstance(validator, validators.URLValidator) and \ validator.verify_exists == True] if verified: url = choice(['http://news.yandex.ru/society.html', 'http://video.google.com/?hl=en&tab=wv', 'http://www.microsoft.com/en/us/default.aspx', 'http://habrahabr.ru/company/opera/', 'http://www.apple.com/support/hardware/', 'http://ya.ru', 'http://google.com', 'http://fr.wikipedia.org/wiki/France']) else: url = "http://%s.%s/%s" % ( xunit.any_string(max_length=10), xunit.any_string(min_length=2, max_length=3), xunit.any_string(max_length=20)) return url @any_field.register(models.TimeField) def any_time_field(field, **kwargs): """ Return random value for TimeField >>> result = any_field(models.TimeField()) >>> type(result) <type 'datetime.time'> """ return time( xunit.any_int(min_value=0, max_value=23), xunit.any_int(min_value=0, max_value=59), xunit.any_int(min_value=0, max_value=59)) @any_field.register(models.ForeignKey) def any_foreignkey_field(field, **kwargs): return any_model(field.rel.to, **kwargs) @any_field.register(models.OneToOneField) def any_onetoone_field(field, **kwargs): return any_model(field.rel.to, **kwargs) def _fill_model_fields(model, **kwargs): model_fields, fields_args = split_model_kwargs(kwargs) # fill virtual fields for field in model._meta.virtual_fields: if field.name in model_fields: object = kwargs[field.name] model_fields[field.ct_field] = kwargs[field.ct_field]= ContentType.objects.get_for_model(object) model_fields[field.fk_field] = kwargs[field.fk_field]= object.id # fill local fields for field in model._meta.fields: if field.name in model_fields: if isinstance(kwargs[field.name], Q): """ Lookup ForeingKey field in db """ key_field = model._meta.get_field(field.name) value = key_field.rel.to.objects.get(kwargs[field.name]) setattr(model, field.name, value) else: # TODO support any_model call setattr(model, field.name, kwargs[field.name]) elif isinstance(field, models.OneToOneField) and field.rel.parent_link: """ skip link to parent instance """ elif isinstance(field, models.fields.AutoField): """ skip primary key field """ elif isinstance(field, models.fields.related.ForeignKey) and field.model == field.rel.to: """ skip self relations """ else: setattr(model, field.name, any_field(field, **fields_args[field.name])) # procceed reversed relations onetoone = [(relation.var_name, relation.field) \ for relation in model._meta.get_all_related_objects() \ if relation.field.unique] # TODO and not relation.field.rel.parent_link ?? 
    for field_name, field in onetoone:
        if field_name in model_fields:
            # TODO support any_model call
            setattr(model, field_name, kwargs[field_name])


@any_model.register_default
def any_model_default(model_cls, **kwargs):
    result = model_cls()
    attempts = 10
    while True:
        try:
            _fill_model_fields(result, **kwargs)
            result.full_clean()
            result.save()
            return result
        except (IntegrityError, ValidationError):
            attempts -= 1
            if not attempts:
                raise
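# Hedged usage sketch, mirroring the doctests above: `any_field` accepts a
# field instance (plus optional bounds), while `any_model` builds, validates
# and saves a whole instance. A hypothetical `SomeModel` call is mentioned in
# the docstring only, since no concrete model exists in this module.
def _example_values():
    """Return a few sample values produced by the generators above.

    In a real project one would more often call
    ``any_model(SomeModel, field=value, fk=Q(...))`` with an actual model.
    """
    return {
        'ip': any_field(models.IPAddressField()),
        'slug': any_field(models.SlugField(max_length=30)),
        'small': any_field(models.SmallIntegerField(),
                           min_value=0, max_value=10),
    }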
from math import ceil from hashlib import md5 from pecan import expose, request, abort, response, redirect from pecan.secure import secure from pecan.ext.wtforms import with_form from sqlalchemy import select, and_, or_, asc, desc, func, case, literal from draughtcraft import model from draughtcraft.lib.beerxml import export from draughtcraft.lib.forms.recipes.browse import RecipeSearchForm from create import RecipeCreationController from builder import RecipeBuilderController class SlugController(object): def __init__(self, slug): self.slug = slug # Make sure the provided slug is valid if not slug: redirect(request.context['recipe'].slugs[0].slug) if slug not in [slug.slug for slug in request.context['recipe'].slugs]: abort(404) @expose('recipes/builder/index.html') @expose('json', content_type='application/json') def index(self): recipe = request.context['recipe'] if recipe.state == "DRAFT": if recipe.author and recipe.author != request.context['user']: abort(404) if not recipe.author and recipe != request.context['trial_recipe']: abort(404) # Log a view for the recipe (if the viewer *is not* the author) if recipe.author != request.context['user'] and \ request.pecan.get('content_type') == 'application/json': model.RecipeView(recipe=recipe) return dict( recipe=recipe, editable=False ) @expose(content_type='application/xml') def xml(self): recipe = request.context['recipe'] if recipe.state == "DRAFT": if recipe.author and recipe.author != request.context['user']: abort(404) response.headers['Content-Disposition'] = \ 'attachment; filename="%s.xml"' % self.slug return export.to_xml([request.context['recipe']]) @expose(generic=True) def draft(self): abort(405) @draft.when(method="POST") def do_draft(self): source = request.context['recipe'] if source.author is None or source.author != request.context['user']: abort(401) if source.state != "PUBLISHED": abort(401) draft = source.draft() draft.flush() redirect("%sbuilder" % draft.url()) @expose(generic=True) def copy(self): abort(405) @copy.when(method="POST") def do_copy(self): source = request.context['recipe'] if request.context['user'] is None: redirect("/signup") if source.author is None: abort(401) diff_user = source.author != request.context['user'] name = source.name if diff_user else "%s (Duplicate)" % source.name copy = source.duplicate({ 'name': name, 'author': request.context['user'] }) if diff_user: copy.copied_from = source redirect("/") @expose(generic=True) def delete(self): abort(405) @delete.when(method="POST") def do_delete(self): source = request.context['recipe'] if source.author is None or source.author != request.context['user']: abort(401) source.delete() redirect("/") builder = secure( RecipeBuilderController(), RecipeBuilderController.check_permissions ) class RecipeController(object): @expose() def _lookup(self, slug, *remainder): return SlugController(slug), remainder def __init__(self, recipeID): try: primary_key = int(str(recipeID), 16) except ValueError: abort(404) recipe = model.Recipe.get(primary_key) if recipe is None: abort(404) request.context['recipe'] = recipe class RecipesController(object): @expose() def _lookup(self, recipeID, *remainder): return RecipeController(recipeID), remainder @expose('recipes/browse/index.html') def index(self): return dict( styles=model.Style.query.order_by(model.Style.name).all() ) @expose(template='recipes/browse/list.html') @with_form(RecipeSearchForm, validate_safe=True) def recipes(self, **kw): if request.pecan['form'].errors: abort(400) perpage = 25.0 offset = int(perpage 
* (kw['page'] - 1)) views = func.count(model.RecipeView.id).label('views') username = func.lower(model.User.username).label('username') sortable_type = case([ (model.Recipe.type == 'MASH', literal('All Grain')), (model.Recipe.type == 'EXTRACT', literal('Extract')), ( model.Recipe.type == 'EXTRACTSTEEP', literal('Extract w/ Steeped Grains') ), (model.Recipe.type == 'MINIMASH', literal('Mini-Mash')), ]).label('type') # map of columns column_map = dict( type=(sortable_type,), srm=(model.Recipe._srm,), name=(model.Recipe.name,), author=(username,), style=(model.Style.name,), last_updated=(model.Recipe.last_updated,), views=(views,) ) # determine the sorting direction and column order_column = column_map.get(kw['order_by']) order_direction = dict( ASC=asc, DESC=desc ).get(kw['direction']) where = [ model.Recipe.state == 'PUBLISHED' ] # If applicable, filter by style if kw['style']: query = where.append(model.Recipe.style == kw['style']) # If applicable, filter by type (MASH, etc...) where.append(or_( model.Recipe.id is None, model.Recipe.type == 'MASH' if kw['mash'] else None, model.Recipe.type == 'MINIMASH' if kw['minimash'] else None, model.Recipe.type.in_(('EXTRACTSTEEP', 'EXTRACT')) if kw['extract'] else None, )) # If applicable, filter by color if kw['color']: start, end = { 'light': (0, 8), 'amber': (8, 18), 'brown': (16, 25), 'dark': (25, 5000) }.get(kw['color']) where.append(and_( model.Recipe._srm >= start, model.Recipe._srm <= end, )) # Join the `recipe`, `recipeview`, `user`, and `style` tables from_obj = model.Recipe.table.outerjoin( model.RecipeView.table, onclause=model.RecipeView.recipe_id == model.Recipe.id ).outerjoin( model.Style.table, onclause=model.Recipe.style_id == model.Style.id ).join( model.User.table, onclause=model.Recipe.author_id == model.User.id ) username_full = model.User.username.label('username') email = model.User.email.label('email') style_name = model.Style.name.label('style_name') style_url = model.Style.url.label('style_url') query = select( [ model.Recipe.id, model.Recipe.name, model.Recipe._srm, username_full, email, sortable_type, style_name, style_url, model.Recipe.last_updated, views ], and_(*where), from_obj=[from_obj], group_by=model.Recipe.id ) total = select( [func.count(model.Recipe.id)], and_(*where) ).execute().fetchone()[0] if views not in order_column: query = query.group_by(*order_column) query = query.group_by(username_full) query = query.group_by(email) query = query.group_by(style_name) query = query.group_by(style_url) recipes = query.order_by( *[order_direction(column) for column in order_column] ).offset( offset ).limit( perpage ).execute().fetchall() class RecipeProxy(object): def __init__(self, recipe): self.id, self.name, self._srm, self.username, self.email, self.printable_type, self.style_name, self.style_url, self.last_updated, self.views = recipe @property def metric_unit(self): return 'EBC' if request.context['metric'] is True else 'SRM' @property def color(self): if self.metric_unit is 'SRM': return self._srm round(self._srm * 1.97, 1) @property def gravatar(self): return 'https://www.gravatar.com/avatar/%s?d=https://draughtcraft.com/images/glass-square.png' % ( md5(self.email.strip().lower()).hexdigest() ) @property def url(self): return '/recipes/%s/' % (('%x' % self.id).lower()) return dict( pages=max(1, int(ceil(total / perpage))), current_page=kw['page'], offset=offset, perpage=perpage, total=total, order_by=kw['order_by'], direction=kw['direction'], recipes=map(RecipeProxy, recipes) ) create = RecipeCreationController()
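# Hedged helper sketch: the SRM -> EBC color conversion used by
# RecipeProxy.color above, pulled out as a standalone function. The 1.97
# factor comes from the code above; `displayed_color` itself is not part of
# this controller.
def displayed_color(srm, metric=False):
    """Return the recipe color as SRM, or as EBC when metric display is on."""
    if not metric:
        return srm
    return round(srm * 1.97, 1)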
# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for HP 3PAR Storage array. This driver requires 3.1.3 firmware on the 3PAR array, using the 3.x version of the hp3parclient. You will need to install the python hp3parclient. sudo pip install --upgrade "hp3parclient>=3.1" Set the following in the cinder.conf file to enable the 3PAR iSCSI Driver along with the required flags: volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver """ import re import sys try: from hp3parclient import exceptions as hpexceptions except ImportError: hpexceptions = None from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LE, _LW from cinder.volume import driver from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) DEFAULT_ISCSI_PORT = 3260 CHAP_USER_KEY = "HPQ-cinder-CHAP-name" CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" class HP3PARISCSIDriver(driver.TransferVD, driver.ManageableVD, driver.ExtendVD, driver.CloneableVD, driver.SnapshotVD, driver.MigrateVD, driver.ConsistencyGroupVD, driver.BaseVD): """OpenStack iSCSI driver to enable 3PAR storage array. Version history: 1.0 - Initial driver 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, session changes, faster clone, requires 3.1.2 MU2 firmware. 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored the drivers to use the new APIs. 1.2.1 - Synchronized extend_volume method. 1.2.2 - Added try/finally around client login/logout. 1.2.3 - log exceptions before raising 1.2.4 - Fixed iSCSI active path bug #1224594 1.2.5 - Added metadata during attach/detach bug #1258033 1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515 This update now requires 3.1.2 MU3 firmware 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 2.0.0 - Update hp3parclient API uses 3.0.x 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Added support for managing/unmanaging of volumes 2.0.4 - Added support for volume retype 2.0.5 - Added CHAP support, requires 3.1.3 MU1 firmware and hp3parclient 3.1.0. 2.0.6 - Fixing missing login/logout around attach/detach bug #1367429 2.0.7 - Add support for pools with model update 2.0.8 - Migrate without losing type settings bug #1356608 2.0.9 - Removing locks bug #1381190 2.0.10 - Add call to queryHost instead SSH based findHost #1398206 2.0.11 - Added missing host name during attach fix #1398206 2.0.12 - Removed usage of host name cache #1398914 2.0.13 - Update LOG usage to fix translations. bug #1384312 2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be used during live-migration. bug #1423958 2.0.15 - Added support for updated detach_volume attachment. 
2.0.16 - Added encrypted property to initialize_connection #1439917 2.0.17 - Python 3 fixes 2.0.18 - Improved VLUN creation and deletion logic. #1469816 2.0.19 - Changed initialize_connection to use getHostVLUNs. #1475064 2.0.20 - Adding changes to support 3PAR iSCSI multipath. 2.0.21 - Adds consistency group support 2.0.22 - Update driver to use ABC metaclasses 2.0.23 - Added update_migrated_volume. bug # 1492023 """ VERSION = "2.0.23" def __init__(self, *args, **kwargs): super(HP3PARISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hpcommon.hp3par_opts) self.configuration.append_config_values(san.san_opts) def _init_common(self): return hpcommon.HP3PARCommon(self.configuration) def _login(self): common = self._init_common() common.do_setup(None) common.client_login() return common def _logout(self, common): common.client_logout() def _check_flags(self, common): """Sanity check to ensure we have required options set.""" required_flags = ['hp3par_api_url', 'hp3par_username', 'hp3par_password', 'san_ip', 'san_login', 'san_password'] common.check_flags(self.configuration, required_flags) def get_volume_stats(self, refresh=False): common = self._login() try: self._stats = common.get_volume_stats( refresh, self.get_filter_function(), self.get_goodness_function()) self._stats['storage_protocol'] = 'iSCSI' self._stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') self._stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return self._stats finally: self._logout(common) def do_setup(self, context): common = self._init_common() common.do_setup(context) self._check_flags(common) common.check_for_setup_error() common.client_login() try: self.initialize_iscsi_ports(common) finally: self._logout(common) def initialize_iscsi_ports(self, common): # map iscsi_ip-> ip_port # -> iqn # -> nsp self.iscsi_ips = {} temp_iscsi_ip = {} # use the 3PAR ip_addr list for iSCSI configuration if len(self.configuration.hp3par_iscsi_ips) > 0: # add port values to ip_addr, if necessary for ip_addr in self.configuration.hp3par_iscsi_ips: ip = ip_addr.split(':') if len(ip) == 1: temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT} elif len(ip) == 2: temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]} else: LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr) # add the single value iscsi_ip_address option to the IP dictionary. # This way we can see if it's a valid iSCSI IP. If it's not valid, # we won't use it and won't bother to report it, see below if (self.configuration.iscsi_ip_address not in temp_iscsi_ip): ip = self.configuration.iscsi_ip_address ip_port = self.configuration.iscsi_port temp_iscsi_ip[ip] = {'ip_port': ip_port} # get all the valid iSCSI ports from 3PAR # when found, add the valid iSCSI ip, ip port, iqn and nsp # to the iSCSI IP dictionary iscsi_ports = common.get_active_iscsi_target_ports() for port in iscsi_ports: ip = port['IPAddr'] if ip in temp_iscsi_ip: ip_port = temp_iscsi_ip[ip]['ip_port'] self.iscsi_ips[ip] = {'ip_port': ip_port, 'nsp': port['nsp'], 'iqn': port['iSCSIName'] } del temp_iscsi_ip[ip] # if the single value iscsi_ip_address option is still in the # temp dictionary it's because it defaults to $my_ip which doesn't # make sense in this context. So, if present, remove it and move on. 
if (self.configuration.iscsi_ip_address in temp_iscsi_ip): del temp_iscsi_ip[self.configuration.iscsi_ip_address] # lets see if there are invalid iSCSI IPs left in the temp dict if len(temp_iscsi_ip) > 0: LOG.warning(_LW("Found invalid iSCSI IP address(s) in " "configuration option(s) hp3par_iscsi_ips or " "iscsi_ip_address '%s.'"), (", ".join(temp_iscsi_ip))) if not len(self.iscsi_ips) > 0: msg = _('At least one valid iSCSI IP address must be set.') LOG.error(msg) raise exception.InvalidInput(reason=msg) def check_for_setup_error(self): """Setup errors are already checked for in do_setup so return pass.""" pass def create_volume(self, volume): common = self._login() try: return common.create_volume(volume) finally: self._logout(common) def create_cloned_volume(self, volume, src_vref): """Clone an existing volume.""" common = self._login() try: return common.create_cloned_volume(volume, src_vref) finally: self._logout(common) def delete_volume(self, volume): common = self._login() try: common.delete_volume(volume) finally: self._logout(common) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. TODO: support using the size from the user. """ common = self._login() try: return common.create_volume_from_snapshot(volume, snapshot) finally: self._logout(common) def create_snapshot(self, snapshot): common = self._login() try: common.create_snapshot(snapshot) finally: self._logout(common) def delete_snapshot(self, snapshot): common = self._login() try: common.delete_snapshot(snapshot) finally: self._logout(common) def initialize_connection(self, volume, connector): """Assigns the volume to a server. Assign any created volume to a compute node/host so that it can be used from that host. This driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. Example return value: { 'driver_volume_type': 'iscsi' 'data': { 'encrypted': False, 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_protal': '127.0.0.1:3260', 'volume_id': 1, } } Steps to export a volume on 3PAR * Get the 3PAR iSCSI iqn * Create a host on the 3par * create vlun on the 3par """ common = self._login() try: # we have to make sure we have a host host, username, password = self._create_host( common, volume, connector) if connector['multipath']: ready_ports = common.client.getiSCSIPorts( state=common.client.PORT_STATE_READY) target_portals = [] target_iqns = [] target_luns = [] # Target portal ips are defined in cinder.conf. target_portal_ips = self.iscsi_ips.keys() # Collect all existing VLUNs for this volume/host combination. existing_vluns = common.find_existing_vluns(volume, host) # Cycle through each ready iSCSI port and determine if a new # VLUN should be created or an existing one used. for port in ready_ports: iscsi_ip = port['IPAddr'] if iscsi_ip in target_portal_ips: vlun = None # check for an already existing VLUN matching the # nsp for this iSCSI IP. If one is found, use it # instead of creating a new VLUN. 
for v in existing_vluns: portPos = common.build_portPos( self.iscsi_ips[iscsi_ip]['nsp']) if v['portPos'] == portPos: vlun = v break else: vlun = common.create_vlun( volume, host, self.iscsi_ips[iscsi_ip]['nsp']) iscsi_ip_port = "%s:%s" % ( iscsi_ip, self.iscsi_ips[iscsi_ip]['ip_port']) target_portals.append(iscsi_ip_port) target_iqns.append(port['iSCSIName']) target_luns.append(vlun['lun']) else: LOG.warning(_LW("iSCSI IP: '%s' was not found in " "hp3par_iscsi_ips list defined in " "cinder.conf."), iscsi_ip) info = {'driver_volume_type': 'iscsi', 'data': {'target_portals': target_portals, 'target_iqns': target_iqns, 'target_luns': target_luns, 'target_discovered': True } } else: least_used_nsp = None # check if a VLUN already exists for this host existing_vlun = common.find_existing_vlun(volume, host) if existing_vlun: # We override the nsp here on purpose to force the # volume to be exported out the same IP as it already is. # This happens during nova live-migration, we want to # disable the picking of a different IP that we export # the volume to, or nova complains. least_used_nsp = common.build_nsp(existing_vlun['portPos']) if not least_used_nsp: least_used_nsp = self._get_least_used_nsp_for_host( common, host['name']) vlun = None if existing_vlun is None: # now that we have a host, create the VLUN vlun = common.create_vlun(volume, host, least_used_nsp) else: vlun = existing_vlun if least_used_nsp is None: LOG.warning(_LW("Least busy iSCSI port not found, " "using first iSCSI port in list.")) iscsi_ip = self.iscsi_ips.keys()[0] else: iscsi_ip = self._get_ip_using_nsp(least_used_nsp) iscsi_ip_port = self.iscsi_ips[iscsi_ip]['ip_port'] iscsi_target_iqn = self.iscsi_ips[iscsi_ip]['iqn'] info = {'driver_volume_type': 'iscsi', 'data': {'target_portal': "%s:%s" % (iscsi_ip, iscsi_ip_port), 'target_iqn': iscsi_target_iqn, 'target_lun': vlun['lun'], 'target_discovered': True } } if self.configuration.hp3par_iscsi_chap_enabled: info['data']['auth_method'] = 'CHAP' info['data']['auth_username'] = username info['data']['auth_password'] = password encryption_key_id = volume.get('encryption_key_id', None) info['data']['encrypted'] = encryption_key_id is not None return info finally: self._logout(common) def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" common = self._login() try: hostname = common._safe_hostname(connector['host']) common.terminate_connection( volume, hostname, iqn=connector['initiator']) self._clear_chap_3par(common, volume) finally: self._logout(common) def _clear_chap_3par(self, common, volume): """Clears CHAP credentials on a 3par volume. Ignore exceptions caused by the keys not being present on a volume. """ vol_name = common._get_3par_vol_name(volume['id']) try: common.client.removeVolumeMetaData(vol_name, CHAP_USER_KEY) except hpexceptions.HTTPNotFound: pass except Exception: raise try: common.client.removeVolumeMetaData(vol_name, CHAP_PASS_KEY) except hpexceptions.HTTPNotFound: pass except Exception: raise def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain, persona_id): """Create a 3PAR host. Create a 3PAR host, if there is already a host on the 3par using the same iqn but with a different hostname, return the hostname used by 3PAR. 
""" # first search for an existing host host_found = None hosts = common.client.queryHost(iqns=[iscsi_iqn]) if hosts and hosts['members'] and 'name' in hosts['members'][0]: host_found = hosts['members'][0]['name'] if host_found is not None: return host_found else: if isinstance(iscsi_iqn, six.string_types): iqn = [iscsi_iqn] else: iqn = iscsi_iqn persona_id = int(persona_id) common.client.createHost(hostname, iscsiNames=iqn, optional={'domain': domain, 'persona': persona_id}) return hostname def _modify_3par_iscsi_host(self, common, hostname, iscsi_iqn): mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, 'iSCSINames': [iscsi_iqn]} common.client.modifyHost(hostname, mod_request) def _set_3par_chaps(self, common, hostname, volume, username, password): """Sets a 3PAR host's CHAP credentials.""" if not self.configuration.hp3par_iscsi_chap_enabled: return mod_request = {'chapOperation': common.client.HOST_EDIT_ADD, 'chapOperationMode': common.client.CHAP_INITIATOR, 'chapName': username, 'chapSecret': password} common.client.modifyHost(hostname, mod_request) def _create_host(self, common, volume, connector): """Creates or modifies existing 3PAR host.""" # make sure we don't have the host already host = None username = None password = None hostname = common._safe_hostname(connector['host']) cpg = common.get_cpg(volume, allowSnap=True) domain = common.get_domain(cpg) # Get the CHAP secret if CHAP is enabled if self.configuration.hp3par_iscsi_chap_enabled: vol_name = common._get_3par_vol_name(volume['id']) username = common.client.getVolumeMetaData( vol_name, CHAP_USER_KEY)['value'] password = common.client.getVolumeMetaData( vol_name, CHAP_PASS_KEY)['value'] try: host = common._get_3par_host(hostname) except hpexceptions.HTTPNotFound: # get persona from the volume type extra specs persona_id = common.get_persona_type(volume) # host doesn't exist, we have to create it hostname = self._create_3par_iscsi_host(common, hostname, connector['initiator'], domain, persona_id) self._set_3par_chaps(common, hostname, volume, username, password) host = common._get_3par_host(hostname) else: if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1: self._modify_3par_iscsi_host( common, hostname, connector['initiator']) self._set_3par_chaps( common, hostname, volume, username, password) host = common._get_3par_host(hostname) elif (not host['initiatorChapEnabled'] and self.configuration.hp3par_iscsi_chap_enabled): LOG.warning(_LW("Host exists without CHAP credentials set and " "has iSCSI attachments but CHAP is enabled. " "Updating host with new CHAP credentials.")) self._set_3par_chaps( common, hostname, volume, username, password) return host, username, password def _do_export(self, common, volume): """Gets the associated account, generates CHAP info and updates.""" model_update = {} if not self.configuration.hp3par_iscsi_chap_enabled: model_update['provider_auth'] = None return model_update # CHAP username will be the hostname chap_username = volume['host'].split('@')[0] chap_password = None try: # Get all active VLUNs for the host vluns = common.client.getHostVLUNs(chap_username) # Host has active VLUNs... is CHAP enabled on host? host_info = common.client.getHost(chap_username) if not host_info['initiatorChapEnabled']: LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled.")) except hpexceptions.HTTPNotFound: chap_password = volume_utils.generate_password(16) LOG.warning(_LW("No host or VLUNs exist. 
Generating new " "CHAP key.")) else: # Get a list of all iSCSI VLUNs and see if there is already a CHAP # key assigned to one of them. Use that CHAP key if present, # otherwise create a new one. Skip any VLUNs that are missing # CHAP credentials in metadata. chap_exists = False active_vluns = 0 for vlun in vluns: if not vlun['active']: continue active_vluns += 1 # iSCSI connections start with 'iqn'. if ('remoteName' in vlun and re.match('iqn.*', vlun['remoteName'])): try: chap_password = common.client.getVolumeMetaData( vlun['volumeName'], CHAP_PASS_KEY)['value'] chap_exists = True break except hpexceptions.HTTPNotFound: LOG.debug("The VLUN %s is missing CHAP credentials " "but CHAP is enabled. Skipping.", vlun['remoteName']) else: LOG.warning(_LW("Non-iSCSI VLUN detected.")) if not chap_exists: chap_password = volume_utils.generate_password(16) LOG.warning(_LW("No VLUN contained CHAP credentials. " "Generating new CHAP key.")) # Add CHAP credentials to the volume metadata vol_name = common._get_3par_vol_name(volume['id']) common.client.setVolumeMetaData( vol_name, CHAP_USER_KEY, chap_username) common.client.setVolumeMetaData( vol_name, CHAP_PASS_KEY, chap_password) model_update['provider_auth'] = ('CHAP %s %s' % (chap_username, chap_password)) return model_update def create_export(self, context, volume, connector): common = self._login() try: return self._do_export(common, volume) finally: self._logout(common) def ensure_export(self, context, volume): """Ensure the volume still exists on the 3PAR. Also retrieves CHAP credentials, if present on the volume """ common = self._login() try: vol_name = common._get_3par_vol_name(volume['id']) common.client.getVolume(vol_name) except hpexceptions.HTTPNotFound: LOG.error(_LE("Volume %s doesn't exist on array."), vol_name) else: metadata = common.client.getAllVolumeMetaData(vol_name) username = None password = None model_update = {} model_update['provider_auth'] = None for member in metadata['members']: if member['key'] == CHAP_USER_KEY: username = member['value'] elif member['key'] == CHAP_PASS_KEY: password = member['value'] if username and password: model_update['provider_auth'] = ('CHAP %s %s' % (username, password)) return model_update finally: self._logout(common) def remove_export(self, context, volume): pass def _get_least_used_nsp_for_host(self, common, hostname): """Get the least used NSP for the current host. Steps to determine which NSP to use. 
* If only one iSCSI NSP, return it * If there is already an active vlun to this host, return its NSP * Return NSP with fewest active vluns """ iscsi_nsps = self._get_iscsi_nsps() # If there's only one path, use it if len(iscsi_nsps) == 1: return iscsi_nsps[0] # Try to reuse an existing iscsi path to the host vluns = common.client.getVLUNs() for vlun in vluns['members']: if vlun['active']: if vlun['hostname'] == hostname: temp_nsp = common.build_nsp(vlun['portPos']) if temp_nsp in iscsi_nsps: # this host already has an iscsi path, so use it return temp_nsp # Calculate the least used iscsi nsp least_used_nsp = self._get_least_used_nsp(common, vluns['members'], self._get_iscsi_nsps()) return least_used_nsp def _get_iscsi_nsps(self): """Return the list of candidate nsps.""" nsps = [] for value in self.iscsi_ips.values(): nsps.append(value['nsp']) return nsps def _get_ip_using_nsp(self, nsp): """Return IP associated with given nsp.""" for (key, value) in self.iscsi_ips.items(): if value['nsp'] == nsp: return key def _get_least_used_nsp(self, common, vluns, nspss): """Return the nsp that has the fewest active vluns.""" # return only the nsp (node:server:port) # count the number of nsps nsp_counts = {} for nsp in nspss: # initialize counts to zero nsp_counts[nsp] = 0 current_least_used_nsp = None for vlun in vluns: if vlun['active']: nsp = common.build_nsp(vlun['portPos']) if nsp in nsp_counts: nsp_counts[nsp] = nsp_counts[nsp] + 1 # identify key (nsp) of least used nsp current_smallest_count = sys.maxint for (nsp, count) in nsp_counts.items(): if count < current_smallest_count: current_least_used_nsp = nsp current_smallest_count = count return current_least_used_nsp def extend_volume(self, volume, new_size): common = self._login() try: common.extend_volume(volume, new_size) finally: self._logout(common) def create_consistencygroup(self, context, group): common = self._login() try: common.create_consistencygroup(context, group) finally: self._logout(common) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): common = self._login() try: return common.create_consistencygroup_from_src( context, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) finally: self._logout(common) def delete_consistencygroup(self, context, group): common = self._login() try: return common.delete_consistencygroup(context, group) finally: self._logout(common) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): common = self._login() try: return common.update_consistencygroup(context, group, add_volumes, remove_volumes) finally: self._logout(common) def create_cgsnapshot(self, context, cgsnapshot): common = self._login() try: return common.create_cgsnapshot(context, cgsnapshot) finally: self._logout(common) def delete_cgsnapshot(self, context, cgsnapshot): common = self._login() try: return common.delete_cgsnapshot(context, cgsnapshot) finally: self._logout(common) def manage_existing(self, volume, existing_ref): common = self._login() try: return common.manage_existing(volume, existing_ref) finally: self._logout(common) def manage_existing_get_size(self, volume, existing_ref): common = self._login() try: return common.manage_existing_get_size(volume, existing_ref) finally: self._logout(common) def unmanage(self, volume): common = self._login() try: common.unmanage(volume) finally: self._logout(common) def attach_volume(self, context, volume, instance_uuid, host_name, mountpoint): common = 
self._login()
        try:
            common.attach_volume(volume, instance_uuid)
        finally:
            self._logout(common)

    def detach_volume(self, context, volume, attachment=None):
        common = self._login()
        try:
            common.detach_volume(volume, attachment)
        finally:
            self._logout(common)

    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type."""
        common = self._login()
        try:
            return common.retype(volume, new_type, diff, host)
        finally:
            self._logout(common)

    def migrate_volume(self, context, volume, host):
        if volume['status'] == 'in-use':
            protocol = host['capabilities']['storage_protocol']
            if protocol != 'iSCSI':
                LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
                          "to a host with storage_protocol=%s.", protocol)
                return False, None

        common = self._login()
        try:
            return common.migrate_volume(volume, host)
        finally:
            self._logout(common)

    def update_migrated_volume(self, context, volume, new_volume,
                               original_volume_status):
        """Update the name of the migrated volume to its new ID."""
        common = self._login()
        try:
            return common.update_migrated_volume(context, volume, new_volume,
                                                 original_volume_status)
        finally:
            self._logout(common)

    def get_pool(self, volume):
        common = self._login()
        try:
            return common.get_cpg(volume)
        except hpexceptions.HTTPNotFound:
            reason = (_("Volume %s doesn't exist on array.") % volume)
            LOG.error(reason)
            raise exception.InvalidVolume(reason)
        finally:
            self._logout(common)
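# Hedged illustration of how the flags enforced by _check_flags and the
# volume_driver line from the module docstring map onto a cinder.conf backend
# section. The section name and all values below are placeholders, not
# defaults of this driver.
#
# [3par_iscsi]
# volume_driver = cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
# hp3par_api_url = https://3par.example.org:8080/api/v1
# hp3par_username = 3paradm
# hp3par_password = <password>
# san_ip = 3par.example.org
# san_login = 3paradm
# san_password = <password>
# hp3par_iscsi_ips = 10.0.0.11:3260,10.0.0.12
# hp3par_iscsi_chap_enabled = true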
from typing import Any, Callable, Dict, List, Optional, Set, Tuple import gc from pympler import summary from pympler.util import compat from inspect import isframe, stack from sys import getsizeof from pympler.asizeof import _Py_TPFLAGS_HAVE_GC def ignore_object(obj: Any) -> bool: try: return isframe(obj) except ReferenceError: return True def get_objects(remove_dups: bool = True, include_frames: bool = False ) -> List[Any]: """Return a list of all known objects excluding frame objects. If (outer) frame objects shall be included, pass `include_frames=True`. In order to prevent building reference cycles, the current frame object (of the caller of get_objects) is ignored. This will not prevent creating reference cycles if the object list is passed up the call-stack. Therefore, frame objects are not included by default. Keyword arguments: remove_dups -- if True, all duplicate objects will be removed. include_frames -- if True, includes frame objects. """ gc.collect() # Do not initialize local variables before calling gc.get_objects or those # will be included in the list. Furthermore, ignore frame objects to # prevent reference cycles. tmp = gc.get_objects() tmp = [o for o in tmp if not ignore_object(o)] res = [] for o in tmp: # gc.get_objects returns only container objects, but we also want # the objects referenced by them refs = get_referents(o) for ref in refs: if not gc.is_tracked(ref): # we already got the container objects, now we only add # non-container objects res.append(ref) res.extend(tmp) if remove_dups: res = _remove_duplicates(res) if include_frames: for sf in stack()[2:]: res.append(sf[0]) return res def get_size(objects: List[Any]) -> int: """Compute the total size of all elements in objects.""" res = 0 for o in objects: try: res += getsizeof(o) except AttributeError: print("IGNORING: type=%s; o=%s" % (str(type(o)), str(o))) return res def get_diff(left: List[Any], right: List[Any]) -> Dict[str, List[Any]]: """Get the difference of both lists. The result will be a dict with this form {'+': [], '-': []}. Items listed in '+' exist only in the right list, items listed in '-' exist only in the left list. """ res = {'+': [], '-': []} # type: Dict[str, List[Any]] def partition(objects: List[Any]) -> Dict[type, List[Any]]: """Partition the passed object list.""" res = {} # type: Dict[type, List[Any]] for o in objects: t = type(o) if type(o) not in res: res[t] = [] res[t].append(o) return res def get_not_included(foo: List[Any], bar: Dict[type, List[Any]] ) -> List[Any]: """Compare objects from foo with objects defined in the values of bar (set of partitions). Returns a list of all objects included in list, but not dict values. """ res = [] # type: List[Any] for o in foo: if not compat.object_in_list(type(o), bar): res.append(o) elif not compat.object_in_list(o, bar[type(o)]): res.append(o) return res # Create partitions of both lists. This will reduce the time required for # the comparison left_objects = partition(left) right_objects = partition(right) # and then do the diff res['+'] = get_not_included(right, left_objects) res['-'] = get_not_included(left, right_objects) return res def sort(objects: List[Any]) -> List[Any]: """Sort objects by size in bytes.""" objects = sorted(objects, key=getsizeof) return objects def filter(objects: List[Any], Type: Optional[type] = None, min: int = -1, max: int = -1) -> List[Any]: """Filter objects. The filter can be by type, minimum size, and/or maximum size. 
Keyword arguments: Type -- object type to filter by min -- minimum object size max -- maximum object size """ res = [] # type: List[Any] if min > max and max > -1: raise ValueError("minimum must be smaller than maximum") if Type is not None: objects = [o for o in objects if isinstance(o, Type)] if min > -1: objects = [o for o in objects if getsizeof(o) > min] if max > -1: objects = [o for o in objects if getsizeof(o) < max] return objects def get_referents(object: Any, level: int = 1) -> List[Any]: """Get all referents of an object up to a certain level. The referents will not be returned in a specific order and will not contain duplicate objects. Duplicate objects will be removed. Keyword arguments: level -- level of indirection to which referents considered. This function is recursive. """ res = gc.get_referents(object) level -= 1 if level > 0: for o in res: res.extend(get_referents(o, level)) res = _remove_duplicates(res) return res def _get_usage(function: Callable, *args: Any) -> Optional[List]: """Test if more memory is used after the function has been called. The function will be invoked twice and only the second measurement will be considered. Thus, memory used in initialisation (e.g. loading modules) will not be included in the result. The goal is to identify memory leaks caused by functions which use more and more memory. Any arguments next to the function will be passed on to the function on invocation. Note that this function is currently experimental, because it is not tested thoroughly and performs poorly. """ # The usage of a function is calculated by creating one summary of all # objects before the function is invoked and afterwards. These summaries # are compared and the diff is returned. # This function works in a 2-steps process. Before the actual function is # invoked an empty dummy function is measurement to identify the overhead # involved in the measuring process. This overhead then is subtracted from # the measurement performed on the passed function. The result reflects the # actual usage of a function call. # Also, a measurement is performed twice, allowing the adjustment to # initializing things, e.g. modules res = None def _get_summaries(function: Callable, *args: Any) -> Tuple: """Get a 2-tuple containing one summary from before, and one summary from after the function has been invoked. """ s_before = summary.summarize(get_objects()) function(*args) s_after = summary.summarize(get_objects()) return (s_before, s_after) def _get_usage(function: Callable, *args: Any) -> List: """Get the usage of a function call. This function is to be used only internally. The 'real' get_usage function is a wrapper around _get_usage, but the workload is done here. 
""" # init before calling (s_before, s_after) = _get_summaries(function, *args) # ignore all objects used for the measurement ignore = [] if s_before != s_after: ignore.append(s_before) for row in s_before: # ignore refs from summary and frame (loop) if len(gc.get_referrers(row)) == 2: ignore.append(row) for item in row: # ignore refs from summary and frame (loop) if len(gc.get_referrers(item)) == 2: ignore.append(item) for o in ignore: s_after = summary._subtract(s_after, o) res = summary.get_diff(s_before, s_after) return summary._sweep(res) # calibrate; twice for initialization def noop() -> None: pass offset = _get_usage(noop) offset = _get_usage(noop) # perform operation twice to handle objects possibly used in # initialisation tmp = _get_usage(function, *args) tmp = _get_usage(function, *args) tmp = summary.get_diff(offset, tmp) tmp = summary._sweep(tmp) if len(tmp) != 0: res = tmp return res def _is_containerobject(o: Any) -> bool: """Is the passed object a container object.""" return bool(getattr(type(o), '__flags__', 0) & _Py_TPFLAGS_HAVE_GC) def _remove_duplicates(objects: List[Any]) -> List[Any]: """Remove duplicate objects. Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark """ seen = set() # type: Set[int] result = [] for item in objects: marker = id(item) if marker in seen: continue seen.add(marker) result.append(item) return result def print_summary() -> None: """Print a summary of all known objects.""" summary.print_(summary.summarize(get_objects()))
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2016 F5 Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = { 'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.0' } DOCUMENTATION = ''' --- module: bigip_irule short_description: Manage iRules across different modules on a BIG-IP. description: - Manage iRules across different modules on a BIG-IP. version_added: "2.2" options: content: description: - When used instead of 'src', sets the contents of an iRule directly to the specified value. This is for simple values, but can be used with lookup plugins for anything complex or with formatting. Either one of C(src) or C(content) must be provided. module: description: - The BIG-IP module to add the iRule to. required: true choices: - ltm - gtm partition: description: - The partition to create the iRule on. required: false default: Common name: description: - The name of the iRule. required: true src: description: - The iRule file to interpret and upload to the BIG-IP. Either one of C(src) or C(content) must be provided. required: true state: description: - Whether the iRule should exist or not. required: false default: present choices: - present - absent notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. 
extends_documentation_fragment: f5 requirements: - f5-sdk author: - Tim Rupp (@caphrim007) ''' EXAMPLES = ''' - name: Add the iRule contained in template irule.tcl to the LTM module bigip_irule: content: "{{ lookup('template', 'irule.tcl') }}" module: "ltm" name: "MyiRule" password: "secret" server: "lb.mydomain.com" state: "present" user: "admin" delegate_to: localhost - name: Add the iRule contained in static file irule.tcl to the LTM module bigip_irule: module: "ltm" name: "MyiRule" password: "secret" server: "lb.mydomain.com" src: "irule.tcl" state: "present" user: "admin" delegate_to: localhost ''' RETURN = ''' module: description: The module that the iRule was added to returned: changed and success type: string sample: "gtm" src: description: The filename that included the iRule source returned: changed and success, when provided type: string sample: "/opt/src/irules/example1.tcl" name: description: The name of the iRule that was managed returned: changed and success type: string sample: "my-irule" content: description: The content of the iRule that was managed returned: changed and success type: string sample: "when LB_FAILED { set wipHost [LB::server addr] }" partition: description: The partition in which the iRule was managed returned: changed and success type: string sample: "Common" ''' try: from f5.bigip import ManagementRoot from icontrol.session import iControlUnexpectedHTTPError HAS_F5SDK = True except ImportError: HAS_F5SDK = False MODULES = ['gtm', 'ltm'] class BigIpiRule(object): def __init__(self, *args, **kwargs): if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") if kwargs['state'] != 'absent': if not kwargs['content'] and not kwargs['src']: raise F5ModuleError( "Either 'content' or 'src' must be provided" ) source = kwargs['src'] if source: with open(source) as f: kwargs['content'] = f.read() # The params that change in the module self.cparams = dict() # Stores the params that are sent to the module self.params = kwargs self.api = ManagementRoot(kwargs['server'], kwargs['user'], kwargs['password'], port=kwargs['server_port'], token=True) def flush(self): result = dict() state = self.params['state'] try: if state == "present": changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) result.update(**self.cparams) result.update(dict(changed=changed)) return result def read(self): """Read information and transform it The values that are returned by BIG-IP in the f5-sdk can have encoding attached to them as well as be completely missing in some cases. Therefore, this method will transform the data from the BIG-IP into a format that is more easily consumable by the rest of the class and the parameters that are supported by the module. 
""" p = dict() name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if module == 'ltm': r = self.api.tm.ltm.rules.rule.load( name=name, partition=partition ) elif module == 'gtm': r = self.api.tm.gtm.rules.rule.load( name=name, partition=partition ) if hasattr(r, 'apiAnonymous'): p['content'] = str(r.apiAnonymous.strip()) p['name'] = name return p def delete(self): params = dict() check_mode = self.params['check_mode'] module = self.params['module'] params['name'] = self.params['name'] params['partition'] = self.params['partition'] self.cparams = camel_dict_to_snake_dict(params) if check_mode: return True if module == 'ltm': r = self.api.tm.ltm.rules.rule.load(**params) r.delete() elif module == 'gtm': r = self.api.tm.gtm.rules.rule.load(**params) r.delete() if self.exists(): raise F5ModuleError("Failed to delete the iRule") return True def exists(self): name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if module == 'ltm': return self.api.tm.ltm.rules.rule.exists( name=name, partition=partition ) elif module == 'gtm': return self.api.tm.gtm.rules.rule.exists( name=name, partition=partition ) def present(self): if self.exists(): return self.update() else: return self.create() def update(self): params = dict() current = self.read() changed = False check_mode = self.params['check_mode'] content = self.params['content'] name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if content is not None: content = content.strip() if 'content' in current: if content != current['content']: params['apiAnonymous'] = content else: params['apiAnonymous'] = content if params: changed = True params['name'] = name params['partition'] = partition self.cparams = camel_dict_to_snake_dict(params) if 'api_anonymous' in self.cparams: self.cparams['content'] = self.cparams.pop('api_anonymous') if self.params['src']: self.cparams['src'] = self.params['src'] if check_mode: return changed else: return changed if module == 'ltm': d = self.api.tm.ltm.rules.rule.load( name=name, partition=partition ) d.update(**params) d.refresh() elif module == 'gtm': d = self.api.tm.gtm.rules.rule.load( name=name, partition=partition ) d.update(**params) d.refresh() return True def create(self): params = dict() check_mode = self.params['check_mode'] content = self.params['content'] name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if check_mode: return True if content is not None: params['apiAnonymous'] = content.strip() params['name'] = name params['partition'] = partition self.cparams = camel_dict_to_snake_dict(params) if 'api_anonymous' in self.cparams: self.cparams['content'] = self.cparams.pop('api_anonymous') if self.params['src']: self.cparams['src'] = self.params['src'] if check_mode: return True if module == 'ltm': d = self.api.tm.ltm.rules.rule d.create(**params) elif module == 'gtm': d = self.api.tm.gtm.rules.rule d.create(**params) if not self.exists(): raise F5ModuleError("Failed to create the iRule") return True def absent(self): changed = False if self.exists(): changed = self.delete() return changed def main(): argument_spec = f5_argument_spec() meta_args = dict( content=dict(required=False, default=None), src=dict(required=False, default=None), name=dict(required=True), module=dict(required=True, choices=MODULES) ) argument_spec.update(meta_args) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ 
            ['content', 'src']
        ]
    )

    try:
        obj = BigIpiRule(check_mode=module.check_mode, **module.params)
        result = obj.flush()
        module.exit_json(**result)
    except F5ModuleError as e:
        module.fail_json(msg=str(e))


from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *


if __name__ == '__main__':
    main()
# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from cinderclient import client as cinderclient from glanceclient import client as glanceclient from keystoneclient.auth.identity.generic import password as keystone_auth from keystoneclient import client as keystoneclient from keystoneclient import session as keystone_session from neutronclient.v2_0 import client as neutronclient from novaclient import api_versions as nova_api_versions from novaclient import client as novaclient from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from ec2api.i18n import _, _LI, _LW logger = logging.getLogger(__name__) ec2_opts = [ cfg.BoolOpt('ssl_insecure', default=False, help="Verify HTTPS connections."), cfg.StrOpt('ssl_ca_file', help="CA certificate file to use to verify " "connecting clients"), cfg.StrOpt('nova_service_type', default='compute', help='Service type of Compute API, registered in Keystone ' 'catalog. Should be v2.1 with microversion support. ' 'If it is obsolete v2, a lot of useful EC2 compliant ' 'instance properties will be unavailable.'), cfg.StrOpt('cinder_service_type', default='volume', help='Service type of Volume API, registered in Keystone ' 'catalog.'), # TODO(andrey-mp): keystone v3 allows to pass domain_name # or domain_id to auth. This code should support this feature. cfg.StrOpt('admin_user', help=_("Admin user to access specific cloud resourses")), cfg.StrOpt('admin_password', help=_("Admin password"), secret=True), cfg.StrOpt('admin_tenant_name', help=_("Admin tenant name")), ] CONF = cfg.CONF CONF.register_opts(ec2_opts) # Nova API version with microversions support REQUIRED_NOVA_API_VERSION = '2.1' REQUIRED_NOVA_API_VERSION_ID = 'v%s' % REQUIRED_NOVA_API_VERSION LEGACY_NOVA_API_VERSION = '2' # Nova API's 2.3 microversion provides additional EC2 compliant instance # properties # Nova API's 2.10 microversion provides admin access to users keypairs, # which allows metadata service to expose openssh part of an instance key REQUIRED_NOVA_API_MICROVERSION = '2.10' _nova_api_version = None def nova(context): global _nova_api_version if not _nova_api_version: _nova_api_version = _get_nova_api_version(context) clnt = novaclient.Client(_nova_api_version, session=context.session, service_type=CONF.nova_service_type) # NOTE(ft): workaround for LP #1494116 bug if not hasattr(clnt.client, 'last_request_id'): setattr(clnt.client, 'last_request_id', None) return clnt def neutron(context): return neutronclient.Client(session=context.session, service_type='network') def glance(context): return glanceclient.Client('1', service_type='image', session=context.session) def cinder(context): url = context.session.get_endpoint(service_type=CONF.cinder_service_type) # TODO(jamielennox): This should be using proper version discovery from # the cinder service rather than just inspecting the URL for certain string # values. 
version = cinderclient.get_volume_api_from_url(url) return cinderclient.Client(version, session=context.session, service_type=CONF.cinder_service_type) def keystone(context): auth_url = context.session.get_endpoint(service_type='identity') return keystoneclient.Client(auth_url=auth_url, session=context.session) def nova_cert(context): _cert_api = _rpcapi_CertAPI(context) return _cert_api def _get_nova_api_version(context): client = novaclient.Client(REQUIRED_NOVA_API_VERSION, session=context.session, service_type=CONF.nova_service_type) required = nova_api_versions.APIVersion(REQUIRED_NOVA_API_MICROVERSION) current = client.versions.get_current() if not current: logger.warning( _LW('Could not check Nova API version because no version ' 'was found in Nova version list for url %(url)s of service ' 'type "%(service_type)s". ' 'Use v%(required_api_version)s Nova API.'), {'url': client.client.get_endpoint(), 'service_type': CONF.nova_service_type, 'required_api_version': REQUIRED_NOVA_API_MICROVERSION}) return REQUIRED_NOVA_API_MICROVERSION if current.id != REQUIRED_NOVA_API_VERSION_ID: logger.warning( _LW('Specified "%s" Nova service type does not support v2.1 API. ' 'A lot of useful EC2 compliant instance properties ' 'will be unavailable.'), CONF.nova_service_type) return LEGACY_NOVA_API_VERSION if (nova_api_versions.APIVersion(current.version) < required): logger.warning( _LW('Nova support v%(nova_api_version)s, ' 'but v%(required_api_version)s is required. ' 'A lot of useful EC2 compliant instance properties ' 'will be unavailable.'), {'nova_api_version': current.version, 'required_api_version': REQUIRED_NOVA_API_MICROVERSION}) return current.version logger.info(_LI('Provided Nova API version is v%(nova_api_version)s, ' 'used one is v%(required_api_version)s'), {'nova_api_version': current.version, 'required_api_version': ( REQUIRED_NOVA_API_MICROVERSION)}) return REQUIRED_NOVA_API_MICROVERSION class _rpcapi_CertAPI(object): '''Client side of the cert rpc API.''' def __init__(self, context): super(_rpcapi_CertAPI, self).__init__() target = messaging.Target(topic=CONF.cert_topic, version='2.0') self.client = _rpc_get_client(target) self.context = context def decrypt_text(self, text): cctxt = self.client.prepare() return cctxt.call(self.context, 'decrypt_text', project_id=self.context.project_id, text=text) _rpc_TRANSPORT = None def _rpc_init(conf): global _rpc_TRANSPORT # NOTE(ft): set control_exchange parameter to use Nova cert topic messaging.set_transport_defaults('nova') _rpc_TRANSPORT = messaging.get_transport(conf) def _rpc_get_client(target): if not _rpc_TRANSPORT: _rpc_init(CONF) assert _rpc_TRANSPORT is not None serializer = _rpc_RequestContextSerializer() return messaging.RPCClient(_rpc_TRANSPORT, target, serializer=serializer) class _rpc_RequestContextSerializer(messaging.NoOpSerializer): def serialize_context(self, context): return context.to_dict() _admin_session = None def get_os_admin_session(): """Create a context to interact with OpenStack as an administrator.""" # NOTE(ft): this is a singletone because keystone's session looks thread # safe for both regular and token renewal requests global _admin_session if not _admin_session: auth = keystone_auth.Password( username=CONF.admin_user, password=CONF.admin_password, project_name=CONF.admin_tenant_name, tenant_name=CONF.admin_tenant_name, auth_url=CONF.keystone_url, ) params = {'auth': auth} update_request_params_with_ssl(params) _admin_session = keystone_session.Session(**params) return _admin_session def 
update_request_params_with_ssl(params):
    verify = CONF.ssl_ca_file or not CONF.ssl_insecure
    if verify is not True:
        params['verify'] = verify
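# Hedged sketch of how these client factories are typically combined. The
# `_AdminContext` class is a stand-in for whatever request context normally
# carries the keystone session expected by nova()/neutron()/cinder(); it is
# not part of this module.
def _example_admin_clients():
    """Build service clients on top of the admin session defined above."""
    class _AdminContext(object):
        def __init__(self, session):
            self.session = session

    context = _AdminContext(get_os_admin_session())
    nova_client = nova(context)        # picks the API microversion once
    neutron_client = neutron(context)
    return nova_client, neutron_client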
from utils import Tools def _fromUtf8(s): return s import math from PyQt4.QtGui import * from PyQt4.QtCore import * from PyQt4 import QtCore, QtGui from PyQt4.phonon import Phonon from main_const import * from helpers import Help from helpers import Database, Reference from utils import Tools class KujiDesign(QMainWindow): def __init__(self, mainprogram): QMainWindow.__init__(self) qss_file = open(STYLESHEET).read() self.setStyleSheet(qss_file) self.sessiondb = Database.SessionDatabase(self) self.main = mainprogram self.player = None self.resize(1170, 830) self.sessioncreated = False app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(WORKINGDIRECTORY, "assets", "icons", "mainwinicon"), QtCore.QSize(16, 16)) self.setWindowIcon(app_icon) self.initgui() def viewgoals(self): if self.sessiondb.goalsset: self.sessiondb.displaycurrentgoals() else: QMessageBox.information(None, "No Goals Set", "No Goals Set. Please Set A Goal", QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton) def viewcompletedgoals(self): if self.sessiondb.completedgoalsset: self.sessiondb.displaycompletedgoals() else: QMessageBox.information(None, "No Goals Completed Yet", "Keep Up The Hard Work And You Will Achieve A Goal Soon...", QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton) def howtousethisprogram(self): Help.KujiInHelp(self) def goalcompleted(self, goaltext): goalcompleteddialog = QDialog(self) goalcompleteddialog.resize(406, 235) goalcompleteddialog.setLayoutDirection(Qt.LeftToRight) goalcompleteddialog.setAutoFillBackground(False) goalcompleteddialog.setModal(False) self.goalcompletedtopLabel = QLabel(goalcompleteddialog) self.goalcompletedtopLabel.setGeometry(QRect(120, 10, 161, 20)) self.goalcompletedtopLabel.setStyleSheet("font: 12pt \"Arial\";\n" "color: rgb(152, 166, 168);") self.goalcompletedmiddleLabel = QLabel(goalcompleteddialog) self.goalcompletedmiddleLabel.setGeometry(QRect(30, 40, 341, 21)) self.goalcompletedValue = QLabel(goalcompleteddialog) self.goalcompletedValue.setGeometry(QRect(90, 70, 211, 51)) self.goalcompletedValue.setStyleSheet("border: 1px solid black;\n" "font: 75 18pt \"Arial\";\n" "color: rgb(152, 166, 168);") self.goalcompletedValue.setAlignment(QtCore.Qt.AlignCenter) self.goalcompletedbottomLabel = QtGui.QLabel(goalcompleteddialog) self.goalcompletedbottomLabel.setGeometry(QtCore.QRect(10, 130, 391, 51)) self.goalcompletedbottomLabel.setAlignment(QtCore.Qt.AlignCenter) self.goalcompletedbottomLabel.setWordWrap(True) self.goalcompletedButton = QtGui.QPushButton(goalcompleteddialog) self.goalcompletedButton.setGeometry(QtCore.QRect(310, 200, 84, 30)) goalcompleteddialog.setWindowTitle("Goal Completed") self.goalcompletedtopLabel.setText("GOAL COMPLETED!") self.goalcompletedmiddleLabel.setText("It Wasn\'t Easy, But You Stuck With It And Achieved Your Goal Of:") self.goalcompletedValue.setText(goaltext) self.goalcompletedbottomLabel.setText( "Celebrate In Your Success, And Achieve The Next Goal You Have Set. Anything Is Possible!") self.goalcompletedButton.setText("OK") QtCore.QObject.connect(self.goalcompletedButton, QtCore.SIGNAL("clicked()"), goalcompleteddialog.accept) goalcompleteddialog.exec_() # Test Here If A New Goal Is Set, If Not Ask User To Set One! 
    def setgoalstatus(self):
        """Method To Set Goal Current, Percentage And Total On Startup And After A Session Is Finished"""
        currenttext, percent, totaltext = self.sessiondb.getgoalstatus()
        if None not in [currenttext, percent, totaltext]:
            self.currentgoalLabel.setText(currenttext)
            self.goalProgressBar.setValue(percent)
            self.goalLabel.setText(totaltext)

    def goalpacing(self):
        if self.sessiondb.goalsset:
            self.goalpacingDialog = QDialog(self)
            self.goalpacingDialog.resize(400, 258)
            self.goalpacingtopLabel = QtGui.QLabel(self.goalpacingDialog)
            self.goalpacingtopLabel.setGeometry(QtCore.QRect(10, 20, 371, 81))
            self.goalpacingtopLabel.setAlignment(QtCore.Qt.AlignCenter)
            self.goalpacingtopLabel.setWordWrap(True)
            self.goalpacingtopLabel.setObjectName(_fromUtf8("goalpacingtopLabel"))
            self.horizontalLayoutWidget = QtGui.QWidget(self.goalpacingDialog)
            self.horizontalLayoutWidget.setGeometry(QtCore.QRect(110, 110, 171, 41))
            self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
            self.goalpacingValuesLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
            self.goalpacingValuesLayout.setMargin(0)
            self.goalpacingValuesLayout.setObjectName(_fromUtf8("goalpacingValuesLayout"))
            self.goalpacingValue = QtGui.QSpinBox(self.horizontalLayoutWidget)
            self.goalpacingValue.setMaximum(7)
            self.goalpacingValue.setObjectName(_fromUtf8("goalpacingValue"))
            self.goalpacingValuesLayout.addWidget(self.goalpacingValue)
            self.goalpacingdaysLabel = QtGui.QLabel(self.horizontalLayoutWidget)
            self.goalpacingdaysLabel.setObjectName(_fromUtf8("goalpacingdaysLabel"))
            self.goalpacingValuesLayout.addWidget(self.goalpacingdaysLabel)
            self.horizontalLayoutWidget_2 = QtGui.QWidget(self.goalpacingDialog)
            self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(200, 190, 188, 41))
            self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
            self.goalpacingButtonLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
            self.goalpacingButtonLayout.setMargin(0)
            self.goalpacingButtonLayout.setObjectName(_fromUtf8("goalpacingButtonLayout"))
            self.goalpacingcalculateButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
            self.goalpacingcalculateButton.setObjectName(_fromUtf8("goalpacingcalculateButton"))
            self.goalpacingButtonLayout.addWidget(self.goalpacingcalculateButton)
            self.goalpacingcancelButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
            self.goalpacingcancelButton.setObjectName(_fromUtf8("goalpacingcancelButton"))
            self.goalpacingButtonLayout.addWidget(self.goalpacingcancelButton)
            self.goalpacingDialog.setWindowTitle("Goal Pacing")
            self.goalpacingtopLabel.setText(
                "In Order To Calculate How Much You Need To Meditate To Reach Your Goals, "
                "I Need To Know How Many Days A Week You Will Be Meditating")
            self.goalpacingdaysLabel.setText("Days A Week")
            self.goalpacingcalculateButton.setText("CALCULATE")
            self.goalpacingcancelButton.setText("CANCEL")
            QtCore.QObject.connect(self.goalpacingcalculateButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                                   self.goalpacingDialog.accept)
            QtCore.QObject.connect(self.goalpacingcancelButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                                   self.goalpacingDialog.reject)
            ret = self.goalpacingDialog.exec_()
            if ret == QDialog.Accepted:
                if self.goalpacingValue.value() != 0:
                    goalpacingtext = self.sessiondb.goalpacing(int(self.goalpacingValue.value()))
                    self.statusBar.showMessage(goalpacingtext, 10000)
                else:
                    self.statusBar.showMessage(
                        "You Must Practice At Least Once A Week For Me To Calculate Your Goal Pacing", 3000)
        else:
            QtGui.QMessageBox.information(None, "No Goals Set", "You Must Set At
Least One Goal In Order For Me To Calculate Goal Pacing For That Goal", QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton) def closeEvent(self, event): """Stuff To Do Right Before Gui Is Closed""" self.player = self.main.sessionplayer playing = (self.player.entrainmentObject.state() == Phonon.PlayingState) if playing: quit_msg = "End The Session Prematurely?" reply = QtGui.QMessageBox.question(self, 'Confirmation End Session Prematurely', quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: self.player.entrainmentObject.pause() if self.AmbienceOption.isChecked(): self.player.ambienceObject.pause() self.statusBar.showMessage("Session Is Currently Paused") self.confirmationDialog2 = QtGui.QDialog(self) self.confirmationDialog2.resize(434, 319) self.prematureendingreason = QtGui.QTextEdit(self.confirmationDialog2) self.prematureendingreason.setGeometry(QtCore.QRect(10, 50, 411, 221)) self.prematureendingreason.setObjectName(_fromUtf8("listView")) self.label2 = QtGui.QLabel(self.confirmationDialog2) self.label2.setGeometry(QtCore.QRect(10, 20, 411, 20)) self.label2.setAlignment(QtCore.Qt.AlignCenter) self.horizontalLayoutWidget2 = QtGui.QWidget(self.confirmationDialog2) self.horizontalLayoutWidget2.setGeometry(QtCore.QRect(80, 280, 341, 32)) self.horizontalLayoutWidget2.setObjectName(_fromUtf8("horizontalLayoutWidget")) self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget2) self.horizontalLayout.setMargin(0) self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.CreateButton = QtGui.QPushButton(self.horizontalLayoutWidget2) self.CreateButton.setObjectName(_fromUtf8("pushButton_2")) self.horizontalLayout.addWidget(self.CreateButton) self.listofsessionsButton = QtGui.QPushButton(self.horizontalLayoutWidget2) self.listofsessionsButton.setObjectName(_fromUtf8("pushButton")) self.horizontalLayout.addWidget(self.listofsessionsButton) self.confirmationDialog2.setWindowTitle("Premature Ending") self.label2.setText("Enter The Reason You Are Ending The Session Early:") self.CreateButton.setText("End Session") self.listofsessionsButton.setText("Resume Session") QtCore.QObject.connect(self.CreateButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.testprematureending) QtCore.QObject.connect(self.listofsessionsButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.confirmationDialog2.reject) result = self.confirmationDialog2.exec_() if result == QDialog.Rejected: self.statusBar.showMessage("Resuming Session...") self.player.entrainmentObject.play() if self.AmbienceOption.isChecked(): self.player.ambienceObject.play() event.ignore() else: event.ignore() else: quit_msg = "Really Exit?" 
reply = QtGui.QMessageBox.question(self, 'Message', quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def testprematureending(self): if self.prematureendingreason.toPlainText(): sessionexpectedlist = list() for x, i in enumerate(self.slidervalues): if i.value() != 0: val = i.value() cutname = self.main.cuts[x]["name"] txt = "%s: %s " % (cutname, val) sessionexpectedlist.append(txt) self.statusBar.showMessage("Writing Reason To Database...") reason = self.prematureendingreason.toPlainText() self.sessiondb.writeprematureending(self.player.cutplayingname, sessionexpectedlist, reason) self.confirmationDialog2.accept() else: QtGui.QMessageBox.information(None, "Reason Empty", "Reason Cannot Be Blank In Order To End The Session Early", QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton) def aboutthisprogram(self): """Dialog Displaying Info About This Program""" self.aboutdialog = QDialog(self) self.aboutdialog.setObjectName(_fromUtf8("aboutdialog")) self.aboutdialog.resize(433, 268) self.aboutcloseButton = QtGui.QPushButton(self.aboutdialog) self.aboutcloseButton.setGeometry(QtCore.QRect(324, 230, 90, 23)) self.aboutcloseButton.setObjectName(_fromUtf8("aboutcloseButton")) self.aboutlabel = QtGui.QLabel(self.aboutdialog) self.aboutlabel.setGeometry(QtCore.QRect(20, 20, 391, 21)) self.aboutlabel.setStyleSheet(_fromUtf8("font: 14pt \"Arial Black\";")) self.aboutlabel.setAlignment(QtCore.Qt.AlignCenter) self.aboutlabel.setObjectName(_fromUtf8("aboutlabel")) self.abouttext = QtGui.QTextBrowser(self.aboutdialog) self.abouttext.setGeometry(QtCore.QRect(20, 50, 391, 170)) self.abouttext.setObjectName(_fromUtf8("abouttext")) self.aboutHowToUseButton = QtGui.QPushButton(self.aboutdialog) self.aboutHowToUseButton.setGeometry(QtCore.QRect(20, 230, 111, 23)) self.aboutHowToUseButton.setObjectName(_fromUtf8("aboutHowToUseButton")) self.aboutdialog.setWindowTitle("About") self.aboutcloseButton.setText("Close") self.aboutlabel.setText("About The Kuji-In Program") self.abouttext.setHtml( "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Arial\'; font-size:9pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">This program was developed to be an aid to the practitioners of the Kuji-In through the use of brainwave entrainment technology and optional user-selected soundhelpers files. 
</span></p>\n" "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:10pt;\"><br /></p>\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">Program Development By Caleb Trahan (c) 2015 All Rights Reserved</span></p>\n" "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:10pt;\"><br /></p>\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">Program Written In </span><span style=\" font-family:\'Sans Serif\'; font-size:10pt; text-decoration: underline; color:#f0651f;\">Python 3</span><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">, Designed With </span><span style=\" font-family:\'Sans Serif\'; font-size:10pt; text-decoration: underline; color:#f0651f;\">Qt 4</span><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\"></span> And pydub Used For Audio Manipulation</p>\n" "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:10pt;\"><br /></p></body></html>") self.aboutHowToUseButton.setText("Tutorials") QtCore.QObject.connect(self.aboutHowToUseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.aboutdialog.accept) QtCore.QObject.connect(self.aboutcloseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.aboutdialog.reject) ret = self.aboutdialog.exec_() if ret == QDialog.Accepted: Help.KujiInHelp(self) def displaylistofsession(self): """Method To Display The List Of Sessions Practiced""" sessionlist = self.sessiondb.testifnosessions() if not sessionlist: QtGui.QMessageBox.information(None, "No Sessions", "No Sessions Practiced Yet", QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton) else: Database.DisplaySessionList(self, self.sessiondb) def displayprematureendings(self): """Method To Display List Of Sessions Ended Early, If Any""" a = Database.DisplayPrematureEndings(self, self.sessiondb) isprematureendings = a.testforprematureendings() if not isprematureendings: QtGui.QMessageBox.information(None, "No Premature Endings", "No Premature Endings, Excellent Work And Let's Keep It That Way!", QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton) def calculatetotalsessiontime(self): self.totalMinutesDisplay.display(0) self.totalhoursDisplay.display(0) totaltime = int() for x, i in enumerate(self.slidervalues): if i.value() != 0: totaltime += i.value() if totaltime > 0: if totaltime >= 60: hours = math.floor(totaltime / 60) if hours != 0: self.totalhoursDisplay.display(hours) totaltime -= hours * 60 if totaltime != 0: self.totalMinutesDisplay.display(totaltime) else: self.totalMinutesDisplay.display(totaltime) else: self.statusBar.showMessage("Nothing To Calculate. 
All Cuts Are Set To 0", 1000) def displaytotalprogress(self): rinlist = [self.rintotalhoursDisplay, self.rintotalminutesDisplay] kyolist = [self.kyototalhoursDisplay, self.kyototalminutesDisplay] tohlist = [self.tohtotalhoursDisplay, self.tohtotalminutesDisplay] shalist = [self.shatotalhoursDisplay, self.shatotalminutesDisplay] kailist = [self.kaitotalhoursDisplay, self.kaitotalminutesDisplay] jinlist = [self.jintotalhoursDisplay, self.jintotalminutesDisplay] retsulist = [self.retsutotalhoursDisplay, self.retsutotalminutesDisplay] zailist = [self.zaitotalhoursDisplay, self.zaitotalminutesDisplay] zenlist = [self.zentotalhoursDisplay, self.zentotalminutesDisplay] totallist = [rinlist, kyolist, tohlist, shalist, kailist, jinlist, retsulist, zailist, zenlist] for x, i in enumerate(totallist): self.sessiondb.calculatetotalminutesforindividualcut(x, i) return True def newgoaldialog(self): self.setgoalsdialog = QDialog(self) self.setgoalsdialog.setObjectName(_fromUtf8("setgoalsdialog")) self.setgoalsdialog.resize(434, 241) self.setgoaldialogtopLabel = QtGui.QLabel(self.setgoalsdialog) self.setgoaldialogtopLabel.setGeometry(QtCore.QRect(40, 30, 381, 16)) self.setgoaldialogtopLabel.setObjectName(_fromUtf8("setgoaldialogtopLabel")) self.setgoaldialoggoalLabel = QtGui.QLabel(self.setgoalsdialog) self.setgoaldialoggoalLabel.setGeometry(QtCore.QRect(130, 70, 59, 15)) self.setgoaldialoggoalLabel.setObjectName(_fromUtf8("setgoaldialoggoalLabel")) self.horizontalLayoutWidget = QtGui.QWidget(self.setgoalsdialog) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(100, 90, 91, 41)) self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget")) self.setgoalsdialoggoallayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget) self.setgoalsdialoggoallayout.setMargin(0) self.setgoalsdialoggoallayout.setObjectName(_fromUtf8("setgoalsdialoggoallayout")) self.setgoaldialogvalue = QtGui.QSpinBox(self.horizontalLayoutWidget) self.setgoaldialogvalue.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter) self.setgoaldialogvalue.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus) self.setgoaldialogvalue.setObjectName(_fromUtf8("setgoaldialogvalue")) self.setgoalsdialoggoallayout.addWidget(self.setgoaldialogvalue) self.setgoaldialoghrslabel = QtGui.QLabel(self.horizontalLayoutWidget) self.setgoaldialoghrslabel.setObjectName(_fromUtf8("setgoaldialoghrslabel")) self.setgoalsdialoggoallayout.addWidget(self.setgoaldialoghrslabel) self.setgoaldialogDueDate = QtGui.QDateEdit(self.setgoalsdialog) self.setgoaldialogDueDate.setDateTime(QDateTime.currentDateTime()) self.setgoaldialogDueDate.setCalendarPopup(True) self.setgoaldialogDueDate.setGeometry(QtCore.QRect(220, 100, 110, 22)) self.setgoaldialogDueDate.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter) self.setgoaldialogDueDate.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus) self.setgoaldialogDueDate.setDisplayFormat(_fromUtf8("")) self.setgoaldialogDueDate.setObjectName(_fromUtf8("setgoaldialogDueDate")) self.setgoalduedateLabel = QtGui.QLabel(self.setgoalsdialog) self.setgoalduedateLabel.setGeometry(QtCore.QRect(240, 70, 61, 20)) self.setgoalduedateLabel.setObjectName(_fromUtf8("setgoalduedateLabel")) self.horizontalLayoutWidget_2 = QtGui.QWidget(self.setgoalsdialog) self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(90, 180, 334, 41)) self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2")) self.setdialogbuttonslayout = 
QtGui.QHBoxLayout(self.horizontalLayoutWidget_2) self.setdialogbuttonslayout.setMargin(0) self.setdialogbuttonslayout.setObjectName(_fromUtf8("setdialogbuttonslayout")) self.setgoaldialoggoallistButton = QtGui.QPushButton(self.horizontalLayoutWidget_2) self.setgoaldialoggoallistButton.setObjectName(_fromUtf8("pushButton")) self.setdialogbuttonslayout.addWidget(self.setgoaldialoggoallistButton) self.setgoaldialogAcceptButton = QtGui.QPushButton(self.horizontalLayoutWidget_2) self.setgoaldialogAcceptButton.setObjectName(_fromUtf8("setgoaldialogAcceptButton")) self.setdialogbuttonslayout.addWidget(self.setgoaldialogAcceptButton) self.setgoaldialogCancelButton = QtGui.QPushButton(self.horizontalLayoutWidget_2) self.setgoaldialogCancelButton.setObjectName(_fromUtf8("setgoaldialogCancelButton")) self.setdialogbuttonslayout.addWidget(self.setgoaldialogCancelButton) self.setgoalsdialog.setWindowTitle("Set A New Goal") currenthours = self.sessiondb.calculatetotalhours() self.setgoaldialogtopLabel.setText("You Have Practiced For %s Hours. Please Set A New Goal:" % currenthours) self.setgoaldialoggoalLabel.setText("GOAL") self.setgoaldialoghrslabel.setText("hrs") self.setgoalduedateLabel.setText("Due Date") self.setgoaldialogAcceptButton.setText("Set This Goal") self.setgoaldialoggoallistButton.setText("Current Goals") self.setgoaldialogCancelButton.setText("Cancel") QtCore.QObject.connect(self.setgoaldialogAcceptButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.checkgoals) QtCore.QObject.connect(self.setgoaldialoggoallistButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.viewgoals) QtCore.QObject.connect(self.setgoaldialogCancelButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.setgoalsdialog.reject) self.setgoalsdialog.exec_() def checkgoals(self): goaldate = self.setgoaldialogDueDate.dateTime().toPyDateTime() goalhours = int(self.setgoaldialogvalue.value()) valueisgood = self.sessiondb.checknewgoals(goaldate, goalhours) if isinstance(valueisgood, bool): self.statusBar.showMessage("Adding Goal...") self.sessiondb.insertgoal(goaldate, goalhours) self.setgoalsdialog.accept() self.setgoalstatus() self.statusBar.showMessage("Goal Successfully Added", 3000) else: QtGui.QMessageBox.critical(None, "Error Adding Goal", valueisgood, QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton) return def setsignalsandslots(self): """Signals And Slot Bindings For Main GUI""" QtCore.QObject.connect(self.preSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.preDisplay_2.display) QtCore.QObject.connect(self.rinSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.rinDisplay_2.display) QtCore.QObject.connect(self.kyoSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.kyoDisplay_2.display) QtCore.QObject.connect(self.tohSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.tohDisplay_2.display) QtCore.QObject.connect(self.shaSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.shaDisplay_2.display) QtCore.QObject.connect(self.kaiSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.kaiDisplay_2.display) QtCore.QObject.connect(self.jinSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.jinDisplay_2.display) QtCore.QObject.connect(self.retsuSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.retsuDisplay_2.display) QtCore.QObject.connect(self.zaiSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.zaiDisplay_2.display) QtCore.QObject.connect(self.zenSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), 
self.zenDisplay_2.display) QtCore.QObject.connect(self.postSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.postDisplay_2.display) QtCore.QObject.connect(self.CreateButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.createsession) QtCore.QObject.connect(self.listofsessionsButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.displaylistofsession) QtCore.QObject.connect(self.AmbienceOption, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), self.main.ambiencecheckboxchecked) QtCore.QObject.connect(self.ReferenceDisplayOption, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), self.main.referenceboxchecked) QtCore.QObject.connect(self.prematureendingsbutton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.displayprematureendings) QtCore.QObject.connect(self.PlayButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.playsession) QtCore.QObject.connect(self.PauseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.pausesession) QtCore.QObject.connect(self.StopButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.stopsession) QtCore.QObject.connect(self.exportButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.exportsession) QtCore.QObject.connect(self.calculateTotalSessionTimeButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.calculatetotalsessiontime) QtCore.QObject.connect(self.viewgoalsButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.viewgoals) QtCore.QObject.connect(self.completedgoalsButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.viewcompletedgoals) QtCore.QObject.connect(self.setgoalButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.newgoaldialog) QtCore.QObject.connect(self.goalpacingButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.goalpacing) QtCore.QObject.connect(self.changeallvaluesButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.changeallvalues) # self.setnewmaxslidervalue() self.displaytotalprogress() def changeallvalues(self): """Method With A Dialog To Set All Values To _____""" self.changevaluesdialog = QtGui.QDialog(self) self.changevaluesdialog.setObjectName(_fromUtf8("changevaluedialog")) self.changevaluesdialog.resize(395, 130) self.changevaluesdialog.setMinimumSize(QtCore.QSize(0, 100)) self.changeValueTopLabel = QtGui.QLabel(self.changevaluesdialog) self.changeValueTopLabel.setGeometry(QtCore.QRect(70, 30, 131, 20)) self.changeValueTopLabel.setObjectName(_fromUtf8("changeValueTopLabel")) self.horizontalLayoutWidget_2 = QtGui.QWidget(self.changevaluesdialog) self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(20, 60, 337, 51)) self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2")) self.changeValueLayout2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2) self.changeValueLayout2.setMargin(0) self.changeValueLayout2.setObjectName(_fromUtf8("changeValueLayout2")) self.changeValuecheckbox = QtGui.QCheckBox(self.horizontalLayoutWidget_2) self.changeValuecheckbox.setObjectName(_fromUtf8("changeValuecheckbox")) self.changeValueLayout2.addWidget(self.changeValuecheckbox) self.pushButton = QtGui.QPushButton(self.horizontalLayoutWidget_2) self.pushButton.setObjectName(_fromUtf8("pushButton")) self.changeValueLayout2.addWidget(self.pushButton) self.StopButton = QtGui.QPushButton(self.horizontalLayoutWidget_2) self.StopButton.setObjectName(_fromUtf8("pushButton_2")) self.changeValueLayout2.addWidget(self.StopButton) self.changeValuespinbox = QtGui.QSpinBox(self.changevaluesdialog) self.changeValuespinbox.setGeometry(QtCore.QRect(210, 30, 51, 20)) # self.changeValuespinbox.setLayoutDirection(QtCore.Qt.RightToLeft) 
self.changeValuespinbox.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter) self.changeValuespinbox.setObjectName(_fromUtf8("changeValuespinbox")) self.changeValueminuteslabel = QtGui.QLabel(self.changevaluesdialog) self.changeValueminuteslabel.setGeometry(QtCore.QRect(270, 20, 91, 39)) self.changevaluesdialog.setWindowTitle("Change All Values") self.changeValueTopLabel.setText("Change All Values To:") self.changeValuecheckbox.setText("Include Pre And Post") self.changeValueminuteslabel.setText("minute(s)") self.pushButton.setText("OK") self.StopButton.setText("Cancel") QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL("clicked()"), self.changevaluesdialog.accept) QtCore.QObject.connect(self.StopButton, QtCore.SIGNAL("clicked()"), self.changevaluesdialog.reject) ret = self.changevaluesdialog.exec_() if ret == QDialog.Accepted: value = self.changeValuespinbox.value() if self.changeValuecheckbox.isChecked(): for x, i in enumerate(self.slidervalues): i.setValue(value) else: for x, i in enumerate(self.slidervalues): if x not in [0, 10]: i.setValue(value) def initgui(self): """Method To Setup Gui Bindings""" self.centralwidget = self self.centralwidget.setObjectName(_fromUtf8("Kuji-In Session Creator")) self.frame = QtGui.QFrame(self.centralwidget) self.frame.setGeometry(QtCore.QRect(0, 0, 1200, 821)) # self.frame.setFrameShape(QtGui.QFrame.StyledPanel) # self.frame.setFrameShadow(QtGui.QFrame.Raised) self.frame.setObjectName(_fromUtf8("frame")) ######## Creating Session Top Cutname Labels self.horizontalLayoutWidget_3 = QtGui.QWidget(self.frame) self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(36, 80, 716, 51)) self.DurationLabels_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_3) self.DurationLabels_2.setMargin(0) self.preLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.preLabel_2) self.rinLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.rinLabel_2) self.kyoLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.kyoLabel_2) self.tohLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.tohLabel_2) self.shaLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.shaLabel_2) self.kaiLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.kaiLabel_2) self.jinLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.jinLabel_2) self.retsuLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.retsuLabel_2) self.zaiLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.zaiLabel_2) self.zenLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.zenLabel_2) self.postLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3) self.DurationLabels_2.addWidget(self.postLabel_2) creatingsessiontopcutnamelabels = [ self.preLabel_2, self.rinLabel_2, self.kyoLabel_2, self.tohLabel_2, self.shaLabel_2, self.kaiLabel_2, self.jinLabel_2, self.retsuLabel_2, self.zaiLabel_2, self.zenLabel_2, self.postLabel_2 ] for i in creatingsessiontopcutnamelabels: i.setStyleSheet(_fromUtf8("color: #98A6A8;\n" "border-top: 1px solid black;\n" "border-left: 1px solid black;\n" "border-right: 1px solid black;")) i.setAlignment(QtCore.Qt.AlignCenter) ### Creating Session Slider self.sessionslidersLayout = QtGui.QWidget(self.frame) 
self.sessionslidersLayout.setGeometry(QtCore.QRect(10, 130, 761, 331)) self.DurationSliders_2 = QtGui.QHBoxLayout(self.sessionslidersLayout) self.DurationSliders_2.setContentsMargins(-1, -1, 0, -1) self.preSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.preSlider_2) self.rinSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.rinSlider_2) self.kyoSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.kyoSlider_2) self.tohSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.tohSlider_2) self.shaSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.shaSlider_2) self.kaiSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.kaiSlider_2) self.jinSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.jinSlider_2) self.retsuSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.retsuSlider_2) self.zaiSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.zaiSlider_2) self.zenSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.zenSlider_2) self.postSlider_2 = QtGui.QSlider(self.sessionslidersLayout) self.DurationSliders_2.addWidget(self.postSlider_2) creatingsessionsliders = [ self.preSlider_2, self.rinSlider_2, self.kyoSlider_2, self.tohSlider_2, self.shaSlider_2, self.kaiSlider_2, self.jinSlider_2, self.retsuSlider_2, self.zaiSlider_2, self.zenSlider_2, self.postSlider_2 ] for i in creatingsessionsliders: i.setMaximum(90) i.setSingleStep(5) i.setPageStep(5) i.setOrientation(QtCore.Qt.Vertical) i.setTickPosition(QtGui.QSlider.TicksBothSides) self.toptotalsLabel = QtGui.QLabel(self.frame) self.toptotalsLabel.setGeometry(QtCore.QRect(280, 40, 221, 21)) self.toptotalsLabel.setStyleSheet(_fromUtf8("font: 14pt \"Arial Black\";\n" "color: #98A6A8;")) self.toptotalsLabel.setAlignment(QtCore.Qt.AlignCenter) self.toptotalsLabel.setObjectName(_fromUtf8("toptotalsLabel")) ######## Values Below Sliders On Session Creator self.horizontalLayoutWidget_2 = QtGui.QWidget(self.frame) self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(20, 470, 721, 41)) self.CutDurationDisplays_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.setMargin(0) self.preDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.preDisplay_2) self.rinDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.rinDisplay_2) self.kyoDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.kyoDisplay_2) self.tohDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.tohDisplay_2) self.shaDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.shaDisplay_2) self.kaiDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.kaiDisplay_2) self.jinDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.jinDisplay_2) self.retsuDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.retsuDisplay_2) self.zaiDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) 
self.CutDurationDisplays_2.addWidget(self.zaiDisplay_2) self.zenDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) self.CutDurationDisplays_2.addWidget(self.zenDisplay_2) self.postDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2) lcdvaluesbelowsliders = [ self.preDisplay_2, self.rinDisplay_2, self.kyoDisplay_2, self.tohDisplay_2, self.shaDisplay_2, self.kaiDisplay_2, self.jinDisplay_2, self.retsuDisplay_2, self.zaiDisplay_2, self.zenDisplay_2, self.postDisplay_2 ] for i in lcdvaluesbelowsliders: i.setNumDigits(3) self.CutDurationDisplays_2.addWidget(self.postDisplay_2) ######## CutName Labels On Total Progress Display self.verticalLayoutWidget = QtGui.QWidget(self.frame) self.verticalLayoutWidget.setGeometry(QtCore.QRect(790, 80, 91, 432)) self.totalprogressLabels = QtGui.QVBoxLayout(self.verticalLayoutWidget) self.totalprogressLabels.setMargin(0) self.rintotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.rintotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.rintotalLabel) self.kyototalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.kyototalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.kyototalLabel) self.tohtotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.tohtotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.tohtotalLabel) self.shatotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.shatotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.shatotalLabel) self.kaitotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.kaitotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.kaitotalLabel) self.jintotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.jintotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.jintotalLabel) self.retsutotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.retsutotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.retsutotalLabel) self.zaitotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.zaitotalLabel.setAlignment(QtCore.Qt.AlignCenter) self.totalprogressLabels.addWidget(self.zaitotalLabel) self.zentotalLabel = QtGui.QLabel(self.verticalLayoutWidget) self.zentotalLabel.setAlignment(QtCore.Qt.AlignCenter) cutnamelabels = [ self.rintotalLabel, self.kyototalLabel, self.tohtotalLabel, self.shatotalLabel, self.kaitotalLabel, self.jintotalLabel, self.retsutotalLabel, self.zaitotalLabel, self.zentotalLabel ] for i in cutnamelabels: i.setStyleSheet(_fromUtf8("color: #98A6A8;\n" "border-left: 1px solid black;\n" "border-top: 1px solid black;\n" "border-bottom: 1px solid black;")) self.totalprogressLabels.addWidget(self.zentotalLabel) ######## Hours LCD Numbers On Total Progress Display self.verticalLayoutWidget_4 = QtGui.QWidget(self.frame) self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(880, 80, 66, 432)) self.totalhoursLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_4) self.totalhoursLayout.setMargin(0) self.rintotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.rintotalhoursDisplay) self.kyototalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.kyototalhoursDisplay) self.tohtotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.tohtotalhoursDisplay) self.shatotalhoursDisplay = 
QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.shatotalhoursDisplay) self.kaitotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.kaitotalhoursDisplay) self.jintotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.jintotalhoursDisplay) self.retsutotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.retsutotalhoursDisplay) self.zaitotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) self.totalhoursLayout.addWidget(self.zaitotalhoursDisplay) self.zentotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4) lcdhoursdisplays = [ self.rintotalhoursDisplay, self.kyototalhoursDisplay,self.tohtotalhoursDisplay, self.shatotalhoursDisplay, self.kaitotalhoursDisplay, self.jintotalhoursDisplay, self.retsutotalhoursDisplay, self.zaitotalhoursDisplay, self.zentotalhoursDisplay ] for i in lcdhoursdisplays: i.setNumDigits(4) i.setStyleSheet(_fromUtf8("color: rgb(187, 204, 207);\n" "border-top: 1px solid black;\n" "border-bottom: 1px solid black;")) self.totalhoursLayout.addWidget(self.zentotalhoursDisplay) ######## 'Hours' And 'Minutes' Labels On Total Progress self.verticalLayoutWidget_5 = QtGui.QWidget(self.frame) self.verticalLayoutWidget_5.setGeometry(QtCore.QRect(946, 80, 55, 432)) self.totalhoursLabels = QtGui.QVBoxLayout(self.verticalLayoutWidget_5) self.totalhoursLabels.setMargin(0) self.label_11 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_11) self.label_14 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_14) self.label_15 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_15) self.label_18 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_18) self.label_17 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_17) self.label_16 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_16) self.label_13 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_13) self.label_12 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_12) self.label_10 = QtGui.QLabel(self.verticalLayoutWidget_5) self.totalhoursLabels.addWidget(self.label_10) self.verticalLayoutWidget_6 = QtGui.QWidget(self.frame) self.verticalLayoutWidget_6.setGeometry(QtCore.QRect(1000.5, 80, 66, 432)) self.totalminutesLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_6) self.totalminutesLayout.setMargin(0) self.rintotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.rintotalminutesDisplay) self.kyototalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.kyototalminutesDisplay) self.tohtotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.tohtotalminutesDisplay) self.shatotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.shatotalminutesDisplay) self.kaitotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.kaitotalminutesDisplay) self.jintotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.jintotalminutesDisplay) self.retsutotalminutesDisplay = 
QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.retsutotalminutesDisplay) self.zaitotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.zaitotalminutesDisplay) self.zentotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6) self.totalminutesLayout.addWidget(self.zentotalminutesDisplay) self.verticalLayoutWidget_7 = QtGui.QWidget(self.frame) self.verticalLayoutWidget_7.setGeometry(QtCore.QRect(1064, 80, 71, 432)) self.totalminuteslabelsLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.setMargin(0) self.label_19 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_19) self.label_23 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_23) self.label_22 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_22) self.label_21 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_21) self.label_25 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_25) self.label_20 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_20) self.label_26 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_26) self.label_24 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_24) self.label_27 = QtGui.QLabel(self.verticalLayoutWidget_7) self.totalminuteslabelsLayout.addWidget(self.label_27) descriptionlabelslist = [ self.label_10, self.label_11, self.label_12, self.label_13, self.label_14, self.label_15, self.label_16, self.label_17, self.label_18, self.label_19, self.label_20, self.label_21, self.label_22, self.label_23, self.label_24, self.label_25, self.label_26, self.label_27 ] minuteslcdlist = [ self.rintotalminutesDisplay, self.kyototalminutesDisplay, self.tohtotalminutesDisplay, self.shatotalminutesDisplay, self.kaitotalminutesDisplay, self.jintotalminutesDisplay, self.retsutotalminutesDisplay, self.zaitotalminutesDisplay, self.zentotalminutesDisplay ] for i in minuteslcdlist: i.setStyleSheet(_fromUtf8("color: rgb(187, 204, 207);\n" # Styles For LCDDisplay "border-top: 1px solid black;\n" "border-bottom: 1px solid black;")) i.setNumDigits(4) for i in descriptionlabelslist: i.setStyleSheet(_fromUtf8("color: #98A6A8;\n" "border-right: 1px solid black;\n" "border-top: 1px solid black;\n" "border-bottom: 1px solid black;\n")) i.setAlignment(QtCore.Qt.AlignCenter) ######## self.horizontalLayoutWidget_6 = QtGui.QWidget(self.frame) self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(790, 17, 491, 61)) self.horizontalLayoutWidget_6.setObjectName(_fromUtf8("horizontalLayoutWidget_6")) self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_6) self.horizontalLayout.setMargin(0) self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.topsessionLabel = QtGui.QLabel(self.frame) self.topsessionLabel.setGeometry(QtCore.QRect(796, 36, 335, 31)) self.topsessionLabel.setStyleSheet(_fromUtf8("color: #98A6A8;\n" "font: 14pt \"Arial Black\";")) self.topsessionLabel.setAlignment(QtCore.Qt.AlignCenter) self.topsessionLabel.setObjectName(_fromUtf8("topsessionLabel")) # self.horizontalLayout.addWidget(self.topsessionLabel) self.horizontalLayoutWidget_4 = QtGui.QWidget(self.frame) 
self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(784, 517, 361, 50)) self.horizontalLayoutWidget_4.setObjectName(_fromUtf8("horizontalLayoutWidget_4")) self.horizontalLayout_4 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_4) self.listofsessionsButton = QtGui.QPushButton(self.horizontalLayoutWidget_6) self.listofsessionsButton.setObjectName(_fromUtf8("pushButton")) self.horizontalLayout_4.addWidget(self.listofsessionsButton) self.prematureendingsbutton = QtGui.QPushButton(self.horizontalLayoutWidget_6) self.prematureendingsbutton.setObjectName(_fromUtf8("pushButton_3")) self.horizontalLayout_4.addWidget(self.prematureendingsbutton) # self.horizontalLayout.addLayout(self.horizontalLayout_4) self.horizontalLayoutWidget2 = QtGui.QWidget(self.frame) self.horizontalLayoutWidget2.setGeometry(QtCore.QRect(30, 585, 310, 31)) self.horizontalLayoutWidget2.setObjectName(_fromUtf8("horizontalLayoutWidget")) self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget2) self.horizontalLayout_2.setMargin(0) self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.AmbienceOption = QtGui.QCheckBox(self.horizontalLayoutWidget2) self.AmbienceOption.setObjectName(_fromUtf8("AmbienceOption")) self.horizontalLayout_2.addWidget(self.AmbienceOption) self.ReferenceDisplayOption = QtGui.QCheckBox(self.horizontalLayoutWidget2) self.ReferenceDisplayOption.setObjectName(_fromUtf8("ReferebceDisplayOption")) self.horizontalLayout_2.addWidget(self.ReferenceDisplayOption) self.widget = QtGui.QWidget(self.frame) self.widget.setGeometry(QtCore.QRect(10, 760, 311, 33)) self.widget.setObjectName(_fromUtf8("widget")) self.widget1 = QtGui.QWidget(self.frame) self.widget1.setGeometry(QtCore.QRect(390, 580, 349, 41)) self.widget1.setObjectName(_fromUtf8("widget1")) self.actionsbuttonsLayout = QtGui.QHBoxLayout(self.widget1) self.actionsbuttonsLayout.setMargin(0) self.actionsbuttonsLayout.setObjectName(_fromUtf8("actionsbuttonsLayout")) self.CreateButton = QtGui.QPushButton(self.widget1) self.CreateButton.setObjectName(_fromUtf8("CreateButton")) self.actionsbuttonsLayout.addWidget(self.CreateButton) self.exportButton = QtGui.QPushButton(self.widget1) self.exportButton.setObjectName(_fromUtf8("exportButton")) self.actionsbuttonsLayout.addWidget(self.exportButton) self.sessionPlayerFrame = QtGui.QFrame(self.frame) self.sessionPlayerFrame.setGeometry(QtCore.QRect(20, 640, 722, 165)) self.sessionPlayerFrame.setFrameShape(QtGui.QFrame.StyledPanel) self.sessionPlayerFrame.setFrameShadow(QtGui.QFrame.Raised) self.sessionPlayerFrame.setObjectName(_fromUtf8("sessionPlayerFrame")) self.widget1 = QtGui.QWidget(self.sessionPlayerFrame) self.widget1.setGeometry(QtCore.QRect(20, 40, 311, 33)) self.widget1.setObjectName(_fromUtf8("widget1")) self.horizontalLayout_3 = QtGui.QHBoxLayout(self.widget1) self.horizontalLayout_3.setMargin(0) self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3")) self.PlayButton = QtGui.QPushButton(self.widget1) self.PlayButton.setObjectName(_fromUtf8("PlayButton")) self.horizontalLayout_3.addWidget(self.PlayButton) self.PauseButton = QtGui.QPushButton(self.widget1) self.PauseButton.setObjectName(_fromUtf8("PauseButton")) self.horizontalLayout_3.addWidget(self.PauseButton) self.StopButton = QtGui.QPushButton(self.widget1) self.StopButton.setObjectName(_fromUtf8("pushButton_2")) self.horizontalLayout_3.addWidget(self.StopButton) self.EntrainmentVolumeSlider = Phonon.VolumeSlider(self.sessionPlayerFrame) self.EntrainmentVolumeSlider.setGeometry(QtCore.QRect(2, 120, 
150, 20)) self.EntrainmentVolumeSlider.setOrientation(QtCore.Qt.Horizontal) self.EntrainmentVolumeSlider.setObjectName(_fromUtf8("EntrainmentVolumeSlider")) fonttop = QtGui.QFont() fonttop.setPointSize(14) self.sessionPlayertopLabel = QtGui.QLabel(self.sessionPlayerFrame) self.sessionPlayertopLabel.setFont(fonttop) # self.sessionPlayertopLabel.setGeometry(QtCore.QRect(310, 10, 91, 16)) self.sessionPlayertopLabel.setGeometry(QtCore.QRect(2, 10, 718, 20)) self.sessionPlayertopLabel.setAlignment(QtCore.Qt.AlignCenter) self.sessionPlayertopLabel.setObjectName(_fromUtf8("sessionPlayertopLabel")) self.EntrainmentVolumeTopLabel = QtGui.QLabel(self.sessionPlayerFrame) self.EntrainmentVolumeTopLabel.setGeometry(QtCore.QRect(30, 90, 121, 16)) self.EntrainmentVolumeTopLabel.setObjectName(_fromUtf8("EntrainmentVolumeTopLabel")) self.AmbienceVolumeTopLabel = QtGui.QLabel(self.sessionPlayerFrame) self.AmbienceVolumeTopLabel.setGeometry(QtCore.QRect(220, 90, 111, 16)) self.AmbienceVolumeTopLabel.setObjectName(_fromUtf8("AmbienceVolumeTopLabel")) self.AmbienceVolumeSlider = Phonon.VolumeSlider(self.sessionPlayerFrame) self.AmbienceVolumeSlider.setGeometry(QtCore.QRect(186, 120, 150, 20)) self.AmbienceVolumeSlider.setOrientation(QtCore.Qt.Horizontal) self.AmbienceVolumeSlider.setObjectName(_fromUtf8("AmbienceVolumeSlider")) font = QtGui.QFont() font.setPointSize(11) self.currentlyPlayingtopLabel = QtGui.QLabel(self.sessionPlayerFrame) self.currentlyPlayingtopLabel.setFont(font) self.currentlyPlayingtopLabel.setGeometry(QtCore.QRect(390, 30, 131, 21)) self.currentlyPlayingtopLabel.setObjectName(_fromUtf8("currentlyPlayingtopLabel")) self.CutPlayingName = QtGui.QLabel(self.sessionPlayerFrame) self.CutPlayingName.setFont(font) self.CutPlayingName.setGeometry(QtCore.QRect(550, 30, 151, 20)) self.CutPlayingName.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.CutPlayingName.setObjectName(_fromUtf8("CutPlayingName")) self.thiscutprogressDescriptionLabel = QtGui.QLabel(self.sessionPlayerFrame) self.thiscutprogressDescriptionLabel.setFont(font) self.thiscutprogressDescriptionLabel.setGeometry(QtCore.QRect(390, 60, 171, 21)) self.thiscutprogressDescriptionLabel.setObjectName(_fromUtf8("thiscutprogressDescriptionLabel")) self.CutPlayingProgressActual = QtGui.QLabel(self.sessionPlayerFrame) self.CutPlayingProgressActual.setFont(font) self.CutPlayingProgressActual.setGeometry(QtCore.QRect(540, 60, 161, 20)) self.CutPlayingProgressActual.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.CutPlayingProgressActual.setObjectName(_fromUtf8("CutPlayingProgressActual")) self.TotalSessionProgressDescriptionLabel = QtGui.QLabel(self.sessionPlayerFrame) self.TotalSessionProgressDescriptionLabel.setFont(font) self.TotalSessionProgressDescriptionLabel.setGeometry(QtCore.QRect(390, 90, 181, 21)) self.TotalSessionProgressDescriptionLabel.setObjectName(_fromUtf8("TotalSessionProgressDescriptionLabel")) self.TotalSessionPlayingProgressActual = QtGui.QLabel(self.sessionPlayerFrame) self.TotalSessionPlayingProgressActual.setFont(font) self.TotalSessionPlayingProgressActual.setGeometry(QtCore.QRect(540, 90, 161, 20)) self.TotalSessionPlayingProgressActual.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.TotalSessionPlayingProgressActual.setObjectName(_fromUtf8("TotalSessionPlayingProgressActual")) self.OpenThisCutsReferenceFilesButton = QtGui.QPushButton(self.sessionPlayerFrame) 
self.OpenThisCutsReferenceFilesButton.setGeometry(QtCore.QRect(534, 126, 170, 31)) self.OpenThisCutsReferenceFilesButton.setObjectName(_fromUtf8("OpenThisCutsReferenceFilesButton")) self.goalsFrame = QtGui.QFrame(self.frame) self.goalsFrame.setGeometry(QtCore.QRect(750, 570, 411, 234)) self.goalsFrame.setFrameShape(QtGui.QFrame.StyledPanel) self.goalsFrame.setFrameShadow(QtGui.QFrame.Raised) self.goalsFrame.setObjectName(_fromUtf8("goalsFrame")) self.layoutWidget2 = QtGui.QWidget(self.goalsFrame) self.layoutWidget2.setGeometry(QtCore.QRect(30, 50, 361, 100)) self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2")) self.goalsVLayout = QtGui.QVBoxLayout(self.layoutWidget2) self.goalsVLayout.setObjectName(_fromUtf8("goalsVLayout")) self.goallabelsLayout = QtGui.QHBoxLayout() self.goallabelsLayout.setObjectName(_fromUtf8("goallabelsLayout")) self.currenttopLabel = QtGui.QLabel(self.layoutWidget2) self.currenttopLabel.setStyleSheet(_fromUtf8("border-top: 1px solid black;\n" "border-left: 1px solid black;\n" "border-right: 1px solid black;")) self.currenttopLabel.setAlignment(QtCore.Qt.AlignCenter) self.currenttopLabel.setObjectName(_fromUtf8("currenttopLabel")) self.goallabelsLayout.addWidget(self.currenttopLabel) self.goaltopLabel = QtGui.QLabel(self.layoutWidget2) self.goaltopLabel.setStyleSheet(_fromUtf8("border-top: 1px solid black;\n" "border-right: 1px solid black;\n" "border-left: 1px solid black;")) self.goaltopLabel.setAlignment(QtCore.Qt.AlignCenter) self.goaltopLabel.setObjectName(_fromUtf8("goaltopLabel")) self.goallabelsLayout.addWidget(self.goaltopLabel) self.goalsVLayout.addLayout(self.goallabelsLayout) self.goalactualLayout = QtGui.QHBoxLayout() self.goalactualLayout.setObjectName(_fromUtf8("goalactualLayout")) self.currentgoalLabel = QtGui.QLabel(self.layoutWidget2) self.currentgoalLabel.setMinimumSize(QtCore.QSize(100, 0)) self.currentgoalLabel.setBaseSize(QtCore.QSize(0, 0)) self.currentgoalLabel.setStyleSheet(_fromUtf8("border-left: 1px solid black;\n" "border-bottom: 1px solid black;\n" "border-right: 1px solid black;")) self.currentgoalLabel.setAlignment(QtCore.Qt.AlignCenter) self.currentgoalLabel.setObjectName(_fromUtf8("currentgoalLabel")) self.goalactualLayout.addWidget(self.currentgoalLabel) self.goalLabel = QtGui.QLabel(self.layoutWidget2) self.goalLabel.setMinimumSize(QtCore.QSize(100, 0)) self.goalLabel.setStyleSheet(_fromUtf8("border-right: 1px solid black;\n" "border-left: 1px solid black;\n" "border-bottom: 1px solid black;")) self.goalLabel.setAlignment(QtCore.Qt.AlignCenter) self.goalLabel.setObjectName(_fromUtf8("goalLabel")) self.goalactualLayout.addWidget(self.goalLabel) self.goalsVLayout.addLayout(self.goalactualLayout) self.goalProgressBar = QtGui.QProgressBar(self.layoutWidget2) self.goalProgressBar.setProperty("value", 24) self.goalProgressBar.setObjectName(_fromUtf8("goalProgressBar")) self.goalsVLayout.addWidget(self.goalProgressBar) self.verticalLayoutWidget_2 = QtGui.QWidget(self.goalsFrame) self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(30, 152, 181, 81)) self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2")) self.goalsButtonLayoutLeft = QtGui.QVBoxLayout(self.verticalLayoutWidget_2) self.goalsButtonLayoutLeft.setObjectName(_fromUtf8("goalsButtonLayoutLeft")) self.setgoalButton = QtGui.QPushButton(self.verticalLayoutWidget_2) self.setgoalButton.setObjectName(_fromUtf8("setgoalButton")) self.goalsButtonLayoutLeft.addWidget(self.setgoalButton) self.viewgoalsButton = QtGui.QPushButton(self.verticalLayoutWidget_2) 
self.viewgoalsButton.setObjectName(_fromUtf8("viewgoalsButton")) self.goalsButtonLayoutLeft.addWidget(self.viewgoalsButton) self.verticalLayoutWidget_3 = QtGui.QWidget(self.goalsFrame) self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(210, 152, 181, 81)) self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3")) self.goalsButtonLayoutRight = QtGui.QVBoxLayout(self.verticalLayoutWidget_3) self.goalsButtonLayoutRight.setObjectName(_fromUtf8("goalsButtonLayoutRight")) self.goalpacingButton = QtGui.QPushButton(self.verticalLayoutWidget_3) self.goalpacingButton.setObjectName(_fromUtf8("goalpacingButton")) self.goalsButtonLayoutRight.addWidget(self.goalpacingButton) self.completedgoalsButton = QtGui.QPushButton(self.verticalLayoutWidget_3) self.completedgoalsButton.setObjectName(_fromUtf8("completedgoalsButton")) self.goalsButtonLayoutRight.addWidget(self.completedgoalsButton) self.progresstopLabel = QtGui.QLabel(self.goalsFrame) self.progresstopLabel.setGeometry(QtCore.QRect(60, 20, 290, 21)) self.progresstopLabel.setMinimumSize(QtCore.QSize(290, 0)) self.progresstopLabel.setAlignment(QtCore.Qt.AlignCenter) self.progresstopLabel.setStyleSheet(_fromUtf8("font: 14pt \"Arial Black\";\n" "color: #98A6A8;")) self.progresstopLabel.setObjectName(_fromUtf8("progresstopLabel")) self.statusBar = QtGui.QStatusBar(self) self.statusBar.setObjectName(_fromUtf8("statusBar")) self.setStatusBar(self.statusBar) # self.statusBar.showMessage("Status Bar Is Still There") self.menuBar = QtGui.QMenuBar(self) self.menuBar.setGeometry(QtCore.QRect(0, 0, 1300, 21)) self.menuBar.setObjectName(_fromUtf8("menuBar")) self.menuBar.setStyleSheet(""" QMenuBar {background-color:#212526; color: #98A6A8;} QMenuBar::item {background-color:#212526; color: #98A6A8; selection-color: #212526} QMenuBar::item:selected {color: #212526; background-color: #d7801a;} """) self.menuFile = QtGui.QMenu(self.menuBar) self.menuFile.setObjectName(_fromUtf8("menuFile")) self.menuTools = QtGui.QMenu(self.menuBar) self.menuTools.setObjectName(_fromUtf8("menuTools")) self.menuHelp = QtGui.QMenu(self.menuBar) self.setMenuBar(self.menuBar) self.actionExit = QtGui.QAction(self) self.actionExit.setObjectName(_fromUtf8("actionExit")) self.actionExit.triggered.connect(self.close) self.actionChange_AlertFile = QtGui.QAction(self) self.actionChange_AlertFile.setObjectName(_fromUtf8("actionCheck_Integrity")) self.actionChange_AlertFile.triggered.connect(self.changealertfile) self.actionAddAmbience = QtGui.QAction(self) self.actionAddAmbience.setObjectName(_fromUtf8("actionadd_ambience")) self.actionAddAmbience.triggered.connect(self.addambiencefiles) self.actionEditAmbience = QtGui.QAction(self) self.actionEditAmbience.setObjectName(_fromUtf8("actionedit_ambience")) self.actionEditAmbience.triggered.connect(self.editambiencefiles) self.actionEditReferenceFiles = QtGui.QAction(self) self.actionEditReferenceFiles.triggered.connect(self.editreferencefiles) self.actionHowToUseThisProgram = QAction(self) self.actionHowToUseThisProgram.triggered.connect(self.howtousethisprogram) self.actionAboutThisProgram = QtGui.QAction(self) self.actionAboutThisProgram.triggered.connect(self.aboutthisprogram) self.actioncontactMe = QtGui.QAction(self) self.actioncontactMe.triggered.connect(self.contactme) self.menuFile.addAction(self.actionExit) self.menuTools.addAction(self.actionAddAmbience) self.menuTools.addAction(self.actionEditAmbience) self.menuTools.addAction(self.actionChange_AlertFile) self.menuTools.addAction(self.actionEditReferenceFiles) 
self.menuHelp.addAction(self.actionHowToUseThisProgram) self.menuHelp.addAction(self.actionAboutThisProgram) self.menuHelp.addAction(self.actioncontactMe) self.menuBar.addAction(self.menuFile.menuAction()) self.menuBar.addAction(self.menuTools.menuAction()) self.menuBar.addAction(self.menuHelp.menuAction()) QtCore.QObject.connect(self.PlayButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.statusBar.clearMessage) self.horizontalLayoutWidget_8 = QtGui.QWidget(self.frame) self.horizontalLayoutWidget_8.setGeometry(QtCore.QRect(250, 520, 271, 51)) self.horizontalLayoutWidget_8.setObjectName(_fromUtf8("horizontalLayoutWidget_8")) self.totalsessiondisplayLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_8) self.totalsessiondisplayLayout.setMargin(0) self.totalsessiondisplayLayout.setObjectName(_fromUtf8("totalsessiondisplayLayout")) self.totalhoursDisplay = QtGui.QLCDNumber(self.horizontalLayoutWidget_8) self.totalhoursDisplay.setObjectName(_fromUtf8("totalhoursDisplay")) self.totalsessiondisplayLayout.addWidget(self.totalhoursDisplay) self.totalhoursLabel = QtGui.QLabel(self.horizontalLayoutWidget_8) self.totalhoursLabel.setObjectName(_fromUtf8("totalhoursLabel")) self.totalsessiondisplayLayout.addWidget(self.totalhoursLabel) self.totalMinutesDisplay = QtGui.QLCDNumber(self.horizontalLayoutWidget_8) self.totalMinutesDisplay.setObjectName(_fromUtf8("totalMinutesDisplay")) self.totalsessiondisplayLayout.addWidget(self.totalMinutesDisplay) self.totalminutesLabel = QtGui.QLabel(self.horizontalLayoutWidget_8) self.totalminutesLabel.setObjectName(_fromUtf8("totalminutesLabel")) self.totalsessiondisplayLayout.addWidget(self.totalminutesLabel) self.calculateTotalSessionTimeButton = QtGui.QPushButton(self.frame) self.calculateTotalSessionTimeButton.setGeometry(QtCore.QRect(540, 530, 201, 23)) self.calculateTotalSessionTimeButton.setObjectName(_fromUtf8("calculateTotalSessionTimeButton")) self.changeallvaluesButton = QtGui.QPushButton(self.frame) self.changeallvaluesButton.setGeometry(QtCore.QRect(30, 530, 201, 23)) self.changeallvaluesButton.setObjectName(_fromUtf8("changeallvaluesButton")) self.setWindowTitle("MainWindow") self.preLabel_2.setText("PRE") self.rinLabel_2.setText("RIN") self.kyoLabel_2.setText("KYO") self.tohLabel_2.setText("TOH") self.shaLabel_2.setText("SHA") self.kaiLabel_2.setText("KAI") self.jinLabel_2.setText("JIN") self.retsuLabel_2.setText("RETSU") self.zaiLabel_2.setText("ZAI") self.zenLabel_2.setText("ZEN") self.postLabel_2.setText("POST") self.toptotalsLabel.setText("Create New Session") self.rintotalLabel.setText("RIN") self.kyototalLabel.setText("KYO") self.tohtotalLabel.setText("TOH") self.shatotalLabel.setText("SHA") self.kaitotalLabel.setText("KAI") self.jintotalLabel.setText("JIN") self.retsutotalLabel.setText("RETSU") self.zaitotalLabel.setText("ZAI") self.zentotalLabel.setText("ZEN") self.label_11.setText("Hours") self.label_14.setText("Hours") self.label_15.setText("Hours") self.label_18.setText("Hours") self.label_17.setText("Hours") self.label_16.setText("Hours") self.label_13.setText("Hours") self.label_12.setText("Hours") self.label_10.setText("Hours") self.label_19.setText("Minutes") self.label_23.setText("Minutes") self.label_22.setText("Minutes") self.label_21.setText("Minutes") self.label_25.setText("Minutes") self.label_20.setText("Minutes") self.label_26.setText("Minutes") self.label_24.setText("Minutes") self.label_27.setText("Minutes") self.changeallvaluesButton.setText("Change All Values To...") self.topsessionLabel.setText("Total Progress") 
self.listofsessionsButton.setText("View Practiced Sessions") self.prematureendingsbutton.setText("View Premature Endings") self.AmbienceOption.setText("Ambience In Session?") self.ReferenceDisplayOption.setText("Display Reference?") self.CreateButton.setText("Create Session") self.exportButton.setText("Export Session") self.PlayButton.setText("Play") self.PauseButton.setText("Pause") self.StopButton.setText("Stop") self.sessionPlayertopLabel.setText("Session Player") self.EntrainmentVolumeTopLabel.setText("Entrainment Volume") self.AmbienceVolumeTopLabel.setText("Ambience Volume") self.currentlyPlayingtopLabel.setText("Currently Playing:") self.CutPlayingName.setText("No Session Playing") self.thiscutprogressDescriptionLabel.setText("Progress:") self.CutPlayingProgressActual.setText("No Session Playing") self.TotalSessionProgressDescriptionLabel.setText("Total Session Progress:") self.TotalSessionPlayingProgressActual.setText("No Session Playing") self.OpenThisCutsReferenceFilesButton.setText("No Cut Playing") self.setgoalButton.setText("New Goal") self.goalpacingButton.setText("Goal Pacing") self.viewgoalsButton.setText("View Goals") self.completedgoalsButton.setText("Completed Goals") self.totalminutesLabel.setText("Minutes") self.totalhoursLabel.setText("Hours") self.calculateTotalSessionTimeButton.setText("Calculate Total Session Time") self.currenttopLabel.setText("Current") self.goaltopLabel.setText("Goal") self.progresstopLabel.setText("Goal Progress") self.setgoalstatus() if not self.sessiondb.goalsset: self.goalProgressBar.setValue(0) self.currentgoalLabel.setText("No Goal Set") self.goalLabel.setText("No Goal Set") self.menuFile.setTitle("File") self.menuTools.setTitle("Tools") self.menuHelp.setTitle("Help") self.actionExit.setText("Exit") self.actionChange_AlertFile.setText("Change Alert File") self.actionAddAmbience.setText("Add Ambience To Program") self.actionEditAmbience.setText("Edit Program's Ambience") self.actionEditReferenceFiles.setText("Edit Reference Files") self.actionAboutThisProgram.setText("About") self.actionHowToUseThisProgram.setText("Tutorials") self.actioncontactMe.setText("Contact Me") self.slidervalues = [self.preSlider_2, self.rinSlider_2, self.kyoSlider_2, self.tohSlider_2, self.shaSlider_2, self.kaiSlider_2, self.jinSlider_2, self.retsuSlider_2, self.zaiSlider_2, self.zenSlider_2, self.postSlider_2] self.setsignalsandslots() def changealertfile(self): Tools.ChangeAlertFile(self) def editreferencefiles(self): Reference.EditReferenceFiles(self) def addambiencefiles(self): Tools.AddAmbienceFiles(self) def editambiencefiles(self): Tools.EditAmbienceFiles(self) def contactme(self): self.contactmedialog = QDialog(self) self.contactmedialog.resize(387, 206) self.contactmetextBrowser = QtGui.QTextBrowser(self.contactmedialog) self.contactmetextBrowser.setGeometry(QtCore.QRect(10, 10, 371, 161)) self.contactmeOKButton = QtGui.QPushButton(self.contactmedialog) self.contactmeOKButton.setGeometry(QtCore.QRect(300, 180, 80, 23)) self.contactmedialog.setWindowTitle("Contact Me") self.contactmetextBrowser.setHtml( "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; 
text-indent:0px;\"><span style=\" font-family:\'Arial\'; font-size:9pt;\">This program has been a project for me for a couple of years now, and it\'s grown from a basic terminal application to a full-fledged program (now over 4,000 lines of code). Seeing as I\'m the sole developer, chances are, I may have made a few mistakes, and sometimes programs don\'t work as expected. If this happens, please attach your log.txt file (in the program\'s directory) and shoot me an email at [email protected]. I\'ll do my best to get back to you, and resolve the problem ASAP.</span></p></body></html>") self.contactmeOKButton.setText("OK") QtCore.QObject.connect(self.contactmeOKButton, QtCore.SIGNAL("clicked()"), self.contactmedialog.accept) self.contactmedialog.exec_()
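# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the setup code above
# mixes old-style connections, e.g.
#     QtCore.QObject.connect(self.PlayButton, QtCore.SIGNAL("clicked()"), ...)
# with new-style ones such as self.actionExit.triggered.connect(self.close).
# The helper below shows the new-style equivalent of the old-style call,
# assuming PyQt4 (which the QtGui/QtCore usage suggests); the widget names
# are made up for the example and the function is never invoked here.
def _new_style_signal_demo():
    from PyQt4 import QtGui

    app = QtGui.QApplication([])
    window = QtGui.QMainWindow()
    status_bar = QtGui.QStatusBar(window)
    window.setStatusBar(status_bar)
    play_button = QtGui.QPushButton("Play", window)
    # new-style equivalent of connect(play_button, SIGNAL("clicked()"), clearMessage)
    play_button.clicked.connect(status_bar.clearMessage)
    return app, window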
""" Originally from phpserialize by Armin Ronacher, BSD license. Heavily rewritten by Armin Rigo. """ from hippy.objects.reference import W_Reference from hippy.objects.convert import convert_string_to_number from hippy.builtin_klass import k_incomplete from rpython.rlib.debug import check_nonneg from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.listsort import make_timsort_class from rpython.rlib.rfloat import string_to_float from rpython.rlib.rstring import ParseStringError from collections import OrderedDict from rpython.rlib import rfloat class SerializerMemo(object): serialize_precision = 0 def __init__(self): self.memo = {} self.counter = 1 def memo_lookup(self, w_obj, builder, char='R'): try: m = self.memo[w_obj] except KeyError: self.memo[w_obj] = self.counter return False else: builder.append(char) builder.append(':') builder.append(str(m)) builder.append(';') return True def add_counter(self): self.counter += 1 class SerializerError(Exception): pass IntSort = make_timsort_class() def remove_duplicates_fin(lst): prev = lst[0] count = 1 for i in range(1, len(lst)): if prev != lst[i]: prev = lst[i] count += 1 result = [0] * (count + 1) prev = lst[0] result[0] = prev count = 1 for i in range(1, len(lst)): if prev != lst[i]: prev = lst[i] result[count] = prev count += 1 result[count] = -1 return result class UnserializerState(object): def __init__(self, space, s): self.space = space self.s = s self.pos = 0 self.error_pos = 0 self.next_reference_obj = -1 self._ref_numbers = [] check_nonneg(self.pos) # ---------- def pp_expect_nonneg_int(self, startpos): check_nonneg(startpos) s = self.s i = startpos limit = len(s) result = 0 while i < limit and s[i].isdigit(): result = result * 10 + (ord(s[i]) - 48) i += 1 self.pos = i result = intmask(result) if result < 0: result = 0 # integer overflow! 
return result def _preprocess(self, i=0): check_nonneg(i) s = self.s nesting = 0 assert i >= 0 while True: if i >= len(s): return -1 tp = s[i] if tp == 'b': i += 4 # 'b:0;' or 'b:1;' elif tp == 'i' or tp == 'd': i = s.find(';', i + 1) if i < 0: return -1 i += 1 elif tp == 's': # 's:LEN:"....";' length = self.pp_expect_nonneg_int(i + 2) i = self.pos + length + 4 elif tp == 'N': i += 2 # 'N;' elif tp == 'a': # 'a:LEN:{....}' i = s.find('{', i + 1) if i < 0: return -1 i += 1 nesting += 1 elif tp == 'O': # 'O:LEN:"....":LEN:{....}' length = self.pp_expect_nonneg_int(i + 2) i = self.pos + length + 4 i = s.find('{', i + 1) if i < 0: return -1 i += 1 nesting += 1 elif tp == '}': nesting -= 1 i += 1 elif tp == 'R' or tp == 'r': # 'R:BACKREF;' backref = self.pp_expect_nonneg_int(i + 2) self._ref_numbers.append(backref) i = self.pos + 1 else: return -1 if nesting <= 0: return i def _preprocess_ref_numbers(self): ref_numbers = self._ref_numbers if len(ref_numbers) > 0: IntSort(ref_numbers).sort() self.ref_numbers = remove_duplicates_fin(ref_numbers) self.ref_objects = {} self.next_reference_obj = self.ref_numbers[0] self.next_reference_idx = 0 def preprocess(self): result = self._preprocess() self._preprocess_ref_numbers() return result >= 0 def preprocess_session_mode(self): # for session.py: the string contains any number of 'KEY|SERIAL' i = 0 while True: bar = self.s.find('|', i) if bar < 0: valid = True break i = self._preprocess(bar + 1) if i < 0: valid = False break self._preprocess_ref_numbers() return valid def save_reference(self, w_result): assert isinstance(w_result, W_Reference) i = self.next_reference_idx index = self.ref_numbers[i] self.ref_objects[index] = w_result self.next_reference_obj = self.ref_numbers[i + 1] - self.ref_numbers[i] self.next_reference_idx = i + 1 # ---------- def expect_closing_brace(self): self.error_pos = self.pos + 1 i = self.consume(1) if self.s[i] != '}': raise SerializerError("'}' expected") def consume(self, n): s = self.s start = self.pos stop = start + n if stop > len(s): raise SerializerError("Unexpected end of data") self.pos = stop return start consume._always_inline_ = True def read_substring_until(self, delim): assert len(delim) == 1 start = self.pos s = self.s stop = s.find(delim, start) if stop < 0: raise SerializerError("unexpected end of stream") self.pos = stop + 1 return s[start:stop] def read_int_until(self, delim, can_be_negative=True): i = self.consume(2) s = self.s negative = False if not s[i].isdigit(): if not can_be_negative: raise SerializerError("unexpected negative int") if s[i] != '-': raise SerializerError("bad int") negative = True i += 1 if not s[i].isdigit(): raise SerializerError("bad int") value = r_uint(ord(s[i]) - 48) self.pos = i + 1 while True: i = self.consume(1) if not s[i].isdigit(): break value = value * 10 + r_uint(ord(s[i]) - 48) if s[i] != delim: raise SerializerError("bad int") if negative: value = -value return intmask(value) def load_str(fp, delim): oldpos = fp.pos length = fp.read_int_until(':', can_be_negative=False) if length < 0: raise SerializerError("integer overflow") try: i = fp.consume(length + 3) # quote, string, quote, semicolon except SerializerError: if len(fp.s) > fp.pos: extra = len(fp.s) - fp.pos - length if extra > 0: fp.error_pos = fp.pos + length + 1 if extra > 1: fp.error_pos += 2 else: fp.error_pos = oldpos raise check_nonneg(i) s = fp.s if s[i] != '"': raise SerializerError("'\"' expected") i += 1 data = s[i: i + length] i += length if s[i] != '"': raise SerializerError("'\"' expected") i += 1 
if s[i] != delim: raise SerializerError("delim expected") return data def load_array_key(fp): oldpos = fp.pos fp.error_pos = fp.pos try: i = fp.consume(2) except SerializerError: pass else: s = fp.s tp = s[i] if tp == 'i': if s[i + 1] != ':': raise SerializerError("':' expected") i = fp.read_int_until(';') return fp.space.wrap(i) if tp == 's': if s[i + 1] != ':': raise SerializerError("':' expected") return fp.space.newstr(load_str(fp, ';')) fp.pos = oldpos load_object(fp) # to update the .pos to the end of the object fp.error_pos = fp.pos raise SerializerError('bad type for array key') def load_reference(fp): fp.next_reference_obj += 1 # undo index = fp.read_int_until(';') try: return fp.ref_objects[index] except KeyError: fp.error_pos = fp.pos raise SerializerError('bad reference number') def load_object(fp): fp.next_reference_obj -= 1 needs_ref = (fp.next_reference_obj == 0) # fp.error_pos = fp.pos try: i = fp.consume(2) except SerializerError: i = fp.consume(1) if fp.s[i] == '}': fp.space.ec.notice("unserialize(): " "Unexpected end of serialized data") raise s = fp.s tp = s[i] check_nonneg(i) check_nonneg(fp.pos) # if tp == 'i': if s[i + 1] != ':': raise SerializerError("':' expected") i = fp.read_int_until(';') w_result = fp.space.wrap(i) # elif tp == 's': if s[i + 1] != ':': raise SerializerError("':' expected") w_result = fp.space.newstr(load_str(fp, ';')) # elif tp == 'd': if s[i + 1] != ':': raise SerializerError("':' expected") data = fp.read_substring_until(';') try: result = string_to_float(data) except ParseStringError: raise SerializerError('bad double') w_result = fp.space.newfloat(result) # elif tp == 'b': if s[i + 1] != ':': raise SerializerError("':' expected") i = fp.consume(2) digit = s[i] if digit == '0': w_result = fp.space.w_False elif digit == '1': w_result = fp.space.w_True else: raise SerializerError('bad bool') if s[i + 1] != ';': raise SerializerError("';' expected") # elif tp == 'N': if s[i + 1] != ';': raise SerializerError("';' expected") w_result = fp.space.w_Null # elif tp == 'a': if s[i + 1] != ':': raise SerializerError("':' expected") length = fp.read_int_until(':', can_be_negative=False) if length < 0: raise SerializerError("integer overflow") i = fp.consume(1) if s[i] != '{': raise SerializerError("'{' expected") w_result = None if needs_ref: w_result = fp.space.empty_ref() fp.save_reference(w_result) # first try to load the array as a direct list lst_w = [] expected = ['i', ':', '0', ';'] for n in range(length): i = fp.pos if i + len(expected) >= len(s): break for j in range(len(expected)): if s[i] != expected[j]: break i += 1 else: # ok, we got exactly 'i:N;' where N is the expected index fp.pos = i lst_w.append(load_object(fp)) # increment the expected counter j = len(expected) - 2 while j >= 2: if expected[j] != '9': expected[j] = chr(ord(expected[j]) + 1) break expected[j] = '0' j -= 1 else: expected = ['i', ':', '1'] + expected[2:] continue break else: # we succeeded in loading the complete array as a list n = length _succeeded_as_a_list() # for tests # fill in the remaining entries, if any, the slow way w_array = fp.space.new_array_from_list(lst_w) for n in range(n, length): w_key = load_array_key(fp) w_value = load_object(fp) w_array = fp.space.setitem_maybe_inplace(w_array, w_key, w_value) fp.expect_closing_brace() if w_result is not None: # needs_ref w_result.store(w_array, unique=True) return w_result else: return w_array # elif tp == 'R': if s[i + 1] != ':': raise SerializerError("':' expected") return load_reference(fp) # elif tp == 'r': 
if s[i + 1] != ':': raise SerializerError("':' expected") return load_reference(fp).deref() # elif tp == 'O': if s[i + 1] != ':': raise SerializerError("':' expected") klass_name = load_str(fp, ':') space = fp.space interp = space.ec.interpreter klass = interp.lookup_class_or_intf(klass_name) if klass is None: klass = k_incomplete w_instance = klass.get_empty_instance(space) w_instance.setattr(interp, '__PHP_Incomplete_Class_Name', space.wrap(klass_name), None) else: w_instance = klass.get_empty_instance(space) w_result = w_instance if needs_ref: w_result = W_Reference(w_instance) fp.save_reference(w_result) count_attrs = fp.read_int_until(':') # negative value accepted :-( i = fp.consume(1) if s[i] != '{': raise SerializerError("'{' expected") attrs = {} for i in xrange(count_attrs): w_attr = load_array_key(fp) w_value = load_object(fp) attr_name = space.str_w(w_attr) attrs[attr_name] = w_value w_instance.setattr(interp, attr_name, w_value, None) fp.expect_closing_brace() w_instance.unserialize(space, attrs) if '__wakeup' in klass.methods: klass.methods['__wakeup'].method_func.call_args(space.ec.interpreter, [], w_this=w_instance, thisclass=klass) return w_result # else: if tp == '}': fp.space.ec.notice("unserialize(): " "Unexpected end of serialized data") raise SerializerError('malformed input') # this is for primitive types only; complex types 'return' above if needs_ref: w_result = W_Reference(w_result) fp.save_reference(w_result) return w_result def _succeeded_as_a_list(): "for tests" def unserialize(space, s): """Unserialize the string s. """ if len(s) == 0: space.ec.hippy_warn("unserialize(): empty string") return space.w_False fp = UnserializerState(space, s) valid = fp.preprocess() try: fp.pos = 0 w_result = load_object(fp) except SerializerError: space.ec.notice("unserialize(): Error at " "offset %d of %d bytes" % (fp.error_pos, len(s))) return space.w_False assert valid return w_result def unserialize_returning_dict(space, s): """Unserialize for session.py with an '|' extension """ fp = UnserializerState(space, s) valid = fp.preprocess_session_mode() d = OrderedDict() fp.pos = 0 while True: bar = s.find('|', fp.pos) if bar < 0: break key = s[fp.pos : bar] fp.pos = bar + 1 try: w_value = load_object(fp) except SerializerError: return None d[key] = w_value assert valid return d
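# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the unserializer walks
# PHP's serialization format, where every value starts with a one-letter type
# tag -- 'i:42;', 'd:1.5;', 'b:1;', 'N;', 's:LEN:"...";', 'a:LEN:{...}',
# 'O:LEN:"Class":COUNT:{...}', plus the back-references 'R:n;' / 'r:n;'.
# The toy decoder below handles only the scalar tags, just to make that
# grammar concrete; it is independent of the hippy object space used above.
def _toy_unserialize_scalar(s):
    tag = s[0]
    if tag == 'N':                              # 'N;' -> null
        return None
    if tag == 'b':                              # 'b:0;' / 'b:1;'
        return s[2] == '1'
    if tag == 'i':                              # 'i:<digits>;'
        return int(s[2:s.index(';')])
    if tag == 'd':                              # 'd:<float>;'
        return float(s[2:s.index(';')])
    if tag == 's':                              # 's:<len>:"<bytes>";'
        length = int(s[2:s.index(':', 2)])
        start = s.index('"') + 1
        return s[start:start + length]
    raise ValueError('only scalar tags are handled in this sketch')

assert _toy_unserialize_scalar('i:42;') == 42
assert _toy_unserialize_scalar('s:3:"foo";') == 'foo'
assert _toy_unserialize_scalar('b:1;') is True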
"""This is the Bokeh charts interface. It gives you a high level API to build complex plot is a simple way. This is the TimeSeries class which lets you build your TimeSeries charts just passing the arguments to the Chart class and calling the proper functions. It also add detection of the incomming input to see if it is a pandas dataframe. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENCE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import pandas as pd from ._chartobject import ChartObject from ..objects import ColumnDataSource, Range1d, DataRange1d #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class TimeSeries(ChartObject): """This is the TimeSeries class and it is in charge of plotting TimeSeries charts in an easy and intuitive way. Essentially, we provide a way to ingest the data, make the proper calculations and push the references into a source object. We additionally make calculations for the ranges. And finally add the needed lines taking the references from the source. Examples: from collections import OrderedDict import pandas as pd # Here is some code to read in some stock data from the Yahoo Finance API AAPL = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000", parse_dates=['Date']) MSFT = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000", parse_dates=['Date']) IBM = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000", parse_dates=['Date']) xyvalues = OrderedDict(AAPL=AAPL[['Date', 'Adj Close']], MSFT=MSFT[['Date', 'Adj Close']], IBM=IBM[['Date', 'Adj Close']]) df = pd.concat(xyvalues, axis=1, names=["l0", "l1"]) from bokeh.charts import TimeSeries ts = TimeSeries(df, title="timeseries, pd_input", notebook=True) ts.legend("top_left").show() """ def __init__(self, xy, title=None, xlabel=None, ylabel=None, legend=False, xscale="datetime", yscale="linear", width=800, height=600, tools=True, filename=False, server=False, notebook=False): """ Args: xy (dict): a dict containing the data with names as a key and the data as a value. title (str, optional): the title of your plot. Defaults to None. xlabel (str, optional): the x-axis label of your plot. Defaults to None. ylabel (str, optional): the y-axis label of your plot. Defaults to None. legend (str, optional): the legend of your plot. The legend content is inferred from incoming input.It can be ``top_left``, ``top_right``, ``bottom_left``, ``bottom_right``. It is ``top_right`` is you set it as True. Defaults to None. xscale (str, optional): the x-axis type scale of your plot. It can be ``linear``, ``datetime`` or ``categorical``. Defaults to ``datetime``. yscale (str, optional): the y-axis type scale of your plot. It can be ``linear``, ``datetime`` or ``categorical``. Defaults to ``linear``. width (int, optional): the width of your plot in pixels. Defaults to 800. height (int, optional): the height of you plot in pixels. Defaults to 600. tools (bool, optional): to enable or disable the tools in your plot. 
Defaults to True filename (str or bool, optional): the name of the file where your plot. will be written. If you pass True to this argument, it will use ``untitled`` as a filename. Defaults to False. server (str or bool, optional): the name of your plot in the server. If you pass True to this argument, it will use ``untitled`` as the name in the server. Defaults to False. notebook (bool, optional):if you want to output (or not) your plot into the IPython notebook. Defaults to False. Attributes: source (obj): datasource object for your plot, initialized as a dummy None. xdr (obj): x-associated datarange object for you plot, initialized as a dummy None. ydr (obj): y-associated datarange object for you plot, initialized as a dummy None. groups (list): to be filled with the incoming groups of data. Useful for legend construction. data (dict): to be filled with the incoming data and be passed to the ColumnDataSource in each chart inherited class. Needed for _set_And_get method. attr (list): to be filled with the new attributes created after loading the data dict. Needed for _set_And_get method. """ self.xy = xy self.source = None self.xdr = None self.ydr = None self.groups = [] self.data = dict() self.attr = [] super(TimeSeries, self).__init__(title, xlabel, ylabel, legend, xscale, yscale, width, height, tools, filename, server, notebook) def check_attr(self): """Check if any of the chained method were used. If they were not used, it assign the init parameters content by default. """ super(TimeSeries, self).check_attr() def get_data(self, **xy): """Take the x/y data from the input **value. It calculates the chart properties accordingly. Then build a dict containing references to all the points to be used by the line glyph inside the ``draw`` method. Args: xy (dict): a dict containing the data with names as a key and the data as a value. """ self.data = dict() # assuming value is an ordered dict self.xy = xy # list to save all the attributes we are going to create self.attr = [] # list to save all the groups available in the incomming input self.groups.extend(self.xy.keys()) # Grouping for i, val in enumerate(self.xy.keys()): xy = self.xy[val] self._set_and_get("x_", val, xy[:, 0]) self._set_and_get("y_", val, xy[:, 1]) def get_source(self): "Push the TimeSeries data into the ColumnDataSource and calculate the proper ranges." self.source = ColumnDataSource(self.data) self.xdr = DataRange1d(sources=[self.source.columns(self.attr[0])]) y_names = self.attr[1::2] endy = max(max(self.data[i]) for i in y_names) starty = min(min(self.data[i]) for i in y_names) self.ydr = Range1d(start=starty - 0.1 * (endy - starty), end=endy + 0.1 * (endy - starty)) def draw(self): """Use the line glyphs to conect the xy points in the time series. Takes reference points from the data loaded at the ColumnDataSurce. """ self.duplet = list(self._chunker(self.attr, 2)) colors = self._set_colors(self.duplet) for i, duplet in enumerate(self.duplet, start=1): self.chart.make_line(self.source, duplet[0], duplet[1], colors[i - 1]) def show(self): """Main TimeSeries show method. It essentially checks for chained methods, creates the chart, pass data into the plot object, draws the glyphs according to the data and shows the chart in the selected output. .. note:: the show method can not be chained. It has to be called at the end of the chain. 
""" # asumming we get an hierchiral pandas object if isinstance(self.xy, pd.DataFrame): self.labels = self.xy.columns.levels[1].values from collections import OrderedDict pdict = OrderedDict() for i in self.xy.columns.levels[0].values: pdict[i] = self.xy[i].dropna().values self.xy = pdict # we need to check the chained method attr self.check_attr() if self._xlabel is None: self._xlabel = self.labels[0] if self._ylabel is None: self._ylabel = self.labels[1] # we create the chart object self.create_chart() # we start the plot (adds axis, grids and tools) self.start_plot() # we get the data from the incoming input self.get_data(**self.xy) # we filled the source and ranges with the calculated data self.get_source() # we dynamically inject the source and ranges into the plot self.add_data_plot(self.xdr, self.ydr) # we add the glyphs into the plot self.draw() # we pass info to build the legend self.end_plot(self.groups) # and finally we show it self.show_chart() # Some helper methods def _set_and_get(self, prefix, val, content): """Set a new attr and then get it to fill the self.data dict. Keep track of the attributes created. Args: prefix (str): prefix of the new attribute val (string): name of the new attribute content (obj): content of the new attribute """ setattr(self, prefix + val, content) self.data[prefix + val] = getattr(self, prefix + val) self.attr.append(prefix + val)
"""Simple wrapper over libtorrent""" from urllib.parse import quote import tempfile import os import asyncio import logging import mimetypes from collections import namedtuple from functools import cached_property from random import randint import libtorrent as lt mimetypes.init() logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) STATUSES = [ 'queued', 'checking', 'downloading_metadata', 'downloading', 'finished', 'seeding', 'allocating', 'checking_fastresume' ] TRACKERS = ("udp://tracker.openbittorrent.com:80/announce", "udp://tracker.publicbt.com:80/announce") DHT = (("router.utorrent.com", 6881), ("router.bittorrent.com", 6881), ("dht.transmissionbt.com", 6881), ("router.bitcomet.com", 6881), ("dht.aelitis.com", 6881)) EXTENSIONS = ('ut_pex', 'ut_metadata', 'smart_ban', 'metadata_transfoer') PORTS = (randint(1024, 2000), randint(1024, 2000)) def get_indexed(func): """Return currently indedex torrent""" def inner(*args, **kwargs): """Executes a method, and returns result[class_instance.index]""" return list(func(*args, **kwargs)())[args[0].index] return inner class TorrentSession: """Represent a torrent session. May handle multiple torrents""" def __init__(self, ports=PORTS, extensions=EXTENSIONS, dht_routers=DHT): self.session = lt.session() self.session.set_severity_level(lt.alert.severity_levels.critical) self.session.listen_on(*ports) for extension in extensions: self.session.add_extension(extension) self.session.start_dht() self.session.start_lsd() self.session.start_upnp() self.session.start_natpmp() for router in dht_routers: self.session.add_dht_router(*router) self.torrents = [] @property async def alerts(self): if all(a.finished for a in self): raise StopIteration() for alert in self.session.pop_alerts(): yield alert def remove_torrent(self, *args, **kwargs): """Remove torrent from session.""" self.session.remove_torrent(*args, **kwargs) def add_torrent(self, **kwargs): """Add a torrent to this session For accepted parameters reference, see over `Torrent` definition. """ torrent = Torrent(session=self, **kwargs) self.torrents.append(torrent) return torrent def __iter__(self): """Iterating trough a session will give you all the currently-downloading torrents""" return iter(self.torrents) class Torrent: """Wrapper over libtorrent""" def __init__(self, magnet_link: str, session: TorrentSession, trackers: tuple = TRACKERS, remove_after: bool = False, **params): """Set default parameters to a magnet link, and add ourselves to a session Arguments: magnet_link: Magnet link. Currently torrent files are not supported session: TorrentSession instance trackers: Tracker list to add to magnet link. Defaults to TRACKERS constant remove_after: Delete download dir upon __exit__. Only if params.save_path has not been specified save_path: Path to save the torrent into. A temporary directory will be created if not specified storage_mode: Property of lt.storage_mode_t """ self.session = session self.temp_dir = None self.remove_after = remove_after self.params = { 'save_path': None, 'storage_mode': lt.storage_mode_t.storage_mode_sparse, **params } #: Force trackers into magnet link. Not the best coding practice. 
trackers = (quote(t, safe='') for t in trackers) self.magnet_link = f'{magnet_link}&tr={"&tr=".join(trackers)}' self.handle = None def __enter__(self): if not self.params.get('save_path'): self.temp_dir = tempfile.TemporaryDirectory() self.params['save_path'] = self.temp_dir.name self.handle = lt.add_magnet_uri(self.session.session, self.magnet_link, self.params) return self def __exit__(self, *args, **kwargs): if self.temp_dir and self.remove_after: self.temp_dir.cleanup() self.session.remove_torrent(self.handle) def sequential(self, value: bool): """Set sequential download""" self.handle.set_sequential_download(value) @property def queue(self): """ Download queue """ return self.handle.get_download_queue() @property def queue_status(self): """ Returns a represented queue status """ state_char = [' ', '-', '=', '#'] def repr_piece(piece): """ Represents a piece """ return { piece['piece_index']: [state_char[block['state']] for block in piece['blocks']] } return [repr_piece(piece) for piece in self.queue] @property def name(self): """ Torrent name """ if not self.handle.has_metadata(): return "N/A" return self.torrent_info.name() @property def status(self): """ Return a status dict. """ status = self.handle.status() result = { 'name': self.name, 'download': status.download_rate, 'total_download': status.total_download, 'upload': status.upload_rate, 'total_upload': status.total_upload } if not self.finished: result.update({ 'state': STATUSES[status.state], 'total_downloaded': status.total_done, 'peers': status.num_peers, 'seeds': status.num_seeds, 'progress': '%5.4f%%' % (status.progress * 100), }) return result @property def finished(self): """Checks if torrent is finished.""" return self.handle.is_finished() @property def started(self): """ Checks if handle has metadata""" return self.handle.has_metadata() @property def torrent_info(self): """Return handle.torrent_info""" return self.handle.get_torrent_info() @cached_property def files(self): """Returns a `TorrentFile` object for each file""" fnum = range(len(self.torrent_info.files())) return [TorrentFile(self, i) for i in fnum] def update_priorities(self): """Update file priorities with self.files.""" self.handle.prioritize_files([a.priority for a in self.files]) def download_only(self, file): """ Filter out priorities for every file except this one""" if file not in self.files: return None for file_ in self.files: file.priority = 7 if file == file_ else 0 return file async def wait_for(self, status): """Wait for a specific status Example: >>> # This will wait for a torrent to start, and return the torrent >>> torrent = await Torrent("magnet:...").wait_for('started') >>> # This will wait for a torrent to finish, and return the torrent >>> torrent = await Torrent("magnet:...").wait_for('finished') """ while not getattr(self, status): await asyncio.sleep(1) def __iter__(self): """Iterating trough a Torrent instance will return each TorrentFile""" return iter(self.files) class TorrentFile: """ Wrapper over libtorrent.file """ def __init__(self, parent: Torrent, index: int): self.root = parent.params.get('save_path') self.index = index self.handle = parent.handle self.torrent = parent async def wait_for_completion(self, percent): while self.completed_percent < percent: await asyncio.sleep(5) async def launch(self): """Launch file with PLAYER envvar or xdg-open""" process = await asyncio.create_subprocess_exec( os.getenv('PLAYER', 'xdg-open'), f'{self.root}/{self.path}', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) await 
process.wait()

    @cached_property
    def mime_type(self):
        """Return file mimetype"""
        return mimetypes.guess_type(self.path)[0] or ''

    @cached_property
    def is_media(self):
        """Return true if file is a media type"""
        return any(
            self.mime_type.startswith(f) for f in ('audio', 'video', 'image'))

    @cached_property
    def path(self):
        """Return torrent path on filesystem"""
        return self.hfile.path

    @cached_property
    def file(self):
        """Return a file object with this file's path open in rb mode"""
        return open(self.path, 'rb')

    @property
    def filehash(self):
        """File hash"""
        return self.hfile.filehash

    @property
    def size(self):
        """File size"""
        return self.hfile.size

    @property
    @get_indexed
    def hfile(self):
        """ Return file from libtorrent """
        return self.handle.get_torrent_info().files

    @property
    @get_indexed
    def priority(self):
        """ Readonly file priority from libtorrent """
        return self.handle.file_priorities

    @priority.setter
    def priority(self, value):
        self._priority = value
        # the parent Torrent is stored as ``self.torrent`` in __init__;
        # ``self.parent`` does not exist on TorrentFile
        self.torrent.update_priorities()

    @property
    @get_indexed
    def file_progress(self):
        """ Returns file progress """
        return self.handle.file_progress

    @property
    def completed_percent(self):
        """ Returns this file completed percentage """
        return (self.file_progress / self.size) * 100
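# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): putting the wrapper
# together -- create a session, add a magnet link, wait for the metadata and
# stream the first media file sequentially.  The magnet URI is a placeholder
# and the coroutine is never invoked here; it could be driven with, e.g.,
# asyncio.run(_download_first_media('magnet:?xt=urn:btih:...')).
async def _download_first_media(magnet_uri):
    session = TorrentSession()
    with session.add_torrent(magnet_link=magnet_uri, remove_after=True) as torrent:
        torrent.sequential(True)
        await torrent.wait_for('started')            # metadata received
        media = [f for f in torrent.files if f.is_media]
        if media:
            torrent.download_only(media[0])
            await media[0].wait_for_completion(5)    # buffer ~5% before playing
            await media[0].launch()
        await torrent.wait_for('finished')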
from __future__ import print_function, division from sympy import ask, Q from sympy.core import Basic, Add from sympy.core.compatibility import range from sympy.strategies import typed, exhaust, condition, do_one, unpack from sympy.strategies.traverse import bottom_up from sympy.utilities import sift from sympy.utilities.misc import filldedent from sympy.matrices.expressions.matexpr import MatrixExpr, ZeroMatrix, Identity from sympy.matrices.expressions.matmul import MatMul from sympy.matrices.expressions.matadd import MatAdd from sympy.matrices.expressions.matpow import MatPow from sympy.matrices.expressions.transpose import Transpose, transpose from sympy.matrices.expressions.trace import Trace from sympy.matrices.expressions.determinant import det, Determinant from sympy.matrices.expressions.slice import MatrixSlice from sympy.matrices.expressions.inverse import Inverse from sympy.matrices import Matrix, ShapeError from sympy.functions.elementary.complexes import re, im class BlockMatrix(MatrixExpr): """A BlockMatrix is a Matrix comprised of other matrices. The submatrices are stored in a SymPy Matrix object but accessed as part of a Matrix Expression >>> from sympy import (MatrixSymbol, BlockMatrix, symbols, ... Identity, ZeroMatrix, block_collapse) >>> n,m,l = symbols('n m l') >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]]) >>> print(B) Matrix([ [X, Z], [0, Y]]) >>> C = BlockMatrix([[Identity(n), Z]]) >>> print(C) Matrix([[I, Z]]) >>> print(block_collapse(C*B)) Matrix([[X, Z + Z*Y]]) Some matrices might be comprised of rows of blocks with the matrices in each row having the same height and the rows all having the same total number of columns but not having the same number of columns for each matrix in each row. In this case, the matrix is not a block matrix and should be instantiated by Matrix. >>> from sympy import ones, Matrix >>> dat = [ ... [ones(3,2), ones(3,3)*2], ... [ones(2,3)*3, ones(2,2)*4]] ... >>> BlockMatrix(dat) Traceback (most recent call last): ... ValueError: Although this matrix is comprised of blocks, the blocks do not fill the matrix in a size-symmetric fashion. To create a full matrix from these arguments, pass them directly to Matrix. 
>>> Matrix(dat) Matrix([ [1, 1, 2, 2, 2], [1, 1, 2, 2, 2], [1, 1, 2, 2, 2], [3, 3, 3, 4, 4], [3, 3, 3, 4, 4]]) See Also ======== sympy.matrices.matrices.MatrixBase.irregular """ def __new__(cls, *args, **kwargs): from sympy.matrices.immutable import ImmutableDenseMatrix from sympy.utilities.iterables import is_sequence isMat = lambda i: getattr(i, 'is_Matrix', False) if len(args) != 1 or \ not is_sequence(args[0]) or \ len(set([isMat(r) for r in args[0]])) != 1: raise ValueError(filldedent(''' expecting a sequence of 1 or more rows containing Matrices.''')) rows = args[0] if args else [] if not isMat(rows): if rows and isMat(rows[0]): rows = [rows] # rows is not list of lists or [] # regularity check # same number of matrices in each row blocky = ok = len(set([len(r) for r in rows])) == 1 if ok: # same number of rows for each matrix in a row for r in rows: ok = len(set([i.rows for i in r])) == 1 if not ok: break blocky = ok # same number of cols for each matrix in each col for c in range(len(rows[0])): ok = len(set([rows[i][c].cols for i in range(len(rows))])) == 1 if not ok: break if not ok: # same total cols in each row ok = len(set([ sum([i.cols for i in r]) for r in rows])) == 1 if blocky and ok: raise ValueError(filldedent(''' Although this matrix is comprised of blocks, the blocks do not fill the matrix in a size-symmetric fashion. To create a full matrix from these arguments, pass them directly to Matrix.''')) raise ValueError(filldedent(''' When there are not the same number of rows in each row's matrices or there are not the same number of total columns in each row, the matrix is not a block matrix. If this matrix is known to consist of blocks fully filling a 2-D space then see Matrix.irregular.''')) mat = ImmutableDenseMatrix(rows, evaluate=False) obj = Basic.__new__(cls, mat) return obj @property def shape(self): numrows = numcols = 0 M = self.blocks for i in range(M.shape[0]): numrows += M[i, 0].shape[0] for i in range(M.shape[1]): numcols += M[0, i].shape[1] return (numrows, numcols) @property def blockshape(self): return self.blocks.shape @property def blocks(self): return self.args[0] @property def rowblocksizes(self): return [self.blocks[i, 0].rows for i in range(self.blockshape[0])] @property def colblocksizes(self): return [self.blocks[0, i].cols for i in range(self.blockshape[1])] def structurally_equal(self, other): return (isinstance(other, BlockMatrix) and self.shape == other.shape and self.blockshape == other.blockshape and self.rowblocksizes == other.rowblocksizes and self.colblocksizes == other.colblocksizes) def _blockmul(self, other): if (isinstance(other, BlockMatrix) and self.colblocksizes == other.rowblocksizes): return BlockMatrix(self.blocks*other.blocks) return self * other def _blockadd(self, other): if (isinstance(other, BlockMatrix) and self.structurally_equal(other)): return BlockMatrix(self.blocks + other.blocks) return self + other def _eval_transpose(self): # Flip all the individual matrices matrices = [transpose(matrix) for matrix in self.blocks] # Make a copy M = Matrix(self.blockshape[0], self.blockshape[1], matrices) # Transpose the block structure M = M.transpose() return BlockMatrix(M) def _eval_trace(self): if self.rowblocksizes == self.colblocksizes: return Add(*[Trace(self.blocks[i, i]) for i in range(self.blockshape[0])]) raise NotImplementedError( "Can't perform trace of irregular blockshape") def _eval_determinant(self): if self.blockshape == (2, 2): [[A, B], [C, D]] = self.blocks.tolist() if ask(Q.invertible(A)): return det(A)*det(D - 
C*A.I*B) elif ask(Q.invertible(D)): return det(D)*det(A - B*D.I*C) return Determinant(self) def as_real_imag(self): real_matrices = [re(matrix) for matrix in self.blocks] real_matrices = Matrix(self.blockshape[0], self.blockshape[1], real_matrices) im_matrices = [im(matrix) for matrix in self.blocks] im_matrices = Matrix(self.blockshape[0], self.blockshape[1], im_matrices) return (real_matrices, im_matrices) def transpose(self): """Return transpose of matrix. Examples ======== >>> from sympy import MatrixSymbol, BlockMatrix, ZeroMatrix >>> from sympy.abc import l, m, n >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]]) >>> B.transpose() Matrix([ [X.T, 0], [Z.T, Y.T]]) >>> _.transpose() Matrix([ [X, Z], [0, Y]]) """ return self._eval_transpose() def _entry(self, i, j, **kwargs): # Find row entry for row_block, numrows in enumerate(self.rowblocksizes): if (i < numrows) != False: break else: i -= numrows for col_block, numcols in enumerate(self.colblocksizes): if (j < numcols) != False: break else: j -= numcols return self.blocks[row_block, col_block][i, j] @property def is_Identity(self): if self.blockshape[0] != self.blockshape[1]: return False for i in range(self.blockshape[0]): for j in range(self.blockshape[1]): if i==j and not self.blocks[i, j].is_Identity: return False if i!=j and not self.blocks[i, j].is_ZeroMatrix: return False return True @property def is_structurally_symmetric(self): return self.rowblocksizes == self.colblocksizes def equals(self, other): if self == other: return True if (isinstance(other, BlockMatrix) and self.blocks == other.blocks): return True return super(BlockMatrix, self).equals(other) class BlockDiagMatrix(BlockMatrix): """ A BlockDiagMatrix is a BlockMatrix with matrices only along the diagonal >>> from sympy import MatrixSymbol, BlockDiagMatrix, symbols, Identity >>> n, m, l = symbols('n m l') >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> BlockDiagMatrix(X, Y) Matrix([ [X, 0], [0, Y]]) See Also ======== sympy.matrices.dense.diag """ def __new__(cls, *mats): return Basic.__new__(BlockDiagMatrix, *mats) @property def diag(self): return self.args @property def blocks(self): from sympy.matrices.immutable import ImmutableDenseMatrix mats = self.args data = [[mats[i] if i == j else ZeroMatrix(mats[i].rows, mats[j].cols) for j in range(len(mats))] for i in range(len(mats))] return ImmutableDenseMatrix(data) @property def shape(self): return (sum(block.rows for block in self.args), sum(block.cols for block in self.args)) @property def blockshape(self): n = len(self.args) return (n, n) @property def rowblocksizes(self): return [block.rows for block in self.args] @property def colblocksizes(self): return [block.cols for block in self.args] def _eval_inverse(self, expand='ignored'): return BlockDiagMatrix(*[mat.inverse() for mat in self.args]) def _eval_transpose(self): return BlockDiagMatrix(*[mat.transpose() for mat in self.args]) def _blockmul(self, other): if (isinstance(other, BlockDiagMatrix) and self.colblocksizes == other.rowblocksizes): return BlockDiagMatrix(*[a*b for a, b in zip(self.args, other.args)]) else: return BlockMatrix._blockmul(self, other) def _blockadd(self, other): if (isinstance(other, BlockDiagMatrix) and self.blockshape == other.blockshape and self.rowblocksizes == other.rowblocksizes and self.colblocksizes == other.colblocksizes): return BlockDiagMatrix(*[a + b for a, b in zip(self.args, other.args)]) else: return 
BlockMatrix._blockadd(self, other) def block_collapse(expr): """Evaluates a block matrix expression >>> from sympy import MatrixSymbol, BlockMatrix, symbols, \ Identity, Matrix, ZeroMatrix, block_collapse >>> n,m,l = symbols('n m l') >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m, n), Y]]) >>> print(B) Matrix([ [X, Z], [0, Y]]) >>> C = BlockMatrix([[Identity(n), Z]]) >>> print(C) Matrix([[I, Z]]) >>> print(block_collapse(C*B)) Matrix([[X, Z + Z*Y]]) """ from sympy.strategies.util import expr_fns hasbm = lambda expr: isinstance(expr, MatrixExpr) and expr.has(BlockMatrix) conditioned_rl = condition( hasbm, typed( {MatAdd: do_one(bc_matadd, bc_block_plus_ident), MatMul: do_one(bc_matmul, bc_dist), MatPow: bc_matmul, Transpose: bc_transpose, Inverse: bc_inverse, BlockMatrix: do_one(bc_unpack, deblock)} ) ) rule = exhaust( bottom_up( exhaust(conditioned_rl), fns=expr_fns ) ) result = rule(expr) doit = getattr(result, 'doit', None) if doit is not None: return doit() else: return result def bc_unpack(expr): if expr.blockshape == (1, 1): return expr.blocks[0, 0] return expr def bc_matadd(expr): args = sift(expr.args, lambda M: isinstance(M, BlockMatrix)) blocks = args[True] if not blocks: return expr nonblocks = args[False] block = blocks[0] for b in blocks[1:]: block = block._blockadd(b) if nonblocks: return MatAdd(*nonblocks) + block else: return block def bc_block_plus_ident(expr): idents = [arg for arg in expr.args if arg.is_Identity] if not idents: return expr blocks = [arg for arg in expr.args if isinstance(arg, BlockMatrix)] if (blocks and all(b.structurally_equal(blocks[0]) for b in blocks) and blocks[0].is_structurally_symmetric): block_id = BlockDiagMatrix(*[Identity(k) for k in blocks[0].rowblocksizes]) return MatAdd(block_id * len(idents), *blocks).doit() return expr def bc_dist(expr): """ Turn a*[X, Y] into [a*X, a*Y] """ factor, mat = expr.as_coeff_mmul() if factor == 1: return expr unpacked = unpack(mat) if isinstance(unpacked, BlockDiagMatrix): B = unpacked.diag new_B = [factor * mat for mat in B] return BlockDiagMatrix(*new_B) elif isinstance(unpacked, BlockMatrix): B = unpacked.blocks new_B = [ [factor * B[i, j] for j in range(B.cols)] for i in range(B.rows)] return BlockMatrix(new_B) return unpacked def bc_matmul(expr): if isinstance(expr, MatPow): if expr.args[1].is_Integer: factor, matrices = (1, [expr.args[0]]*expr.args[1]) else: return expr else: factor, matrices = expr.as_coeff_matrices() i = 0 while (i+1 < len(matrices)): A, B = matrices[i:i+2] if isinstance(A, BlockMatrix) and isinstance(B, BlockMatrix): matrices[i] = A._blockmul(B) matrices.pop(i+1) elif isinstance(A, BlockMatrix): matrices[i] = A._blockmul(BlockMatrix([[B]])) matrices.pop(i+1) elif isinstance(B, BlockMatrix): matrices[i] = BlockMatrix([[A]])._blockmul(B) matrices.pop(i+1) else: i+=1 return MatMul(factor, *matrices).doit() def bc_transpose(expr): collapse = block_collapse(expr.arg) return collapse._eval_transpose() def bc_inverse(expr): if isinstance(expr.arg, BlockDiagMatrix): return expr._eval_inverse() expr2 = blockinverse_1x1(expr) if expr != expr2: return expr2 return blockinverse_2x2(Inverse(reblock_2x2(expr.arg))) def blockinverse_1x1(expr): if isinstance(expr.arg, BlockMatrix) and expr.arg.blockshape == (1, 1): mat = Matrix([[expr.arg.blocks[0].inverse()]]) return BlockMatrix(mat) return expr def blockinverse_2x2(expr): if isinstance(expr.arg, BlockMatrix) and expr.arg.blockshape == (2, 2): # Cite: The 
Matrix Cookbook Section 9.1.3 [[A, B], [C, D]] = expr.arg.blocks.tolist() return BlockMatrix([[ (A - B*D.I*C).I, (-A).I*B*(D - C*A.I*B).I], [-(D - C*A.I*B).I*C*A.I, (D - C*A.I*B).I]]) else: return expr def deblock(B): """ Flatten a BlockMatrix of BlockMatrices """ if not isinstance(B, BlockMatrix) or not B.blocks.has(BlockMatrix): return B wrap = lambda x: x if isinstance(x, BlockMatrix) else BlockMatrix([[x]]) bb = B.blocks.applyfunc(wrap) # everything is a block from sympy import Matrix try: MM = Matrix(0, sum(bb[0, i].blocks.shape[1] for i in range(bb.shape[1])), []) for row in range(0, bb.shape[0]): M = Matrix(bb[row, 0].blocks) for col in range(1, bb.shape[1]): M = M.row_join(bb[row, col].blocks) MM = MM.col_join(M) return BlockMatrix(MM) except ShapeError: return B def reblock_2x2(B): """ Reblock a BlockMatrix so that it has 2x2 blocks of block matrices """ if not isinstance(B, BlockMatrix) or not all(d > 2 for d in B.blocks.shape): return B BM = BlockMatrix # for brevity's sake return BM([[ B.blocks[0, 0], BM(B.blocks[0, 1:])], [BM(B.blocks[1:, 0]), BM(B.blocks[1:, 1:])]]) def bounds(sizes): """ Convert sequence of numbers into pairs of low-high pairs >>> from sympy.matrices.expressions.blockmatrix import bounds >>> bounds((1, 10, 50)) [(0, 1), (1, 11), (11, 61)] """ low = 0 rv = [] for size in sizes: rv.append((low, low + size)) low += size return rv def blockcut(expr, rowsizes, colsizes): """ Cut a matrix expression into Blocks >>> from sympy import ImmutableMatrix, blockcut >>> M = ImmutableMatrix(4, 4, range(16)) >>> B = blockcut(M, (1, 3), (1, 3)) >>> type(B).__name__ 'BlockMatrix' >>> ImmutableMatrix(B.blocks[0, 1]) Matrix([[1, 2, 3]]) """ rowbounds = bounds(rowsizes) colbounds = bounds(colsizes) return BlockMatrix([[MatrixSlice(expr, rowbound, colbound) for colbound in colbounds] for rowbound in rowbounds])
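# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): building block
# expressions and collapsing them with ``block_collapse``, mirroring the
# doctests in this module.  Symbol names are arbitrary and the function is
# never invoked here.
def _blockmatrix_demo():
    from sympy import MatrixSymbol, Identity, ZeroMatrix, symbols, block_collapse

    n, m = symbols('n m')
    X = MatrixSymbol('X', n, n)
    Y = MatrixSymbol('Y', m, m)
    Z = MatrixSymbol('Z', n, m)

    B = BlockMatrix([[X, Z], [ZeroMatrix(m, n), Y]])
    C = BlockMatrix([[Identity(n), Z]])
    # block-aware simplification: [I, Z] * [[X, Z], [0, Y]] -> [X, Z + Z*Y]
    collapsed = block_collapse(C * B)

    # a BlockDiagMatrix multiplies block-wise along its diagonal
    D = BlockDiagMatrix(X, Y)
    return collapsed, block_collapse(D * D)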
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals from six import iteritems, string_types import datetime import frappe, sys from frappe import _ from frappe.utils import (cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta, sanitize_html, sanitize_email, cast_fieldtype) from frappe.model import default_fields from frappe.model.naming import set_new_name from frappe.model.utils.link_count import notify_link_count from frappe.modules import load_doctype_module from frappe.model import display_fieldtypes from frappe.model.db_schema import type_map, varchar_len from frappe.utils.password import get_decrypted_password, set_encrypted_password _classes = {} def get_controller(doctype): """Returns the **class** object of the given DocType. For `custom` type, returns `frappe.model.document.Document`. :param doctype: DocType name as string.""" from frappe.model.document import Document global _classes if not doctype in _classes: module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \ or ["Core", False] if custom: _class = Document else: module = load_doctype_module(doctype, module_name) classname = doctype.replace(" ", "").replace("-", "") if hasattr(module, classname): _class = getattr(module, classname) if issubclass(_class, BaseDocument): _class = getattr(module, classname) else: raise ImportError(doctype) else: raise ImportError(doctype) _classes[doctype] = _class return _classes[doctype] class BaseDocument(object): ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns") def __init__(self, d): self.update(d) self.dont_update_if_missing = [] if hasattr(self, "__setup__"): self.__setup__() @property def meta(self): if not hasattr(self, "_meta"): self._meta = frappe.get_meta(self.doctype) return self._meta def update(self, d): if "doctype" in d: self.set("doctype", d.get("doctype")) # first set default field values of base document for key in default_fields: if key in d: self.set(key, d.get(key)) for key, value in iteritems(d): self.set(key, value) return self def update_if_missing(self, d): if isinstance(d, BaseDocument): d = d.get_valid_dict() if "doctype" in d: self.set("doctype", d.get("doctype")) for key, value in iteritems(d): # dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing): self.set(key, value) def get_db_value(self, key): return frappe.db.get_value(self.doctype, self.name, key) def get(self, key=None, filters=None, limit=None, default=None): if key: if isinstance(key, dict): return _filter(self.get_all_children(), key, limit=limit) if filters: if isinstance(filters, dict): value = _filter(self.__dict__.get(key, []), filters, limit=limit) else: default = filters filters = None value = self.__dict__.get(key, default) else: value = self.__dict__.get(key, default) if value is None and key not in self.ignore_in_getter \ and key in (d.fieldname for d in self.meta.get_table_fields()): self.set(key, []) value = self.__dict__.get(key) return value else: return self.__dict__ def getone(self, key, filters=None): return self.get(key, filters=filters, limit=1)[0] def set(self, key, value, as_value=False): if isinstance(value, list) and not as_value: self.__dict__[key] = [] self.extend(key, value) else: self.__dict__[key] = value def delete_key(self, key): if key in self.__dict__: del 
self.__dict__[key] def append(self, key, value=None): if value==None: value={} if isinstance(value, (dict, BaseDocument)): if not self.__dict__.get(key): self.__dict__[key] = [] value = self._init_child(value, key) self.__dict__[key].append(value) # reference parent document value.parent_doc = self return value else: raise ValueError( "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1] ) def extend(self, key, value): if isinstance(value, list): for v in value: self.append(key, v) else: raise ValueError def remove(self, doc): self.get(doc.parentfield).remove(doc) def _init_child(self, value, key): if not self.doctype: return value if not isinstance(value, BaseDocument): if "doctype" not in value: value["doctype"] = self.get_table_field_doctype(key) if not value["doctype"]: raise AttributeError(key) value = get_controller(value["doctype"])(value) value.init_valid_columns() value.parent = self.name value.parenttype = self.doctype value.parentfield = key if value.docstatus is None: value.docstatus = 0 if not getattr(value, "idx", None): value.idx = len(self.get(key) or []) + 1 if not getattr(value, "name", None): value.__dict__['__islocal'] = 1 return value def get_valid_dict(self, sanitize=True, convert_dates_to_str=False): d = frappe._dict() for fieldname in self.meta.get_valid_columns(): d[fieldname] = self.get(fieldname) # if no need for sanitization and value is None, continue if not sanitize and d[fieldname] is None: continue df = self.meta.get_field(fieldname) if df: if df.fieldtype=="Check": if d[fieldname]==None: d[fieldname] = 0 elif (not isinstance(d[fieldname], int) or d[fieldname] > 1): d[fieldname] = 1 if cint(d[fieldname]) else 0 elif df.fieldtype=="Int" and not isinstance(d[fieldname], int): d[fieldname] = cint(d[fieldname]) elif df.fieldtype in ("Currency", "Float", "Percent") and not isinstance(d[fieldname], float): d[fieldname] = flt(d[fieldname]) elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="": d[fieldname] = None elif df.get("unique") and cstr(d[fieldname]).strip()=="": # unique empty field should be set to None d[fieldname] = None if isinstance(d[fieldname], list) and df.fieldtype != 'Table': frappe.throw(_('Value for {0} cannot be a list').format(_(df.label))) if convert_dates_to_str and isinstance(d[fieldname], (datetime.datetime, datetime.time, datetime.timedelta)): d[fieldname] = str(d[fieldname]) return d def init_valid_columns(self): for key in default_fields: if key not in self.__dict__: self.__dict__[key] = None if key in ("idx", "docstatus") and self.__dict__[key] is None: self.__dict__[key] = 0 for key in self.get_valid_columns(): if key not in self.__dict__: self.__dict__[key] = None def get_valid_columns(self): if self.doctype not in frappe.local.valid_columns: if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"): from frappe.model.meta import get_table_columns valid = get_table_columns(self.doctype) else: valid = self.meta.get_valid_columns() frappe.local.valid_columns[self.doctype] = valid return frappe.local.valid_columns[self.doctype] def is_new(self): return self.get("__islocal") def as_dict(self, no_nulls=False, no_default_fields=False, convert_dates_to_str=False): doc = self.get_valid_dict(convert_dates_to_str=convert_dates_to_str) doc["doctype"] = self.doctype for df in self.meta.get_table_fields(): children = self.get(df.fieldname) or [] doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children] if no_nulls: for k in list(doc): if doc[k] 
is None: del doc[k] if no_default_fields: for k in list(doc): if k in default_fields: del doc[k] for key in ("_user_tags", "__islocal", "__onload", "_liked_by", "__run_link_triggers"): if self.get(key): doc[key] = self.get(key) return doc def as_json(self): return frappe.as_json(self.as_dict()) def get_table_field_doctype(self, fieldname): return self.meta.get_field(fieldname).options def get_parentfield_of_doctype(self, doctype): fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype] return fieldname[0] if fieldname else None def db_insert(self): """INSERT the document (with valid columns) in the database.""" if not self.name: # name will be set by document class in most cases set_new_name(self) if not self.creation: self.creation = self.modified = now() self.created_by = self.modifield_by = frappe.session.user d = self.get_valid_dict(convert_dates_to_str=True) columns = list(d) try: frappe.db.sql("""insert into `tab{doctype}` ({columns}) values ({values})""".format( doctype = self.doctype, columns = ", ".join(["`"+c+"`" for c in columns]), values = ", ".join(["%s"] * len(columns)) ), list(d.values())) except Exception as e: if e.args[0]==1062: if "PRIMARY" in cstr(e.args[1]): if self.meta.autoname=="hash": # hash collision? try again self.name = None self.db_insert() return frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name)) raise frappe.DuplicateEntryError(self.doctype, self.name, e) elif "Duplicate" in cstr(e.args[1]): # unique constraint self.show_unique_validation_message(e) else: raise else: raise self.set("__islocal", False) def db_update(self): if self.get("__islocal") or not self.name: self.db_insert() return d = self.get_valid_dict(convert_dates_to_str=True) # don't update name, as case might've been changed name = d['name'] del d['name'] columns = list(d) try: frappe.db.sql("""update `tab{doctype}` set {values} where name=%s""".format( doctype = self.doctype, values = ", ".join(["`"+c+"`=%s" for c in columns]) ), list(d.values()) + [name]) except Exception as e: if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]): self.show_unique_validation_message(e) else: raise def show_unique_validation_message(self, e): type, value, traceback = sys.exc_info() fieldname, label = str(e).split("'")[-2], None # unique_first_fieldname_second_fieldname is the constraint name # created using frappe.db.add_unique if "unique_" in fieldname: fieldname = fieldname.split("_", 1)[1] df = self.meta.get_field(fieldname) if df: label = df.label frappe.msgprint(_("{0} must be unique".format(label or fieldname))) # this is used to preserve traceback raise frappe.UniqueValidationError(self.doctype, self.name, e) def update_modified(self): '''Update modified timestamp''' self.set("modified", now()) frappe.db.set_value(self.doctype, self.name, 'modified', self.modified, update_modified=False) def _fix_numeric_types(self): for df in self.meta.get("fields"): if df.fieldtype == "Check": self.set(df.fieldname, cint(self.get(df.fieldname))) elif self.get(df.fieldname) is not None: if df.fieldtype == "Int": self.set(df.fieldname, cint(self.get(df.fieldname))) elif df.fieldtype in ("Float", "Currency", "Percent"): self.set(df.fieldname, flt(self.get(df.fieldname))) if self.docstatus is not None: self.docstatus = cint(self.docstatus) def _get_missing_mandatory_fields(self): """Get mandatory fields that do not have any values""" def get_msg(df): if df.fieldtype == "Table": return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label)) elif 
self.parentfield: return "{}: {} {} #{}: {}: {}".format(_("Error"), frappe.bold(_(self.doctype)), _("Row"), self.idx, _("Value missing for"), _(df.label)) else: return _("Error: Value missing for {0}: {1}").format(_(df.parent), _(df.label)) missing = [] for df in self.meta.get("fields", {"reqd": ('=', 1)}): if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip(): missing.append((df.fieldname, get_msg(df))) # check for missing parent and parenttype if self.meta.istable: for fieldname in ("parent", "parenttype"): if not self.get(fieldname): missing.append((fieldname, get_msg(frappe._dict(label=fieldname)))) return missing def get_invalid_links(self, is_submittable=False): '''Returns list of invalid links and also updates fetch values if not set''' def get_msg(df, docname): if self.parentfield: return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname) else: return "{}: {}".format(_(df.label), docname) invalid_links = [] cancelled_links = [] for df in (self.meta.get_link_fields() + self.meta.get("fields", {"fieldtype": ('=', "Dynamic Link")})): docname = self.get(df.fieldname) if docname: if df.fieldtype=="Link": doctype = df.options if not doctype: frappe.throw(_("Options not set for link field {0}").format(df.fieldname)) else: doctype = self.get(df.options) if not doctype: frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options))) # MySQL is case insensitive. Preserve case of the original docname in the Link Field. # get a map of values ot fetch along with this link query # that are mapped as link_fieldname.source_fieldname in Options of # Readonly or Data or Text type fields fields_to_fetch = [ _df for _df in self.meta.get_fields_to_fetch(df.fieldname) if not self.get(_df.fieldname) ] if not fields_to_fetch: # cache a single value type values = frappe._dict(name=frappe.db.get_value(doctype, docname, 'name', cache=True)) else: values_to_fetch = ['name'] + [_df.options.split('.')[-1] for _df in fields_to_fetch] # don't cache if fetching other values too values = frappe.db.get_value(doctype, docname, values_to_fetch, as_dict=True) if frappe.get_meta(doctype).issingle: values.name = doctype if values: setattr(self, df.fieldname, values.name) for _df in fields_to_fetch: setattr(self, _df.fieldname, values[_df.options.split('.')[-1]]) notify_link_count(doctype, docname) if not values.name: invalid_links.append((df.fieldname, docname, get_msg(df, docname))) elif (df.fieldname != "amended_from" and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2): cancelled_links.append((df.fieldname, docname, get_msg(df, docname))) return invalid_links, cancelled_links def _validate_selects(self): if frappe.flags.in_import: return for df in self.meta.get_select_fields(): if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options): continue options = (df.options or "").split("\n") # if only empty options if not filter(None, options): continue # strip and set self.set(df.fieldname, cstr(self.get(df.fieldname)).strip()) value = self.get(df.fieldname) if value not in options and not (frappe.flags.in_test and value.startswith("_T-")): # show an elaborate message prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else "" label = _(self.meta.get_label(df.fieldname)) comma_options = '", "'.join(_(each) for each in options) frappe.throw(_('{0} {1} cannot be "{2}". 
It should be one of "{3}"').format(prefix, label, value, comma_options)) def _validate_constants(self): if frappe.flags.in_import or self.is_new() or self.flags.ignore_validate_constants: return constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": ('=',1)})] if constants: values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True) for fieldname in constants: df = self.meta.get_field(fieldname) # This conversion to string only when fieldtype is Date if df.fieldtype == 'Date' or df.fieldtype == 'Datetime': value = str(values.get(fieldname)) else: value = values.get(fieldname) if self.get(fieldname) != value: frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)), frappe.CannotChangeConstantError) def _validate_length(self): if frappe.flags.in_install: return if self.meta.issingle: # single doctype value type is mediumtext return for fieldname, value in iteritems(self.get_valid_dict()): df = self.meta.get_field(fieldname) if df and df.fieldtype in type_map and type_map[df.fieldtype][0]=="varchar": max_length = cint(df.get("length")) or cint(varchar_len) if len(cstr(value)) > max_length: if self.parentfield and self.idx: reference = _("{0}, Row {1}").format(_(self.doctype), self.idx) else: reference = "{0} {1}".format(_(self.doctype), self.name) frappe.throw(_("{0}: '{1}' ({3}) will get truncated, as max characters allowed is {2}")\ .format(reference, _(df.label), max_length, value), frappe.CharacterLengthExceededError, title=_('Value too big')) def _validate_update_after_submit(self): # get the full doc with children db_values = frappe.get_doc(self.doctype, self.name).as_dict() for key in self.as_dict(): df = self.meta.get_field(key) db_value = db_values.get(key) if df and not df.allow_on_submit and (self.get(key) or db_value): if df.fieldtype=="Table": # just check if the table size has changed # individual fields will be checked in the loop for children self_value = len(self.get(key)) db_value = len(db_value) else: self_value = self.get_value(key) if self_value != db_value: frappe.throw(_("Not allowed to change {0} after submission").format(df.label), frappe.UpdateAfterSubmitError) def _sanitize_content(self): """Sanitize HTML and Email in field values. Used to prevent XSS. 
- Ignore if 'Ignore XSS Filter' is checked or fieldtype is 'Code' """ if frappe.flags.in_install: return for fieldname, value in self.get_valid_dict().items(): if not value or not isinstance(value, string_types): continue value = frappe.as_unicode(value) if (u"<" not in value and u">" not in value): # doesn't look like html so no need continue elif "<!-- markdown -->" in value and not ("<script" in value or "javascript:" in value): # should be handled separately via the markdown converter function continue df = self.meta.get_field(fieldname) sanitized_value = value if df and df.get("fieldtype") in ("Data", "Code", "Small Text") and df.get("options")=="Email": sanitized_value = sanitize_email(value) elif df and (df.get("ignore_xss_filter") or (df.get("fieldtype")=="Code" and df.get("options")!="Email") or df.get("fieldtype") in ("Attach", "Attach Image") # cancelled and submit but not update after submit should be ignored or self.docstatus==2 or (self.docstatus==1 and not df.get("allow_on_submit"))): continue else: sanitized_value = sanitize_html(value, linkify=df.fieldtype=='Text Editor') self.set(fieldname, sanitized_value) def _save_passwords(self): '''Save password field values in __Auth table''' if self.flags.ignore_save_passwords: return for df in self.meta.get('fields', {'fieldtype': ('=', 'Password')}): new_password = self.get(df.fieldname) if new_password and not self.is_dummy_password(new_password): # is not a dummy password like '*****' set_encrypted_password(self.doctype, self.name, new_password, df.fieldname) # set dummy password like '*****' self.set(df.fieldname, '*'*len(new_password)) def get_password(self, fieldname='password', raise_exception=True): if self.get(fieldname) and not self.is_dummy_password(self.get(fieldname)): return self.get(fieldname) return get_decrypted_password(self.doctype, self.name, fieldname, raise_exception=raise_exception) def is_dummy_password(self, pwd): return ''.join(set(pwd))=='*' def precision(self, fieldname, parentfield=None): """Returns float precision for a particular field (or get global default). :param fieldname: Fieldname for which precision is required. 
:param parentfield: If fieldname is in child table.""" from frappe.model.meta import get_field_precision if parentfield and not isinstance(parentfield, string_types): parentfield = parentfield.parentfield cache_key = parentfield or "main" if not hasattr(self, "_precision"): self._precision = frappe._dict() if cache_key not in self._precision: self._precision[cache_key] = frappe._dict() if fieldname not in self._precision[cache_key]: self._precision[cache_key][fieldname] = None doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype df = frappe.get_meta(doctype).get_field(fieldname) if df.fieldtype in ("Currency", "Float", "Percent"): self._precision[cache_key][fieldname] = get_field_precision(df, self) return self._precision[cache_key][fieldname] def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False, translated=False): from frappe.utils.formatters import format_value df = self.meta.get_field(fieldname) if not df and fieldname in default_fields: from frappe.model.meta import get_default_df df = get_default_df(fieldname) val = self.get(fieldname) if translated: val = _(val) if absolute_value and isinstance(val, (int, float)): val = abs(self.get(fieldname)) if not doc: doc = getattr(self, "parent_doc", None) or self return format_value(val, df=df, doc=doc, currency=currency) def is_print_hide(self, fieldname, df=None, for_print=True): """Returns true if fieldname is to be hidden for print. Print Hide can be set via the Print Format Builder or in the controller as a list of hidden fields. Example class MyDoc(Document): def __setup__(self): self.print_hide = ["field1", "field2"] :param fieldname: Fieldname to be checked if hidden. """ meta_df = self.meta.get_field(fieldname) if meta_df and meta_df.get("__print_hide"): return True print_hide = 0 if self.get(fieldname)==0 and not self.meta.istable: print_hide = ( df and df.print_hide_if_no_value ) or ( meta_df and meta_df.print_hide_if_no_value ) if not print_hide: if df and df.print_hide is not None: print_hide = df.print_hide elif meta_df: print_hide = meta_df.print_hide return print_hide def in_format_data(self, fieldname): """Returns True if shown via Print Format::`format_data` property. 
Called from within standard print format.""" doc = getattr(self, "parent_doc", self) if hasattr(doc, "format_data_map"): return fieldname in doc.format_data_map else: return True def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields): """If the user does not have permissions at permlevel > 0, then reset the values to original / default""" to_reset = [] for df in high_permlevel_fields: if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes: to_reset.append(df) if to_reset: if self.is_new(): # if new, set default value ref_doc = frappe.new_doc(self.doctype) else: # get values from old doc if self.parent: self.parent_doc.get_latest() ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0] else: ref_doc = self.get_latest() for df in to_reset: self.set(df.fieldname, ref_doc.get(df.fieldname)) def get_value(self, fieldname): df = self.meta.get_field(fieldname) val = self.get(fieldname) return self.cast(val, df) def cast(self, value, df): return cast_fieldtype(df.fieldtype, value) def _extract_images_from_text_editor(self): from frappe.utils.file_manager import extract_images_from_doc if self.doctype != "DocType": for df in self.meta.get("fields", {"fieldtype": ('=', "Text Editor")}): extract_images_from_doc(self, df.fieldname) def _filter(data, filters, limit=None): """pass filters as: {"key": "val", "key": ["!=", "val"], "key": ["in", "val"], "key": ["not in", "val"], "key": "^val", "key" : True (exists), "key": False (does not exist) }""" out, _filters = [], {} if not data: return out # setup filters as tuples if filters: for f in filters: fval = filters[f] if not isinstance(fval, (tuple, list)): if fval is True: fval = ("not None", fval) elif fval is False: fval = ("None", fval) elif isinstance(fval, string_types) and fval.startswith("^"): fval = ("^", fval[1:]) else: fval = ("=", fval) _filters[f] = fval for d in data: add = True for f, fval in iteritems(_filters): if not frappe.compare(getattr(d, f, None), fval[0], fval[1]): add = False break if add: out.append(d) if limit and (len(out)-1)==limit: break return out
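# --------------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it shows how the _filter() helper
# above interprets its filter syntax. It assumes frappe is importable in this module (as it is used
# throughout the file); the rows and the "status" field are made-up stand-ins for child documents,
# used only to demonstrate the matching rules documented in _filter()'s docstring.
def _example_filter_usage():
    rows = [
        frappe._dict(status="Open", idx=1),
        frappe._dict(status="Closed", idx=2),
        frappe._dict(status="Open Overdue", idx=3),
    ]

    # A plain value means equality, i.e. {"key": "val"}.
    open_rows = _filter(rows, {"status": "Open"})

    # A list value is an (operator, operand) pair, e.g. {"key": ["!=", "val"]}.
    not_closed = _filter(rows, {"status": ["!=", "Closed"]})

    # A leading "^" matches by prefix, i.e. {"key": "^val"}.
    open_prefixed = _filter(rows, {"status": "^Open"})

    return open_rows, not_closed, open_prefixed
# --------------------------------------------------------------------------------------------------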
from itertools import izip from django.db.models.query import sql from django.db.models.fields.related import ForeignKey from django.contrib.gis.db.backend import SpatialBackend from django.contrib.gis.db.models.fields import GeometryField from django.contrib.gis.db.models.sql import aggregates as gis_aggregates_module from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField from django.contrib.gis.db.models.sql.where import GeoWhereNode from django.contrib.gis.measure import Area, Distance # Valid GIS query types. ALL_TERMS = sql.constants.QUERY_TERMS.copy() ALL_TERMS.update(SpatialBackend.gis_terms) # Pulling out other needed constants/routines to avoid attribute lookups. TABLE_NAME = sql.constants.TABLE_NAME get_proxied_model = sql.query.get_proxied_model class GeoQuery(sql.Query): """ A single spatial SQL query. """ # Overridding the valid query terms. query_terms = ALL_TERMS aggregates_module = gis_aggregates_module #### Methods overridden from the base Query class #### def __init__(self, model, conn): super(GeoQuery, self).__init__(model, conn, where=GeoWhereNode) # The following attributes are customized for the GeoQuerySet. # The GeoWhereNode and SpatialBackend classes contain backend-specific # routines and functions. self.custom_select = {} self.transformed_srid = None self.extra_select_fields = {} if SpatialBackend.oracle: # Have to override this so that GeoQuery, instead of OracleQuery, # is returned when unpickling. def __reduce__(self): callable, args, data = super(GeoQuery, self).__reduce__() return (unpickle_geoquery, (), data) def clone(self, *args, **kwargs): obj = super(GeoQuery, self).clone(*args, **kwargs) # Customized selection dictionary and transformed srid flag have # to also be added to obj. obj.custom_select = self.custom_select.copy() obj.transformed_srid = self.transformed_srid obj.extra_select_fields = self.extra_select_fields.copy() return obj def get_columns(self, with_aliases=False): """ Return the list of columns to use in the select statement. If no columns have been specified, returns all columns relating to fields in the model. If 'with_aliases' is true, any column names that are duplicated (without the table names) are given unique aliases. This is needed in some cases to avoid ambiguitity with nested queries. This routine is overridden from Query to handle customized selection of geometry columns. """ qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias)) for alias, col in self.extra_select.iteritems()] aliases = set(self.extra_select.keys()) if with_aliases: col_aliases = aliases.copy() else: col_aliases = set() if self.select: only_load = self.deferred_to_columns() # This loop customized for GeoQuery. 
for col, field in izip(self.select, self.select_fields): if isinstance(col, (list, tuple)): alias, column = col table = self.alias_map[alias][TABLE_NAME] if table in only_load and col not in only_load[table]: continue r = self.get_field_select(field, alias, column) if with_aliases: if col[1] in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (r, c_alias)) aliases.add(c_alias) col_aliases.add(c_alias) else: result.append('%s AS %s' % (r, qn2(col[1]))) aliases.add(r) col_aliases.add(col[1]) else: result.append(r) aliases.add(r) col_aliases.add(col[1]) else: result.append(col.as_sql(quote_func=qn)) if hasattr(col, 'alias'): aliases.add(col.alias) col_aliases.add(col.alias) elif self.default_cols: cols, new_aliases = self.get_default_columns(with_aliases, col_aliases) result.extend(cols) aliases.update(new_aliases) result.extend([ '%s%s' % ( self.get_extra_select_format(alias) % aggregate.as_sql(quote_func=qn), alias is not None and ' AS %s' % alias or '' ) for alias, aggregate in self.aggregate_select.items() ]) # This loop customized for GeoQuery. for (table, col), field in izip(self.related_select_cols, self.related_select_fields): r = self.get_field_select(field, table, col) if with_aliases and col in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (r, c_alias)) aliases.add(c_alias) col_aliases.add(c_alias) else: result.append(r) aliases.add(r) col_aliases.add(col) self._select_aliases = aliases return result def get_default_columns(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False): """ Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). This routine is overridden from Query to handle customized selection of geometry columns. """ result = [] if opts is None: opts = self.model._meta aliases = set() only_load = self.deferred_to_columns() # Skip all proxy to the root proxied model proxied_model = get_proxied_model(opts) if start_alias: seen = {None: start_alias} for field, model in opts.get_fields_with_model(): if start_alias: try: alias = seen[model] except KeyError: if model is proxied_model: alias = start_alias else: link_field = opts.get_ancestor_link(model) alias = self.join((start_alias, model._meta.db_table, link_field.column, model._meta.pk.column)) seen[model] = alias else: # If we're starting from the base model of the queryset, the # aliases will have already been set up in pre_sql_setup(), so # we can save time here. alias = self.included_inherited_models[model] table = self.alias_map[alias][TABLE_NAME] if table in only_load and field.column not in only_load[table]: continue if as_pairs: result.append((alias, field.column)) aliases.add(alias) continue # This part of the function is customized for GeoQuery. We # see if there was any custom selection specified in the # dictionary, and set up the selection format appropriately. 
field_sel = self.get_field_select(field, alias) if with_aliases and field.column in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (field_sel, c_alias)) col_aliases.add(c_alias) aliases.add(c_alias) else: r = field_sel result.append(r) aliases.add(r) if with_aliases: col_aliases.add(field.column) return result, aliases def resolve_columns(self, row, fields=()): """ This routine is necessary so that distances and geometries returned from extra selection SQL get resolved appropriately into Python objects. """ values = [] aliases = self.extra_select.keys() if self.aggregates: # If we have an aggregate annotation, must extend the aliases # so their corresponding row values are included. aliases.extend([None for i in xrange(len(self.aggregates))]) # Have to set a starting row number offset that is used for # determining the correct starting row index -- needed for # doing pagination with Oracle. rn_offset = 0 if SpatialBackend.oracle: if self.high_mark is not None or self.low_mark: rn_offset = 1 index_start = rn_offset + len(aliases) # Converting any extra selection values (e.g., geometries and # distance objects added by GeoQuerySet methods). values = [self.convert_values(v, self.extra_select_fields.get(a, None)) for v, a in izip(row[rn_offset:index_start], aliases)] if SpatialBackend.oracle or getattr(self, 'geo_values', False): # We resolve the rest of the columns if we're on Oracle or if # the `geo_values` attribute is defined. for value, field in map(None, row[index_start:], fields): values.append(self.convert_values(value, field)) else: values.extend(row[index_start:]) return tuple(values) def convert_values(self, value, field): """ Using the same routines that Oracle does we can convert our extra selection objects into Geometry and Distance objects. TODO: Make converted objects 'lazy' for less overhead. """ if SpatialBackend.oracle: # Running through Oracle's first. value = super(GeoQuery, self).convert_values(value, field or GeomField()) if value is None: # Output from spatial function is NULL (e.g., called # function on a geometry field with NULL value). pass elif isinstance(field, DistanceField): # Using the field's distance attribute, can instantiate # `Distance` with the right context. value = Distance(**{field.distance_att : value}) elif isinstance(field, AreaField): value = Area(**{field.area_att : value}) elif isinstance(field, (GeomField, GeometryField)) and value: value = SpatialBackend.Geometry(value) return value def resolve_aggregate(self, value, aggregate): """ Overridden from GeoQuery's normalize to handle the conversion of GeoAggregate objects. """ if isinstance(aggregate, self.aggregates_module.GeoAggregate): if aggregate.is_extent: return self.aggregates_module.convert_extent(value) else: return self.aggregates_module.convert_geom(value, aggregate.source) else: return super(GeoQuery, self).resolve_aggregate(value, aggregate) #### Routines unique to GeoQuery #### def get_extra_select_format(self, alias): sel_fmt = '%s' if alias in self.custom_select: sel_fmt = sel_fmt % self.custom_select[alias] return sel_fmt def get_field_select(self, field, alias=None, column=None): """ Returns the SELECT SQL string for the given field. Figures out if any custom selection SQL is needed for the column The `alias` keyword may be used to manually specify the database table where the column exists, if not in the model associated with this `GeoQuery`. 
Similarly, `column` may be used to specify the exact column name, rather than using the `column` attribute on `field`. """ sel_fmt = self.get_select_format(field) if field in self.custom_select: field_sel = sel_fmt % self.custom_select[field] else: field_sel = sel_fmt % self._field_column(field, alias, column) return field_sel def get_select_format(self, fld): """ Returns the selection format string, depending on the requirements of the spatial backend. For example, Oracle and MySQL require custom selection formats in order to retrieve geometries in OGC WKT. For all other fields a simple '%s' format string is returned. """ if SpatialBackend.select and hasattr(fld, 'geom_type'): # This allows operations to be done on fields in the SELECT, # overriding their values -- used by the Oracle and MySQL # spatial backends to get database values as WKT, and by the # `transform` method. sel_fmt = SpatialBackend.select # Because WKT doesn't contain spatial reference information, # the SRID is prefixed to the returned WKT to ensure that the # transformed geometries have an SRID different than that of the # field -- this is only used by `transform` for Oracle and # SpatiaLite backends. if self.transformed_srid and ( SpatialBackend.oracle or SpatialBackend.spatialite ): sel_fmt = "'SRID=%d;'||%s" % (self.transformed_srid, sel_fmt) else: sel_fmt = '%s' return sel_fmt # Private API utilities, subject to change. def _field_column(self, field, table_alias=None, column=None): """ Helper function that returns the database column for the given field. The table and column are returned (quoted) in the proper format, e.g., `"geoapp_city"."point"`. If `table_alias` is not specified, the database table associated with the model of this `GeoQuery` will be used. If `column` is specified, it will be used instead of the value in `field.column`. """ if table_alias is None: table_alias = self.model._meta.db_table return "%s.%s" % (self.quote_name_unless_alias(table_alias), self.connection.ops.quote_name(column or field.column)) def _geo_field(self, field_name=None): """ Returns the first Geometry field encountered; or specified via the `field_name` keyword. The `field_name` may be a string specifying the geometry field on this GeoQuery's model, or a lookup string to a geometry field via a ForeignKey relation. """ if field_name is None: # Incrementing until the first geographic field is found. for fld in self.model._meta.fields: if isinstance(fld, GeometryField): return fld return False else: # Otherwise, check by the given field name -- which may be # a lookup to a _related_ geographic field. return GeoWhereNode._check_geo_field(self.model._meta, field_name) if SpatialBackend.oracle: def unpickle_geoquery(): """ Utility function, called by Python's unpickling machinery, that handles unpickling of GeoQuery subclasses of OracleQuery. """ return GeoQuery.__new__(GeoQuery) unpickle_geoquery.__safe_for_unpickling__ = True
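# --------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a typical way a GeoQuerySet-era caller
# obtains a GeoQuery and resolves the geometry field it will operate on. "City" is a hypothetical
# model with a GeometryField named "point"; only the GeoQuery API defined above is exercised.
#
#     from django.db import connection
#     from myapp.models import City  # hypothetical model declaring a geometry field "point"
#
#     query = GeoQuery(City, connection)
#     geo_field = query._geo_field()            # first GeometryField declared on the model
#     named_field = query._geo_field("point")   # or resolve it explicitly by (lookup) name
#
# Note that _geo_field() returns False when the model declares no geometry field, so callers are
# expected to check the return value before building spatial SQL against it.
# --------------------------------------------------------------------------------------------------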
"""Test the Google Maps Travel Time config flow.""" from homeassistant import config_entries, data_entry_flow from homeassistant.components.google_travel_time.const import ( ARRIVAL_TIME, CONF_ARRIVAL_TIME, CONF_AVOID, CONF_DEPARTURE_TIME, CONF_DESTINATION, CONF_LANGUAGE, CONF_OPTIONS, CONF_ORIGIN, CONF_TIME, CONF_TIME_TYPE, CONF_TRAFFIC_MODEL, CONF_TRANSIT_MODE, CONF_TRANSIT_ROUTING_PREFERENCE, CONF_UNITS, DEFAULT_NAME, DEPARTURE_TIME, DOMAIN, ) from homeassistant.const import ( CONF_API_KEY, CONF_MODE, CONF_NAME, CONF_UNIT_SYSTEM_IMPERIAL, ) from tests.common import MockConfigEntry async def test_minimum_fields(hass, validate_config_entry, bypass_setup): """Test we get the form.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {} result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", }, ) assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result2["title"] == f"{DEFAULT_NAME}: location1 -> location2" assert result2["data"] == { CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", } async def test_invalid_config_entry(hass, invalidate_config_entry): """Test we get the form.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {} result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", }, ) assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM assert result2["errors"] == {"base": "cannot_connect"} async def test_options_flow(hass, validate_config_entry, bypass_update): """Test options flow.""" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", }, options={ CONF_MODE: "driving", CONF_ARRIVAL_TIME: "test", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, }, ) entry.add_to_hass(hass) await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() result = await hass.config_entries.options.async_init(entry.entry_id, data=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "init" result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={ CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_TIME_TYPE: ARRIVAL_TIME, CONF_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", }, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "" assert result["data"] == { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_ARRIVAL_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", } assert entry.options == { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_ARRIVAL_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", } async def test_options_flow_departure_time(hass, 
validate_config_entry, bypass_update): """Test options flow wiith departure time.""" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", }, ) entry.add_to_hass(hass) await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() result = await hass.config_entries.options.async_init(entry.entry_id, data=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "init" result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={ CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_TIME_TYPE: DEPARTURE_TIME, CONF_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", }, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "" assert result["data"] == { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_DEPARTURE_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", } assert entry.options == { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_DEPARTURE_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", } async def test_dupe_id(hass, validate_config_entry, bypass_setup): """Test setting up the same entry twice fails.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {} result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_API_KEY: "test", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", }, ) assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {} result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_API_KEY: "test", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", }, ) await hass.async_block_till_done() assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result2["reason"] == "already_configured" async def test_import_flow(hass, validate_config_entry, bypass_update): """Test import_flow.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={ CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", CONF_NAME: "test_name", CONF_OPTIONS: { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_ARRIVAL_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", }, }, ) await hass.async_block_till_done() assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "test_name" assert result["data"] == { CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", CONF_NAME: "test_name", CONF_OPTIONS: { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_ARRIVAL_TIME: 
"test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", }, } entry = hass.config_entries.async_entries(DOMAIN)[0] assert entry.data == { CONF_API_KEY: "api_key", CONF_ORIGIN: "location1", CONF_DESTINATION: "location2", } assert entry.options == { CONF_MODE: "driving", CONF_LANGUAGE: "en", CONF_AVOID: "tolls", CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL, CONF_ARRIVAL_TIME: "test", CONF_TRAFFIC_MODEL: "best_guess", CONF_TRANSIT_MODE: "train", CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking", }
#!/usr/bin/python # # This source file is part of appleseed. # Visit https://appleseedhq.net/ for additional information and resources. # # This software is released under the MIT license. # # Copyright (c) 2017-2018 Esteban Tovagliari, The appleseedhq Organization # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the 'Software'), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from __future__ import print_function from distutils import archive_util, dir_util from xml.etree.ElementTree import ElementTree import glob import os import platform import re import shutil import stat import subprocess import sys import time import traceback import urllib #-------------------------------------------------------------------------------------------------- # Constants. #-------------------------------------------------------------------------------------------------- VERSION = "1.1.3" SETTINGS_FILENAME = "appleseed-maya.package.configuration.xml" #-------------------------------------------------------------------------------------------------- # Utility functions. #-------------------------------------------------------------------------------------------------- def info(message): print(" " + message) def progress(message): print(" " + message + "...") def fatal(message): print("Fatal: " + message + ". Aborting.") if sys.exc_info()[0]: print(traceback.format_exc()) sys.exit(1) def exe(filepath): return filepath + ".exe" if os.name == "nt" else filepath def safe_delete_file(path): try: if os.path.exists(path): os.remove(path) except OSError: fatal("Failed to delete file '" + path + "'") def on_rmtree_error(func, path, exc_info): # path contains the path of the file that couldn't be removed. # Let's just assume that it's read-only and unlink it. 
os.chmod(path, stat.S_IWRITE) os.unlink(path) def safe_delete_directory(path): Attempts = 10 for attempt in range(Attempts): try: if os.path.exists(path): shutil.rmtree(path, onerror=on_rmtree_error) return except OSError: if attempt < Attempts - 1: time.sleep(0.5) else: fatal("Failed to delete directory '" + path + "'") def safe_make_directory(path): if not os.path.isdir(path): os.makedirs(path) def pushd(path): old_path = os.getcwd() os.chdir(path) return old_path def run_subprocess(cmdline): p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return p.returncode, out, err def copy_glob(input_pattern, output_path): for input_file in glob.glob(input_pattern): shutil.copy(input_file, output_path) #-------------------------------------------------------------------------------------------------- # Settings. #-------------------------------------------------------------------------------------------------- class Settings: def load(self): self.this_dir = os.path.dirname(os.path.realpath(__file__)) self.root_dir = os.path.join(self.this_dir, "..") print("Loading settings from " + SETTINGS_FILENAME + "...") tree = ElementTree() try: tree.parse(SETTINGS_FILENAME) except IOError: fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'") self.__load_values(tree) self.maya_version = self.__get_maya_version() if self.maya_version is None: fatal("Failed to determine Maya version from CMakeCache.txt file") self.plugin_version = self.__get_plugin_version() if self.plugin_version is None: fatal("Failed to determine appleseed-maya version from version.h file") self.print_summary() def __load_values(self, tree): self.platform = self.__get_required(tree, "platform") self.build_path = self.__get_required(tree, "build_path") self.bin_path = self.__get_required(tree, "bin_path") self.appleseed_bin_path = self.__get_required(tree, "appleseed_bin_path") self.appleseed_lib_path = self.__get_required(tree, "appleseed_lib_path") self.appleseed_shaders_path = self.__get_required(tree, "appleseed_shaders_path") self.appleseed_schemas_path = self.__get_required(tree, "appleseed_schemas_path") self.appleseed_settings_path = self.__get_required(tree, "appleseed_settings_path") self.appleseed_python_path = self.__get_required(tree, "appleseed_python_path") self.maketx_path = self.__get_required(tree, "maketx_path") self.package_output_path = self.__get_required(tree, "package_output_path") def __get_required(self, tree, key): value = tree.findtext(key) if value is None: fatal("Missing value \"{0}\" in configuration file".format(key)) return value def __get_maya_version(self): # Find Maya include dir from CMake's cache. maya_include_dir = None with open(os.path.join(self.build_path, "CMakeCache.txt"), "r") as f: for line in f.readlines(): if line.startswith("MAYA_INCLUDE_DIR:PATH="): maya_include_dir = line.split("=")[1].strip() break if maya_include_dir is None: return None # Find Maya version from Maya's MTypes.h header. with open(os.path.join(maya_include_dir, "maya", "MTypes.h"), "r") as f: for line in f.readlines(): if "#define" in line: if "MAYA_API_VERSION" in line: tokens = line.split() return tokens[-1][:4] return None def __get_plugin_version(self): major = -1 minor = -1 patch = -1 maturity = None # Find the Maya include dir from CMake's cache. 
with open(os.path.join(self.root_dir, "src", "appleseedmaya", "version.h"), "r") as f: for line in f.readlines(): if line.startswith("#define APPLESEED_MAYA_VERSION_"): tokens = line.split() if tokens[1] == "APPLESEED_MAYA_VERSION_MAJOR": major = int(tokens[2]) elif tokens[1] == "APPLESEED_MAYA_VERSION_MINOR": minor = int(tokens[2]) elif tokens[1] == "APPLESEED_MAYA_VERSION_PATCH": patch = int(tokens[2]) elif tokens[1] == "APPLESEED_MAYA_VERSION_MATURITY": maturity = tokens[2].strip("\"") if major == -1 or minor == -1 or patch == -1 or maturity == None: return None return "{0}.{1}.{2}-{3}".format(major, minor, patch, maturity) def print_summary(self): print("") print(" Platform: " + self.platform) print(" Maya version: " + self.maya_version) print(" appleseed-maya version: " + self.plugin_version) print(" Build path: " + self.build_path) print(" Path to appleseed-maya binaries: " + self.bin_path) print(" Path to appleseed binaries: " + self.appleseed_bin_path) print(" Path to appleseed libraries: " + self.appleseed_lib_path) print(" Path to appleseed shaders: " + self.appleseed_shaders_path) print(" Path to appleseed schemas: " + self.appleseed_schemas_path) print(" Path to appleseed settings: " + self.appleseed_settings_path) print(" Path to appleseed.python: " + self.appleseed_python_path) print(" Path to maketx: " + self.maketx_path) print(" Output directory: " + self.package_output_path) print("") #-------------------------------------------------------------------------------------------------- # Base package builder. #-------------------------------------------------------------------------------------------------- class PackageBuilder(object): def __init__(self, settings): self.settings = settings def build_package(self): print("Building package:") print("") self.orchestrate() print("") print("The package was successfully built.") def orchestrate(self): progress("Removing leftovers from previous invocations") safe_delete_directory(self.settings.package_output_path) progress("Creating deployment directory") safe_make_directory(self.settings.package_output_path) progress("Copying license") shutil.copy(os.path.join(self.settings.root_dir, "LICENSE.txt"), self.settings.package_output_path) progress("Copying icons") dir_util.copy_tree(os.path.join(self.settings.root_dir, "icons"), os.path.join(self.settings.package_output_path, "icons")) progress("Copying presets") dir_util.copy_tree(os.path.join(self.settings.root_dir, "presets"), os.path.join(self.settings.package_output_path, "presets")) progress("Copying renderDesc") dir_util.copy_tree(os.path.join(self.settings.root_dir, "renderDesc"), os.path.join(self.settings.package_output_path, "renderDesc")) progress("Copying scripts") dir_util.copy_tree(os.path.join(self.settings.root_dir, "scripts"), os.path.join(self.settings.package_output_path, "scripts")) progress("Copying appleseed.python") dir_util.copy_tree(os.path.expandvars(self.settings.appleseed_python_path), os.path.join(self.settings.package_output_path, "scripts")) progress("Removing pyc files") for root, dirs, files in os.walk(os.path.join(self.settings.package_output_path, "scripts")): for f in files: if f.endswith(".pyc"): safe_delete_file(os.path.join(root, f)) progress("Generating module file") self.generate_module_file() progress("Copying binaries") self.copy_binaries() progress("Copying schemas") dir_util.copy_tree(os.path.expandvars(self.settings.appleseed_schemas_path), os.path.join(self.settings.package_output_path, "schemas")) 
safe_delete_file(os.path.join(self.settings.package_output_path, "schemas", ".gitignore")) progress("Downloading settings files") self.download_settings() progress("Copying shaders") self.copy_shaders() progress("Copying plugins") self.copy_plugins() progress("Copying dependencies") self.copy_dependencies() progress("Post-processing package") self.post_process_package() progress("Building final zip file") self.build_final_zip_file() def do_generate_module_file(self, maya_platform_id): with open(os.path.join(self.settings.package_output_path, "appleseed-maya.mod"), "w") as f: f.write("+ MAYAVERSION:{0} PLATFORM:{1} appleseed-maya {2} .\n".format( self.settings.maya_version, maya_platform_id, self.settings.plugin_version)) f.write("plug-ins: plug-ins/{0}\n".format(self.settings.maya_version)) f.write("PATH +:= bin\n") f.write("PYTHONPATH +:= scripts\n") f.write("APPLESEED_SEARCHPATH +:= shaders\n") f.write("MAYA_PRESET_PATH +:= presets\n") f.write("MAYA_CUSTOM_TEMPLATE_PATH +:= scripts/appleseedMaya/AETemplates\n") f.write("MAYA_RENDER_DESC_PATH +:= renderDesc\n") f.write("XBMLANGPATH +:= icons/%B\n") def copy_binaries(self): bin_dir = os.path.join(self.settings.package_output_path, "bin") safe_make_directory(bin_dir) binaries_to_copy = [exe("appleseed.cli")] for bin in binaries_to_copy: shutil.copy(os.path.join(os.path.expandvars(self.settings.appleseed_bin_path), bin), bin_dir) shutil.copy(os.path.expandvars(self.settings.maketx_path), bin_dir) def download_settings(self): settings_dir = os.path.join(self.settings.package_output_path, "settings") safe_make_directory(settings_dir) settings_to_download = ["appleseed.cli.xml"] for file in settings_to_download: urllib.urlretrieve( "https://raw.githubusercontent.com/appleseedhq/appleseed/master/sandbox/settings/{0}".format(file), os.path.join(settings_dir, file)) def copy_shaders(self): shaders_dir = os.path.join(self.settings.package_output_path, "shaders") safe_make_directory(shaders_dir) for shader in glob.glob(os.path.join(os.path.expandvars(self.settings.appleseed_shaders_path), "maya", "*.oso")): shutil.copy(shader, shaders_dir) for root, dirs, files in os.walk(os.path.join(os.path.expandvars(self.settings.appleseed_shaders_path), "appleseed")): for f in files: if f.endswith(".oso"): shutil.copy(os.path.join(root, f), shaders_dir) def copy_plugins(self): plugin_ext = self.plugin_extension() plugins_dir = os.path.join(self.settings.package_output_path, "plug-ins", self.settings.maya_version) safe_make_directory(plugins_dir) shutil.copy( os.path.join(self.settings.bin_path, "appleseedMaya" + plugin_ext), plugins_dir) def build_final_zip_file(self): package_name = "appleseed-maya{0}-{1}-{2}".format( self.settings.maya_version, self.settings.plugin_version, self.settings.platform) old_path = pushd(self.settings.package_output_path) archive_util.make_zipfile(os.path.join(self.settings.this_dir, package_name), ".") os.chdir(old_path) def run(self, cmdline): info("Running command line: {0}".format(cmdline)) os.system(cmdline) # ------------------------------------------------------------------------------------------------- # Mac package builder. 
# ------------------------------------------------------------------------------------------------- class MacPackageBuilder(PackageBuilder): SYSTEM_LIBS_PREFIXES = [ "/System/Library/", "/usr/lib/libcurl", "/usr/lib/libc++", "/usr/lib/libbz2", "/usr/lib/libiconv", "/usr/lib/libSystem", "/usr/lib/libxml", "/usr/lib/libexpat", "/usr/lib/libz", "/usr/lib/libncurses", "/usr/lib/libobjc.A.dylib" ] def plugin_extension(self): return ".bundle" def generate_module_file(self): self.do_generate_module_file("mac") def copy_dependencies(self): progress("Mac-specific: Copying dependencies") # Create destination directory. lib_dir = os.path.join(self.settings.package_output_path, "lib") safe_make_directory(lib_dir) # Copy appleseed libraries. for lib in ["libappleseed.dylib"]: shutil.copy(os.path.join(os.path.expandvars(self.settings.appleseed_lib_path), lib), lib_dir) libs_to_check = set() all_libs = set() for bin in glob.glob(os.path.join(os.path.expandvars(self.settings.root_dir), "appleseed", "bin", "*")): libs = self.__get_dependencies_for_file(bin) libs_to_check = libs_to_check.union(libs) appleseedpython_libs = self.__get_dependencies_for_file( os.path.join(self.settings.package_output_path, "scripts", "appleseed", "_appleseedpython.so")) libs_to_check = libs_to_check.union(appleseedpython_libs) # find all dependencies while libs_to_check: current_lib = libs_to_check.pop() for lib in self.__get_dependencies_for_file(current_lib): if lib not in all_libs: libs_to_check.add(lib) all_libs.add(current_lib) if True: # Print dependencies. info(" Dependencies:") for lib in all_libs: info(" {0}".format(lib)) # Copy needed libs to lib directory. for lib in all_libs: # Don't copy Python, this dependency will be relinked to # Maya's Python later in the packaging process if os.path.basename(lib) == "Python": continue if True: info(" Copying {0} to {1}...".format(lib, lib_dir)) dest = os.path.join(lib_dir, os.path.basename(lib)) # It is possible for the dylib to link to same lib in multiple paths # check that is not already copied if not os.path.exists(dest): shutil.copy(lib, lib_dir) def post_process_package(self): progress("Mac-specific: Post-processing package") self.__fixup_binaries() def __fixup_binaries(self): progress("Mac-specific: Fixing up binaries") self.__fix_permissions(); self.set_libraries_ids() self.__change_library_paths_in_libraries() self.__change_library_paths_in_executables() def __fix_permissions(self): lib_dir = os.path.join(self.settings.package_output_path, "lib") for dirpath, dirnames, filenames in os.walk(lib_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext == ".dylib" or ext == ".so": lib_path = os.path.join(dirpath, filename) self.run("chmod 777 {}".format(lib_path)) bin_dir = os.path.join(self.settings.package_output_path, "bin") for dirpath, dirnames, filenames in os.walk(bin_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext != ".py" and ext != ".conf": exe_path = os.path.join(dirpath, filename) self.run("chmod 777 {}".format(exe_path)) def set_libraries_ids(self): lib_dir = os.path.join(self.settings.package_output_path, "lib") for dirpath, dirnames, filenames in os.walk(lib_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext == ".dylib" or ext == ".so": lib_path = os.path.join(dirpath, filename) self.__set_library_id(lib_path, filename) plugins_dir = os.path.join(self.settings.package_output_path, "plug-ins", self.settings.maya_version) for plugin_path in glob.glob(os.path.join(plugins_dir, 
"*.bundle")): filename = os.path.basename(plugin_path) self.__set_library_id(plugin_path, filename) python_plugin = os.path.join(self.settings.package_output_path, "scripts", "appleseed") for plugin_path in glob.glob(os.path.join(python_plugin, "*.so")): filename = os.path.basename(plugin_path) self.__set_library_id(plugin_path, filename) def __change_library_paths_in_libraries(self): lib_dir = os.path.join(self.settings.package_output_path, "lib") for dirpath, dirnames, filenames in os.walk(lib_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext == ".dylib" or ext == ".so": lib_path = os.path.join(dirpath, filename) self.__change_library_paths_in_binary(lib_path) plugins_dir = os.path.join(self.settings.package_output_path, "plug-ins", self.settings.maya_version) for plugin_path in glob.glob(os.path.join(plugins_dir, "*.bundle")): filename = os.path.basename(plugin_path) self.__change_library_paths_in_binary(plugin_path) python_plugin = os.path.join(self.settings.package_output_path, "scripts", "appleseed") for plugin_path in glob.glob(os.path.join(python_plugin, "*.so")): filename = os.path.basename(plugin_path) self.__change_library_paths_in_binary(plugin_path) def __change_library_paths_in_executables(self): bin_dir = os.path.join(self.settings.package_output_path, "bin") for dirpath, dirnames, filenames in os.walk(bin_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext != ".py" and ext != ".conf": exe_path = os.path.join(dirpath, filename) self.__change_library_paths_in_binary(exe_path) # Can be used on executables and dynamic libraries. def __change_library_paths_in_binary(self, bin_path): progress("Patching {0}".format(bin_path)) bin_dir = os.path.dirname(bin_path) lib_dir = os.path.join(self.settings.package_output_path, "lib") path_to_appleseed_lib = os.path.relpath(lib_dir, bin_dir) # fix_paths set to False because we must retrieve the unmodified dependency in order to replace it by the correct one. 
for lib_path in self.__get_dependencies_for_file(bin_path, fix_paths=False): lib_name = os.path.basename(lib_path) # relink python dependencies to maya's embed python if lib_name == "Python": maya_python = "@executable_path/../Frameworks/Python.framework/Versions/2.7/Python" self.__change_library_path(bin_path, lib_path, maya_python) elif path_to_appleseed_lib == ".": self.__change_library_path(bin_path, lib_path, "@loader_path/{0}".format(lib_name)) else: self.__change_library_path(bin_path, lib_path, "@loader_path/{0}/{1}".format(path_to_appleseed_lib, lib_name)) def __set_library_id(self, target, name): self.run('install_name_tool -id "{0}" {1}'.format(name, target)) def __change_library_path(self, target, old, new): self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target)) def __get_lib_search_paths(self, filepath): returncode, out, err = run_subprocess(["otool", "-l", filepath]) if returncode != 0: fatal("Failed to invoke otool(1) to get rpath for {0}: {1}".format(filepath, err)) lc_path_found = False rpaths = [] # parse otool output for rpaths, there can be multiple for line in out.split("\n"): line = line.strip() if lc_path_found and line.startswith("path"): path_split = line.split(' ') if len(path_split) < 2: fatal("Failed to parse line from otool(1) output: " + line) rpaths.append(path_split[1]) lc_path_found = False if line == "cmd LC_RPATH": lc_path_found = True DYLD_LIBRARY_PATH = os.environ.get("DYLD_LIBRARY_PATH", "").split(":") DYLD_FALLBACK_LIBRARY_PATH = os.environ.get("DYLD_FALLBACK_LIBRARY_PATH", "").split(":") search_paths = [] # DYLD_LIBRARY_PATH overrides rpaths for path in DYLD_LIBRARY_PATH: if os.path.exists(path): search_paths.append(path) for path in rpaths: if os.path.exists(path): search_paths.append(path) for path in DYLD_FALLBACK_LIBRARY_PATH: if os.path.exists(path): search_paths.append(path) return search_paths def __get_dependencies_for_file(self, filepath, fix_paths=True): filename = os.path.basename(filepath) search_paths = self.__get_lib_search_paths(filepath) loader_path = os.path.dirname(filepath) if True: info("Gathering dependencies for file") info(" {0}".format(filepath)) info("with @loader_path set to") info(" {0}".format(loader_path)) info("and @rpath search path to:") for path in search_paths: info(" {0}".format(path)) returncode, out, err = run_subprocess(["otool", "-L", filepath]) if returncode != 0: fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filepath, err)) libs = set() for line in out.split("\n")[1:]: # skip the first line line = line.strip() # Ignore empty lines. if len(line) == 0: continue if line.startswith("@executable_path"): continue # Parse the line. m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line) if not m: fatal("Failed to parse line from otool(1) output: " + line) lib = m.group(1) # Ignore self-references (why do these happen?). if lib == filename: continue # Ignore system libs. if self.__is_system_lib(lib): continue # Ignore Qt frameworks. if re.search(r"Qt.*\.framework", lib): continue if fix_paths: # handle no search paths case if not search_paths: fixed_lib = lib.replace("@loader_path", loader_path) if os.path.exists(fixed_lib): lib = fixed_lib else: # Try to handle other relative libs. candidate = os.path.join(loader_path, fixed_lib) if not os.path.exists(candidate): fatal("Unable to Resolve lib {}", lib) lib = candidate for path in search_paths: # Handle libs relative to @loader_path. 
fixed_lib = lib.replace("@loader_path", loader_path) # Handle libs relative to @rpath. fixed_lib = fixed_lib.replace("@rpath", path) if os.path.exists(fixed_lib): lib = fixed_lib break # Try to handle other relative libs. elif not os.path.isabs(fixed_lib): candidate = os.path.join(loader_path, fixed_lib) if not os.path.exists(candidate): candidate = os.path.join(path, fixed_lib) if os.path.exists(candidate): info("Resolved relative dependency {0} as {1}".format(fixed_lib, candidate)) lib = candidate break libs.add(lib) if True: info("Dependencies for file {0}:".format(filepath)) for lib in libs: if os.path.isfile(lib): info(u" found:{0}".format(lib)) else: info(u" missing:{0}".format(lib)) # Don't check for missing dependencies if we didn't attempt to fix them. if fix_paths: for lib in libs: if not os.path.isfile(lib): fatal("Dependency {0} could not be found on disk".format(lib)) return libs def __is_system_lib(self, lib): for prefix in self.SYSTEM_LIBS_PREFIXES: if lib.startswith(prefix): return True return False #-------------------------------------------------------------------------------------------------- # Linux package builder. #-------------------------------------------------------------------------------------------------- class LinuxPackageBuilder(PackageBuilder): SYSTEM_LIBS_PREFIXES = [ "linux", "librt", "libpthread", "libGL", "libX", "libselinux", "libICE", "libSM", "libdl", "libm.so", "libgcc", "libc.so", "/lib64/ld-linux-", "libstdc++", "libxcb", "libdrm", "libnsl", "libuuid", "libgthread", "libglib", "libgobject", "libglapi", "libffi", "libfontconfig", "libutil", "libpython", "libxshmfence.so" ] def plugin_extension(self): return ".so" def generate_module_file(self): self.do_generate_module_file("linux") def copy_dependencies(self): # Create the lib directory. lib_dir = os.path.join(self.settings.package_output_path, "lib") safe_make_directory(lib_dir) # Copy appleseed libraries. libraries_to_copy = ["libappleseed.so"] for lib in libraries_to_copy: shutil.copy(os.path.join(os.path.expandvars(self.settings.appleseed_lib_path), lib), lib_dir) # Get shared libs needed by binaries. all_libs = set() for bin in glob.glob(os.path.join(self.settings.package_output_path, "bin", "*")): libs = self.__get_dependencies_for_file(bin) all_libs = all_libs.union(libs) # Get shared libs needed by appleseed.python. libs = self.__get_dependencies_for_file( os.path.join(self.settings.package_output_path, "scripts", "appleseed", "_appleseedpython.so")) all_libs = all_libs.union(libs) # Get shared libs needed by libraries. lib_libs = set() for lib in all_libs: libs = self.__get_dependencies_for_file(lib) lib_libs = lib_libs.union(libs) all_libs = all_libs.union(lib_libs) # Copy all shared libraries. 
for lib in all_libs: shutil.copy(lib, lib_dir) def post_process_package(self): for bin in glob.glob(os.path.join(self.settings.package_output_path, "bin", "*")): self.run("chrpath -r \$ORIGIN/../lib " + bin) # for lib in glob.glob(os.path.join(self.settings.package_output_path, "lib", "*")): # self.run("chrpath -d " + lib) plugins_dir = os.path.join(self.settings.package_output_path, "plug-ins", self.settings.maya_version) for plugin in glob.glob(os.path.join(plugins_dir, "*.so")): self.run("chrpath -r \$ORIGIN/../../lib " + plugin) appleseed_python_dir = os.path.join(self.settings.package_output_path, "scripts", "appleseed") for py_cpp_module in glob.glob(os.path.join(appleseed_python_dir, "*.so")): self.run("chrpath -r \$ORIGIN/../../lib " + py_cpp_module) def __is_system_lib(self, lib): for prefix in self.SYSTEM_LIBS_PREFIXES: if lib.startswith(prefix): return True return False def __get_dependencies_for_file(self, filename): returncode, out, err = run_subprocess(["ldd", filename]) if returncode != 0: fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filename, err)) libs = set() for line in out.split("\n"): line = line.strip() # Ignore empty lines. if len(line) == 0: continue # Ignore system libs. if self.__is_system_lib(line): continue # Ignore appleseed libs. if "libappleseed" in line: continue libs.add(line.split()[2]) return libs #-------------------------------------------------------------------------------------------------- # Windows package builder. #-------------------------------------------------------------------------------------------------- class WindowsPackageBuilder(PackageBuilder): def plugin_extension(self): return ".mll" def generate_module_file(self): self.do_generate_module_file("win64") def copy_dependencies(self): bin_dir = os.path.join(self.settings.package_output_path, "bin") dlls_to_copy = ["appleseed.dll"] for dll in dlls_to_copy: shutil.copy(os.path.join(self.settings.appleseed_bin_path, dll), bin_dir) def post_process_package(self): pass #-------------------------------------------------------------------------------------------------- # Entry point. #-------------------------------------------------------------------------------------------------- def main(): print("appleseed-maya.package version " + VERSION) print("") settings = Settings() settings.load() if os.name == "nt": package_builder = WindowsPackageBuilder(settings) elif os.name == "posix" and platform.mac_ver()[0] != "": package_builder = MacPackageBuilder(settings) elif os.name == "posix" and platform.mac_ver()[0] == "": package_builder = LinuxPackageBuilder(settings) else: fatal("Unsupported platform: " + os.name) package_builder.build_package() if __name__ == "__main__": main()
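# Illustrative sketch (not used by the build) of how LinuxPackageBuilder parses a
# line of ldd(1) output: system libraries and appleseed's own libraries are skipped
# and the resolved path is taken from the "name => /path (address)" form. The sample
# output and the abridged prefix list below are made up purely for illustration.
SAMPLE_LDD_OUTPUT = """\
    linux-vdso.so.1 (0x00007ffd4a5f2000)
    libappleseed.so => /opt/appleseed/lib/libappleseed.so (0x00007f1c2a000000)
    libboost_python.so.1.61.0 => /opt/boost/lib/libboost_python.so.1.61.0 (0x00007f1c29c00000)
    libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f1c29800000)
"""

SAMPLE_SYSTEM_LIBS_PREFIXES = ["linux", "libc.so"]  # abridged version of the real list

def parse_sample_ldd(output):
    """Return the resolved paths of non-system, non-appleseed dependencies."""
    libs = set()
    for line in output.split("\n"):
        line = line.strip()
        if len(line) == 0:
            continue
        if any(line.startswith(prefix) for prefix in SAMPLE_SYSTEM_LIBS_PREFIXES):
            continue
        if "libappleseed" in line:
            continue
        # "name => /resolved/path (load address)" -> keep the resolved path.
        libs.add(line.split()[2])
    return libs

# parse_sample_ldd(SAMPLE_LDD_OUTPUT) == set(["/opt/boost/lib/libboost_python.so.1.61.0"])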
########################################################################## # # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import weakref import IECore import Gaffer import GafferUI QtCore = GafferUI._qtImport( "QtCore" ) QtGui = GafferUI._qtImport( "QtGui" ) ## The MessageWidget class displays a log of messages in the format specified by # IECore MessageHandlers. class MessageWidget( GafferUI.Widget ) : def __init__( self, **kw ) : row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=4 ) GafferUI.Widget.__init__( self, row, **kw ) with row : with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=6 ) : buttonSpecs = [ ( IECore.Msg.Level.Error, "Errors. These may chill you to your very core. Click to scroll to the next one (if you can stomach it)." ), ( IECore.Msg.Level.Warning, "Warnings. These may give you pause for thought. Click to scroll to the next thinking point." ), ( IECore.Msg.Level.Info, "Information. You may find this edifying. Click to scroll to the next enlightening nugget." ), ( IECore.Msg.Level.Debug, "Debug information. You may find this very dull. Click to scroll to the next item." ), ] self.__levelButtons = {} self.__buttonClickedConnections = [] for buttonSpec in buttonSpecs : button = GafferUI.Button( image = IECore.Msg.levelAsString( buttonSpec[0] ).lower() + "Notification.png", hasFrame = False, ) button.__level = buttonSpec[0] self.__levelButtons[buttonSpec[0]] = button self.__buttonClickedConnections.append( button.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) ) ) button.setVisible( False ) button.setToolTip( buttonSpec[1] ) GafferUI.Spacer( IECore.V2i( 10 ) ) self.__text = GafferUI.MultiLineTextWidget( editable=False ) self.__messageLevel = IECore.Msg.Level.Info self.__messageHandler = _MessageHandler( self ) self.__messages = [] self.__processingEvents = False ## Returns a MessageHandler which will output to this Widget. 
## \threading It is safe to use the handler on threads other than the main thread. def messageHandler( self ) : return self.__messageHandler ## It can be useful to forward messages captured by this widget # on to other message handlers - for instance to perform centralised # logging in addition to local display. This method returns a # CompoundMessageHandler which can be used for such forwarding - # simply add a handler with forwardingMessageHandler().addHandler(). def forwardingMessageHandler( self ) : return self.__messageHandler._forwarder ## Sets an IECore.MessageHandler.Level specifying which # type of messages will be visible to the user - levels above # that specified will be invisible. Note that the invisible # messages are still stored, so they can be made visible at a later # time by a suitable call to setMessageLevel(). This can be useful # for revealing debug messages only after a warning or error has # alerted the user to a problem. def setMessageLevel( self, messageLevel ) : assert( isinstance( messageLevel, IECore.MessageHandler.Level ) ) if messageLevel == self.__messageLevel : return self.__messageLevel = messageLevel self.__text.setText( "" ) for message in self.__messages : self.__appendMessageToText( *message ) def getMessageLevel( self ) : return self.__messageLevel ## May be called to append a message manually. # \note Because these are not real messages, they are not # passed to the forwardingMessageHandler(). # \deprecated. def appendMessage( self, level, context, message ) : self.__levelButtons[level].setVisible( True ) self.__messages.append( ( level, context, message ) ) if self.__appendMessageToText( level, context, message ) : # Update the gui so messages are output as they occur, rather than all getting queued # up till the end. We have to be careful to avoid recursion when doing this - another # thread may be queuing up loads of messages using self.messageHandler(), and those # will get processed by processEvents(), resulting in a recursive call to appendMessage(). # If the other thread supplies messages fast enough and we don't guard against recursion # then we can end up exceeding Python's stack limit very quickly. if not self.__processingEvents : try : self.__processingEvents = True QtGui.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents ) finally : self.__processingEvents = False ## Returns the number of messages being displayed, optionally # restricted to the specified level. def messageCount( self, level = None ) : if level is not None : return reduce( lambda x, y : x + ( 1 if y[0] == level else 0 ), self.__messages, 0 ) else : return sum( [ self.messageCount( IECore.Msg.Level.Debug ), self.messageCount( IECore.Msg.Level.Info ), self.messageCount( IECore.Msg.Level.Warning ), self.messageCount( IECore.Msg.Level.Error ), ] ) ## Clears all the displayed messages. 
def clear( self ) : self.__text.setText( "" ) self.__messages = [] for button in self.__levelButtons.values() : button.setVisible( False ) def __buttonClicked( self, button ) : # make sure messages of this level are being displayed if button.__level > self.__messageLevel : self.setMessageLevel( button.__level ) # scroll to the next one toFind = IECore.Msg.levelAsString( button.__level ) + " : " if not self.__text._qtWidget().find( toFind ) : self.__text._qtWidget().moveCursor( QtGui.QTextCursor.Start ) self.__text._qtWidget().find( toFind ) def __appendMessageToText( self, level, context, message ) : if level > self.__messageLevel : return False formatted = "<h1 class='%s'>%s : %s </h1><span class='message'>%s</span><br>" % ( IECore.Msg.levelAsString( level ), IECore.Msg.levelAsString( level ), context, message.replace( "\n", "<br>" ) ) self.__text.appendHTML( formatted ) return True class _MessageHandler( IECore.MessageHandler ) : def __init__( self, messageWidget ) : IECore.MessageHandler.__init__( self ) self._forwarder = IECore.CompoundMessageHandler() # using a weak reference because we're owned by the MessageWidget, # so we mustn't have a reference back. self.__messageWidget = weakref.ref( messageWidget ) def handle( self, level, context, msg ) : self._forwarder.handle( level, context, msg ) w = self.__messageWidget() if w : GafferUI.EventLoop.executeOnUIThread( IECore.curry( w.appendMessage, level, context, msg ) ) else : # the widget has died. bad things are probably afoot so its best # that we output the messages somewhere to aid in debugging. IECore.MessageHandler.getDefaultHandler().handle( level, context, msg )
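# A small standard-library illustration of the weak-reference pattern used by
# _MessageHandler above : the handler is owned by the MessageWidget, so it must not
# hold a strong reference back, otherwise the two objects would keep each other
# alive. weakref.ref() lets the handler check whether its owner still exists and fall
# back gracefully once it has died. The classes below are illustrative only and are
# not part of the GafferUI API.

class _Owner( object ) :

	def __init__( self ) :

		self.handler = _Handler( self )

class _Handler( object ) :

	def __init__( self, owner ) :

		# weak reference : does not keep the owner alive
		self.__owner = weakref.ref( owner )

	def handle( self, msg ) :

		owner = self.__owner() # None once the owner has been garbage collected
		if owner is not None :
			print( "delivering to owner : " + msg )
		else :
			print( "owner has died, falling back : " + msg )

if __name__ == "__main__" :

	o = _Owner()
	h = o.handler
	h.handle( "hello" )    # delivering to owner : hello
	del o                  # in CPython the owner is collected immediately here
	h.handle( "goodbye" )  # owner has died, falling back : goodbye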
""" Alexa Recipe Reader Template Skill """ from __future__ import print_function import os import time import types import re import json import boto3 from boto3.dynamodb.types import TypeSerializer, TypeDeserializer # --------------- Helpers that build all of the responses ---------------------- def build_speechlet_response(title, output, reprompt_text, should_end_session, show_card = True, card_content = ""): response = { 'outputSpeech': { }, 'reprompt': { 'outputSpeech': { } }, 'shouldEndSession': should_end_session } if show_card or card_content: response['card'] = { 'type': 'Simple', 'title': title } if card_content: response['card']['content'] = card_content else: response['card']['content'] = strip_ssml(output) # detect use of SSML in response response['outputSpeech']['type'] = 'SSML' if output: response['outputSpeech']['ssml'] = to_ssml(output + '<audio src="https://s3.amazonaws.com/recipereader/silent_48k.mp3" />') else: response['outputSpeech']['ssml'] = to_ssml(" ") # detect use of SSML in reprompt response['reprompt']['outputSpeech']['type'] = 'SSML' if reprompt_text: response['reprompt']['outputSpeech']['ssml'] = to_ssml(reprompt_text) else: response['reprompt']['outputSpeech']['ssml'] = to_ssml(" ") return response def build_response(session_attributes, speechlet_response): return { 'version': os.environ['VERSION'], 'sessionAttributes': session_attributes, 'response': speechlet_response } # --------------- SSML Helper functions ------------------------------- def strip_ssml(ssml): ssmlre = re.compile('<[^>]+>') return re.sub(ssmlre, "", ssml) def to_ssml(text): """Adds SSML headers to string. """ return '<speak>'+ text + '</speak>' def add_ssml_pause(duration): """Turns a duration into an SSML tag.""" if duration: return '<break time="%s"/> ' % (duration) else: return '' def step_to_ssml(step): return step['instruction']+add_ssml_pause(step['estimated_time']) def done_this_step(): return "Are you finished with this step? " # --------------- Functions that control the skill's behavior ------------------ def get_welcome_response(): """ If we wanted to initialize the session to have some attributes we could add those here """ session_attributes = {} card_title = "Welcome to Recipe Reader!" speech_output = "Welcome to Recipe Reader. Which set of instructions should I read? " \ "You can say begin song, or begin dance." reprompt_text = "Should I begin song, or begin dance? " should_end_session = False return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session)) def get_help_response(): """ Describes functionality and options """ session_attributes = {} card_title = "How to use Recipe Reader" speech_output = "Recipe Reader lets you navigate sets of instructions. "\ "To begin, say begin song to hear instructions for building a tune. "\ "Or you can say begin dance to hear instructions for a far-out dance. " reprompt_text = "Try saying begin song to start." should_end_session = False return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session)) def handle_session_end_request(): """Session end close-out and response""" card_title = "Signing Off" speech_output = "" # Setting this to true ends the session and exits the skill. 
return build_response({}, build_speechlet_response(card_title, speech_output, reprompt_text=None, should_end_session=True, show_card=False)) def start_instructions(intent, session): """Start a set of instructions based on slot value """ sesh_attr = persist_attributes(session) chosen_set = str(get_slot_value(intent, "Recipe")) recipes = load_recipes() if chosen_set in recipes: recipe = recipes[chosen_set] else: return build_response({}, build_speechlet_response("Unknown Recipe", "I'm sorry, but I don't know that recipe.", None, should_end_session=True, show_card=False)) # save recipe as part of session variables sesh_attr['recipe'] = recipe # begin preparing response title = recipe['title'] speech = "Great! Let's begin. " + recipe['intro'] # append begin message with first step of instructions first_step = recipe['recipe'][0] speech += "First, " + step_to_ssml(first_step) sesh_attr['last_step'] = first_step # log to database for cross-session use db_log_step(session['user']['userId'], recipe, first_step) return build_response(sesh_attr, build_speechlet_response(title, speech, reprompt_text=done_this_step(), should_end_session=False, show_card=False)) def load_recipes(filepath="recipes.json"): """ Load recipe file """ f = open(filepath, 'r') recipes = json.loads(f.read()) f.close() return recipes def get_yes_no(intent, session): """ Handle a yes/no answer based on previous intent & follow-up question """ sesh_attr = persist_attributes(session) if 'last_step' in sesh_attr and 'recipe' in sesh_attr: if intent['name'] == "AMAZON.YesIntent": return get_next(intent, session) else: return handle_session_end_request() return build_response(sesh_attr, build_speechlet_response("Not sure what you mean", "I am not sure which question you're answering.", reprompt_text=None, should_end_session=True, show_card=False)) def get_next(intent, session): """ Move to next item in instructions """ sesh_attr = persist_attributes(session) userID = session['user']['userId'] if 'last_step' in sesh_attr and 'recipe' in sesh_attr: recipe = sesh_attr['recipe'] last_step = sesh_attr['last_step'] else: ds = TypeDeserializer() full_last_step = db_get_last_step(userID) recipe = ds.deserialize(full_last_step['recipe']) last_step = ds.deserialize(full_last_step['step']) next_step = recipe_next_step(recipe, last_step) if next_step: # log to database for cross-session use db_log_step(userID, recipe, next_step) sesh_attr.update({'last_step': next_step, 'recipe': recipe}) return build_response(sesh_attr, build_speechlet_response("Next Step", step_to_ssml(next_step), reprompt_text=done_this_step(), should_end_session=False, show_card=False)) else: return build_response(sesh_attr, build_speechlet_response( recipe['title']+": Finished!", "That was the last step! 
" + recipe['conclusion'], reprompt_text=None, should_end_session=True, show_card=False)) def recipe_next_step(recipe, step): """Compares a recipe and a step, and returns the next step """ steps = recipe['recipe'] try: return steps[steps.index(step)+1] except IndexError: return None def set_pause(intent, session): """ Pause instructions until resume is called for """ sesh_attr = persist_attributes(session) return build_response(sesh_attr, build_speechlet_response("Waiting...", add_ssml_pause("10s"), reprompt_text=None, should_end_session=True, show_card=False)) def get_previous(intent, session): """ Go back to previous step in instructions """ sesh_attr = persist_attributes(session) userID = session['user']['userId'] if 'last_step' in sesh_attr and 'recipe' in sesh_attr: recipe = sesh_attr['recipe'] last_step = sesh_attr['last_step'] else: ds = TypeDeserializer() full_last_step = db_get_last_step(userID) recipe = ds.deserialize(full_last_step['recipe']) last_step = ds.deserialize(full_last_step['step']) next_step = recipe_prior_step(recipe, last_step) # log to database for cross-session use db_log_step(userID, recipe, next_step) sesh_attr.update({'last_step': next_step, 'recipe': recipe}) return build_response(sesh_attr, build_speechlet_response("Going Back", step_to_ssml(next_step), reprompt_text=done_this_step(), should_end_session=False, show_card=False)) def recipe_prior_step(recipe, step): """Compares a recipe and a step, and returns the prior step """ steps = recipe['recipe'] if steps.index(step) == 0: return step return steps[steps.index(step)-1] def get_repeat(intent, session): """ Repeat the current step in instructions """ sesh_attr = persist_attributes(session) userID = session['user']['userId'] if 'last_step' in sesh_attr and 'recipe' in sesh_attr: last_step = sesh_attr['last_step'] else: ds = TypeDeserializer() full_last_step = db_get_last_step(userID) last_step = ds.deserialize(full_last_step['step']) return build_response(sesh_attr, build_speechlet_response("Replay Step", step_to_ssml(last_step), reprompt_text=done_this_step(), should_end_session=False, show_card=False)) def get_start_over(intent, session): """ Go back to the beginning of the instruction set """ sesh_attr = persist_attributes(session) userID = session['user']['userId'] if 'last_step' in sesh_attr and 'recipe' in sesh_attr: recipe = sesh_attr['recipe'] else: ds = TypeDeserializer() full_last_step = db_get_last_step(userID) recipe = ds.deserialize(full_last_step['recipe']) next_step = recipe['recipe'][0] # log to database for cross-session use db_log_step(userID, recipe, next_step) sesh_attr.update({'last_step': next_step, 'recipe': recipe}) return build_response(sesh_attr, build_speechlet_response("Starting Over", step_to_ssml(next_step), reprompt_text=done_this_step(), should_end_session=False, show_card=False)) # --------------- Events ------------------------------------- # def on_session_started(session_started_request, session): """ Called when the session starts Can be used to initialize values if used across intents. 
""" print("on_session_started requestId=" + session_started_request['requestId'] + ", sessionId=" + session['sessionId']) def on_launch(launch_request, session): """ Called when the user launches the skill without an intent """ print("on_launch requestId=" + launch_request['requestId'] + ", sessionId=" + session['sessionId']) # Dispatch to your skill's launch return get_welcome_response() def on_intent(intent_request, session): """ Called when the user specifies an intent for this skill """ print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId']) intent = intent_request['intent'] intent_name = intent_request['intent']['name'] # Dispatch to your skill's intent handlers if intent_name == "StartIntent": return start_instructions(intent, session) elif intent_name in ['AMAZON.YesIntent','AMAZON.NoIntent']: return get_yes_no(intent, session) elif intent_name == "AMAZON.NextIntent": return get_next(intent, session) elif intent_name == "AMAZON.PauseIntent": return set_pause(intent, session) elif intent_name == "AMAZON.PreviousIntent": return get_previous(intent, session) elif intent_name == "AMAZON.RepeatIntent": return get_repeat(intent, session) elif intent_name == "AMAZON.ResumeIntent": return get_repeat(intent, session) elif intent_name == "AMAZON.StartOverIntent": return get_start_over(intent, session) elif intent_name in ["AMAZON.HelpIntent"]: return get_help_response() elif intent_name in ["AMAZON.CancelIntent", "AMAZON.StopIntent"]: return handle_session_end_request() else: raise ValueError("Invalid intent") def on_session_ended(session_ended_request, session): """ Called when the user ends the session. Is not called when the skill returns should_end_session=true """ print("on_session_ended requestId=" + session_ended_request['requestId'] + ", sessionId=" + session['sessionId']) # add cleanup logic here def persist_attributes(session): if 'attributes' in session.keys(): return session['attributes'] else: return {} def get_slot_value(intent, slot_name, default=None, avoid=[""]): """Function to safely return a slot value from the dictionary. Only returns non-default value if intent contains a value for the slot, and that value is not an empty string. """ if slot_name in intent['slots'] and "value" in intent['slots'][slot_name] \ and intent['slots'][slot_name]['value'] not in avoid: # only return return intent['slots'][slot_name]['value'] else: return default # --------------- Main handler ------------------ def lambda_handler(event, context): """ Route the incoming request based on type (LaunchRequest, IntentRequest, etc.) The JSON body of the request is provided in the event parameter. """ print("event.session.application.applicationId=" + event['session']['application']['applicationId']) #Application ID to prevent someone else from calling this function. 
if (event['session']['application']['applicationId'] != os.environ['SKILL_ID']): raise ValueError("Invalid Application ID") if event['session']['new']: on_session_started({'requestId': event['request']['requestId']}, event['session']) if event['request']['type'] == "LaunchRequest": return on_launch(event['request'], event['session']) elif event['request']['type'] == "IntentRequest": return on_intent(event['request'], event['session']) elif event['request']['type'] == "SessionEndedRequest": return on_session_ended(event['request'], event['session']) # ---------------------- Speech helper functions ---------------------- def comma_conjoin(inlist, conjunction): """Parses the elements of a list into a string joined by commas, with an 'and' before the final element. Oxford comma! """ if len(inlist) == 0: return "" elif len(inlist) == 1: return str(inlist.pop()) elif len(inlist) == 2: return (" " + conjunction + " ").join(inlist) text = ", " + conjunction + " " + inlist.pop() text = ", ".join(inlist) + text return text def comma_and(inlist): """Parses the elements of a list into a string joined by commas, with an 'and' before the final element. """ return comma_conjoin(inlist, "and") def comma_or(inlist): """Parses the elements of a list into a string joined by commas, with an 'or' before the final element. """ return comma_conjoin(inlist, "or") # --------------- DynamoDB Helper calls ------------------------------ # def db_connect(): return boto3.client('dynamodb', aws_access_key_id=os.environ['AWS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET']) def db_log_step(userID, recipe, step): """Log the most recent step that a user has been given """ dynamo = db_connect() ts = TypeSerializer() put_resp = dynamo.put_item(TableName=os.environ['STEP_HISTORY_TABLE'], Item={'userID': {'S': userID}, 'time': {'N': str(time.time())}, 'step': ts.serialize(step), 'recipe': ts.serialize(recipe) }) upd_resp = dynamo.update_item(TableName=os.environ['STEP_LAST_TABLE'], Key={'userID': {'S': userID}}, AttributeUpdates={'step': {'Action': 'PUT', 'Value': ts.serialize(step)}, 'recipe': {'Action': 'PUT', 'Value': ts.serialize(recipe)}} ) return (put_resp, upd_resp) def db_get_last_step(userID): """Get the most recent step that a user has executed """ dynamo = db_connect() ds = TypeDeserializer() response = dynamo.get_item(TableName=os.environ['STEP_LAST_TABLE'], Key={'userID': {'S': userID}}) return response['Item']
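# Illustrative sketch of the structure that load_recipes() expects to find in
# recipes.json, inferred from the fields the handlers above read: one entry per
# "Recipe" slot value (e.g. "song", "dance"), each with a title, an intro, an ordered
# "recipe" list of steps carrying an instruction and an SSML pause duration, and a
# conclusion. The wording below is made up purely for illustration.
EXAMPLE_RECIPES = {
    "song": {
        "title": "Song Instructions",
        "intro": "These steps walk you through building a tune.",
        "recipe": [
            {"instruction": "Hum a melody you like.", "estimated_time": "5s"},
            {"instruction": "Tap out a rhythm to go with it.", "estimated_time": "10s"}
        ],
        "conclusion": "You just put together a song."
    }
}

# start_instructions() would then build its first response roughly like this:
example_recipe = EXAMPLE_RECIPES["song"]
example_first_step = example_recipe["recipe"][0]
example_speech = "First, " + step_to_ssml(example_first_step)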
""" This module contains mix-in classes with methods designed to test RoachInterface instances. These classes do not set up real or mock hardware, so they are not intended to be run directly. To use them, write a class that inherits from one or more of them and has a setup() class method that creates a RoachInterface instance: @classmethod def setup(cls): cls.ri = set_up_roach_somehow() using combination of real and mock hardware. Currently, the setup() method is run before the tests in each mix-in class. I'm not sure how to get nose to stop doing this, and in any case it's probably easier to write tests if each class can expects a fresh ROACH. """ import numpy as np from kid_readout.measurement import core from kid_readout.roach import calculate class RoachMixin(object): """ This class has methods that are tests applicable to all ROACH types and configurations. """ def test_state(self): _ = self.ri.state _ = self.ri.get_state() _ = self.ri.state_arrays _ = self.ri.get_state_arrays() _ = self.ri.active_state_arrays _ = self.ri.get_active_state_arrays() def test_precomputed_wavenorm(self): np.random.seed(123) for k in range(9): self.ri.set_tone_baseband_freqs(np.linspace(100, 120, 2 ** k), nsamp=2 ** 16, preset_norm=False) actual_wavenorm = self.ri.wavenorm self.ri.set_tone_baseband_freqs(np.linspace(100, 120, 2 ** k), nsamp=2 ** 16, phases=self.ri.phases, preset_norm=True) assert self.ri.wavenorm >= 0.99 * actual_wavenorm # guarantees the wave won't overflow def test_calculate_modulation_period(self): roach_state = core.StateDict(modulation_rate=7, modulation_output=2, heterodyne=True) assert calculate.modulation_period_samples(roach_state) == 256 roach_state = core.StateDict(modulation_rate=7, modulation_output=2, heterodyne=False) assert calculate.modulation_period_samples(roach_state) == 128 def test_get_measurement_blocks(self): num_tones = 32 self.ri.set_tone_baseband_freqs(np.linspace(100, 120, num_tones), nsamp=2 ** 16) self.ri.select_fft_bins(range(num_tones)) _ = self.ri.get_measurement_blocks(2) def test_get_current_bank(self): assert self.ri.get_current_bank() is not None def test_hardware_delay(self): if self.ri.heterodyne: self.ri.set_lo(1000.) self.ri.set_loopback(True) self.ri.set_debug(False) self.ri.set_fft_gain(0) delay = self.ri.measure_hardware_delay() if not self.ri._using_mock_roach: assert(np.abs(delay) < 1e-6) class Roach1Mixin(object): """ This class contains tests for the ROACH1 that can run using either real or mock hardware. """ pass class Roach2Mixin(object): """ This class contains tests for the ROACH2 that can run using either real or mock hardware. """ pass class BasebandSoftwareMixin(object): """ This class contains tests for baseband software that can run using either real or mock hardware. """ def test_is_not_heterodyne(self): assert not self.ri.heterodyne def test_calc_fft_bins(self): for nsamp in 2 ** np.arange(10, 18): max_nsamp = nsamp / 2 # only positive bins are valid for baseband # Arguments give closed interval, and need to avoid max_nsamp edge tone_bins = np.random.random_integers(0, max_nsamp - 1, size=128) bins = self.ri.calc_fft_bins(tone_bins, nsamp) assert np.all(bins >= 0) assert np.all(bins < self.ri.nfft) class HeterodyneSoftwareMixin(object): """ This class contains tests for heterodyne software that can run using either real or mock hardware. 
""" def test_is_heterodyne(self): assert self.ri.heterodyne def test_calc_fft_bins(self): for nsamp in 2 ** np.arange(10, 18): max_nsamp = nsamp # Both positive and negative bins are valid for heterodyne # Arguments give closed interval, and need to avoid max_nsamp edge tone_bins = np.random.random_integers(0, max_nsamp - 1, size=128) bins = self.ri.calc_fft_bins(tone_bins, nsamp) assert np.all(bins >= 0) assert np.all(bins < self.ri.nfft) def test_iq_delay(self): self.ri.set_loopback(True) self.ri.set_debug(False) self.ri.set_fft_gain(0) self.ri.find_best_iq_delay() class BasebandHardwareMixin(object): """ This class contains tests for baseband hardware that can run in loopback mode. """ def test_not_mock(self): assert not self.ri._using_mock_roach class HeterodyneHardwareMixin(object): """ This class contains tests for heterodyne hardware that can run in loopback mode. """ def test_iq_delay(self): self.ri.set_loopback(True) self.ri.set_debug(False) self.ri.set_fft_gain(0) iq_delay,rejection = self.ri.find_best_iq_delay() assert iq_delay == 0 assert rejection > 55.0 def test_not_mock(self): assert not self.ri._using_mock_roach def test_fft_bin_selection(self): test_cases = [#np.array([[16368, 16370, 16372, 16374, 16376, 16379, 16381, 16383, 1, #3, 5, 8, 10, 12, 14, 16]]), #this special case doesn't quite work because of the # readout order np.array([[16368, 16370, 16372, 16374, 16376, 16379, 16381, 16383, 8, 10, 12, 14, 16, 18, 20, 22]]), np.array([[7333,7335]]), np.array([[ 9328, 10269, 11210, 12150, 13091, 14032, 14973, 15914, 470, 1411, 2352, 3293, 4234, 5174, 6115, 7056]]), np.array([[7040, 7042, 7044, 7046, 7048, 7051, 7053, 7055, 7057, 7059, 7061, 7064, 7066, 7068, 7070, 7072]]), np.array([[8193, 8195, 8197, 8199, 8201, 8203, 8206, 8208, 8210, 8212, 8214, 8216, 8218, 8220, 8222, 8224, 8160, 8162, 8164, 8166, 8168, 8171, 8173, 8175, 8177, 8179, 8181, 8183, 8185, 8187, 8189, 8191]])] for bin_array in test_cases: yield self.check_fft_bin_selection, bin_array def check_fft_bin_selection(self,bin_array): self.ri.select_bank(0) self.ri.set_debug(True) self.ri.fft_bins = bin_array self.ri.select_fft_bins(range(self.ri.fft_bins.shape[1])) #time.sleep(0.1) data,sequence = self.ri.get_data(demod=False) assert(np.all(data[0,:].imag.astype('int') == self.ri.fpga_fft_readout_indexes)) def test_peak_fft_bin(self): # Because the PFB channels currently have rather poor isolation up to several channels away, this test will # only work for widely spaced channeels test_cases = [np.array([[1000, 1201, 1457, 8096]]), np.array([[8193, 11111, 12345, 16383, 1000, 1201, 1457, 7096]])] for bin_array in test_cases: yield self.check_peak_fft_bin, bin_array def check_peak_fft_bin(self,bin_array): baseband_freqs = bin_array[0,:]/2.**14 baseband_freqs[baseband_freqs>0.5] = baseband_freqs[baseband_freqs>0.5]-1 baseband_freqs = self.ri.fs*baseband_freqs self.ri.set_tone_baseband_freqs(baseband_freqs,nsamp=2**16) self.ri.set_debug(False) self.ri.set_loopback(True) self.ri.set_fft_gain(0) base_fft_bins = self.ri.fft_bins.copy() values = [] bin_offsets = [-1,0,1] for bin_offset in bin_offsets: self.ri.fft_bins = base_fft_bins + bin_offset self.ri.fft_bins = np.mod(self.ri.fft_bins,2**14) self.ri.select_fft_bins(range(self.ri.fft_bins.shape[1])) data,sequence = self.ri.get_data(demod=False) values.append((np.abs(data)**2).mean(0)) values = np.array(values) for bin_index in range(values.shape[1]): assert(bin_offsets[values[:,bin_index].argmax()]==0) class BasebandAnalogMixin(object): """ This class contains tests for 
baseband hardware that require an analog board.
    """
    pass


class HeterodyneAnalogMixin(object):
    """
    This class contains tests for heterodyne hardware that require an analog board.
    """
    pass


class MockMixin(object):
    """
    This class contains tests specifically for the MockRoach and MockValon classes.
    """
    pass
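# A hypothetical test class combining the mix-ins above, following the recipe in the
# module docstring: inherit the mix-ins that match the software and hardware under
# test and provide a setup() class method that creates cls.ri. The class name and
# set_up_mock_heterodyne_roach() are placeholders, not part of this module.
class TestMockHeterodyneRoach(RoachMixin, HeterodyneSoftwareMixin, MockMixin):

    @classmethod
    def setup(cls):
        cls.ri = set_up_mock_heterodyne_roach()  # placeholder constructor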
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os import sys import time from gpu_tests import gpu_integration_test from gpu_tests import path_util data_path = os.path.join(path_util.GetChromiumSrcDir(), 'content', 'test', 'data') test_harness_script = r""" var domAutomationController = {}; domAutomationController._finished = false; domAutomationController._succeeded = false; domAutomationController.send = function(msg) { domAutomationController._finished = true; if (msg.toLowerCase() == "finished") { domAutomationController._succeeded = true; } else { domAutomationController._succeeded = false; } } window.domAutomationController = domAutomationController; function GetDriverBugWorkarounds() { var query_result = document.querySelector('.workarounds-list'); var browser_list = [] for (var i=0; i < query_result.childElementCount; i++) browser_list.push(query_result.children[i].textContent); return browser_list; }; """ class GpuProcessIntegrationTest(gpu_integration_test.GpuIntegrationTest): @classmethod def Name(cls): """The name by which this test is invoked on the command line.""" return 'gpu_process' @classmethod def SetUpProcess(cls): super(GpuProcessIntegrationTest, cls).SetUpProcess() cls.CustomizeBrowserArgs(cls._AddDefaultArgs([])) cls.StartBrowser() cls.SetStaticServerDirs([data_path]) @staticmethod def _AddDefaultArgs(browser_args): # All tests receive the following options. return [ '--enable-gpu-benchmarking', # TODO(kbr): figure out why the following option seems to be # needed on Android for robustness. # https://github.com/catapult-project/catapult/issues/3122 '--no-first-run', # Disable: # Do you want the application "Chromium Helper.app" to accept incoming # network connections? # dialogs on macOS. crbug.com/969559 '--disable-device-discovery-notifications', ] + browser_args @classmethod def GenerateGpuTests(cls, options): # The browser test runner synthesizes methods with the exact name # given in GenerateGpuTests, so in order to hand-write our tests but # also go through the _RunGpuTest trampoline, the test needs to be # slightly differently named. # Also note that since functional_video.html refers to files in # ../media/ , the serving dir must be the common parent directory. 
tests = (('GpuProcess_canvas2d', 'gpu/functional_canvas_demo.html'), ('GpuProcess_css3d', 'gpu/functional_3d_css.html'), ('GpuProcess_webgl', 'gpu/functional_webgl.html'), ('GpuProcess_video', 'gpu/functional_video.html'), ('GpuProcess_gpu_info_complete', 'gpu/functional_3d_css.html'), ('GpuProcess_driver_bug_workarounds_in_gpu_process', 'chrome:gpu'), ('GpuProcess_readback_webgl_gpu_process', 'chrome:gpu'), ('GpuProcess_feature_status_under_swiftshader', 'chrome:gpu'), ('GpuProcess_one_extra_workaround', 'chrome:gpu'), ('GpuProcess_disable_gpu', 'gpu/functional_webgl.html'), ('GpuProcess_disable_gpu_and_swiftshader', 'gpu/functional_webgl.html'), ('GpuProcess_disable_swiftshader', 'gpu/functional_webgl.html'), ('GpuProcess_disabling_workarounds_works', 'chrome:gpu'), ('GpuProcess_mac_webgl_backgrounded_high_performance', 'gpu/functional_blank.html'), ('GpuProcess_mac_webgl_high_performance', 'gpu/functional_webgl_high_performance.html'), ('GpuProcess_mac_webgl_low_power', 'gpu/functional_webgl_low_power.html'), ('GpuProcess_mac_webgl_terminated_high_performance', 'gpu/functional_blank.html'), ('GpuProcess_swiftshader_for_webgl', 'gpu/functional_webgl.html'), ('GpuProcess_webgl_disabled_extension', 'gpu/functional_webgl_disabled_extension.html')) for t in tests: yield (t[0], t[1], ('_' + t[0])) def RunActualGpuTest(self, test_path, *args): test_name = args[0] getattr(self, test_name)(test_path) ###################################### # Helper functions for the tests below def _Navigate(self, test_path): url = self.UrlOfStaticFilePath(test_path) # It's crucial to use the action_runner, rather than the tab's # Navigate method directly. It waits for the document ready state # to become interactive or better, avoiding critical race # conditions. self.tab.action_runner.Navigate( url, script_to_evaluate_on_commit=test_harness_script) def _WaitForTestCompletion(self, tab): tab.action_runner.WaitForJavaScriptCondition( 'window.domAutomationController._finished', timeout=10) if not tab.EvaluateJavaScript('window.domAutomationController._succeeded'): self.fail('Test reported that it failed') def _NavigateAndWait(self, test_path): self._Navigate(test_path) self._WaitForTestCompletion(self.tab) def _VerifyGpuProcessPresent(self): tab = self.tab if not tab.EvaluateJavaScript('chrome.gpuBenchmarking.hasGpuChannel()'): self.fail('No GPU channel detected') def _ValidateDriverBugWorkaroundsImpl(self, is_expected, workaround_name): tab = self.tab gpu_driver_bug_workarounds = tab.EvaluateJavaScript( 'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()') is_present = workaround_name in gpu_driver_bug_workarounds failure = False if is_expected and not is_present: failure = True error_message = "is missing" elif not is_expected and is_present: failure = True error_message = "is not expected" if failure: print 'Test failed. Printing page contents:' print tab.EvaluateJavaScript('document.body.innerHTML') self.fail('%s %s workarounds: %s' % (workaround_name, error_message, gpu_driver_bug_workarounds)) def _ValidateDriverBugWorkarounds(self, expected_workaround, unexpected_workaround): if not expected_workaround and not unexpected_workaround: return if expected_workaround: self._ValidateDriverBugWorkaroundsImpl(True, expected_workaround) if unexpected_workaround: self._ValidateDriverBugWorkaroundsImpl(False, unexpected_workaround) # This can only be called from one of the tests, i.e., after the # browser's been brought up once. 
def _RunningOnAndroid(self): options = self.GetOriginalFinderOptions().browser_options return options.browser_type.startswith('android') def _SupportsSwiftShader(self): # Currently we enable SwiftShader on Windows, Linux and MacOS. return (sys.platform in ('cygwin', 'win32', 'darwin') or (sys.platform.startswith('linux') and not self._RunningOnAndroid())) @staticmethod def _Filterer(workaround): # Filter all entries starting with "disabled_extension_" and # "disabled_webgl_extension_", as these are synthetic entries # added to make it easier to read the logs. banned_prefixes = ['disabled_extension_', 'disabled_webgl_extension_'] for p in banned_prefixes: if workaround.startswith(p): return False return True def _CompareAndCaptureDriverBugWorkarounds(self): tab = self.tab if not tab.EvaluateJavaScript('chrome.gpuBenchmarking.hasGpuProcess()'): self.fail('No GPU process detected') if not tab.EvaluateJavaScript('chrome.gpuBenchmarking.hasGpuChannel()'): self.fail('No GPU channel detected') browser_list = [ x for x in tab.EvaluateJavaScript('GetDriverBugWorkarounds()') if self._Filterer(x) ] gpu_list = [ x for x in tab.EvaluateJavaScript( 'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()') if self._Filterer(x) ] diff = set(browser_list).symmetric_difference(set(gpu_list)) if len(diff) > 0: self.fail('Browser and GPU process list of driver bug' 'workarounds are not equal: %s != %s, diff: %s' % (browser_list, gpu_list, list(diff))) basic_infos = tab.EvaluateJavaScript('browserBridge.gpuInfo.basicInfo') disabled_gl_extensions = None for info in basic_infos: if info['description'].startswith('Disabled Extensions'): disabled_gl_extensions = info['value'] break return gpu_list, disabled_gl_extensions ###################################### # The actual tests def _GpuProcess_canvas2d(self, test_path): self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs([])) self._NavigateAndWait(test_path) self._VerifyGpuProcessPresent() def _GpuProcess_css3d(self, test_path): self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs([])) self._NavigateAndWait(test_path) self._VerifyGpuProcessPresent() def _GpuProcess_webgl(self, test_path): self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs([])) self._NavigateAndWait(test_path) self._VerifyGpuProcessPresent() def _GpuProcess_video(self, test_path): self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs([])) self._NavigateAndWait(test_path) self._VerifyGpuProcessPresent() def _GpuProcess_gpu_info_complete(self, test_path): # Regression test for crbug.com/454906 self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs([])) self._NavigateAndWait(test_path) tab = self.tab system_info = tab.browser.GetSystemInfo() if not system_info: self.fail('Browser must support system info') if not system_info.gpu: self.fail('Target machine must have a GPU') if not system_info.gpu.aux_attributes: self.fail('Browser must support GPU aux attributes') if not 'gl_renderer' in system_info.gpu.aux_attributes: self.fail('Browser must have gl_renderer in aux attribs') if (sys.platform != 'darwin' and len(system_info.gpu.aux_attributes['gl_renderer']) <= 0): # On MacOSX we don't create a context to collect GL strings.1 self.fail('Must have a non-empty gl_renderer string') def _GpuProcess_driver_bug_workarounds_in_gpu_process(self, test_path): self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs(['--use_gpu_driver_workaround_for_testing'])) self._Navigate(test_path) self._ValidateDriverBugWorkarounds('use_gpu_driver_workaround_for_testing', None) def 
_GpuProcess_readback_webgl_gpu_process(self, test_path): # Hit test group 1 with entry 152 from kSoftwareRenderingListEntries. self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs( ['--gpu-blacklist-test-group=1', '--disable-gpu-compositing'])) self._Navigate(test_path) feature_status_list = self.tab.EvaluateJavaScript( 'browserBridge.gpuInfo.featureStatus.featureStatus') result = True for name, status in feature_status_list.items(): if name == 'webgl': result = result and status == 'enabled_readback' elif name == 'webgl2': result = result and status == 'unavailable_off' else: pass if not result: self.fail('WebGL readback setup failed: %s' % feature_status_list) def _GpuProcess_feature_status_under_swiftshader(self, test_path): if not self._SupportsSwiftShader(): return # Hit test group 2 with entry 153 from kSoftwareRenderingListEntries. self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs(['--gpu-blacklist-test-group=2'])) self._Navigate(test_path) feature_status_list = self.tab.EvaluateJavaScript( 'browserBridge.gpuInfo.featureStatus.featureStatus') for name, status in feature_status_list.items(): if name == 'webgl': if status != 'unavailable_software': self.fail('WebGL status for SwiftShader failed: %s' % status) return elif name == '2d_canvas': if status != 'unavailable_software': self.fail('2D Canvas status for SwiftShader failed: %s' % status) return else: pass if not sys.platform.startswith('linux'): # On Linux we relaunch GPU process to fallback to SwiftShader, therefore # featureStatusForHardwareGpu isn't available. feature_status_for_hardware_gpu_list = self.tab.EvaluateJavaScript( 'browserBridge.gpuInfo.featureStatusForHardwareGpu.featureStatus') for name, status in feature_status_for_hardware_gpu_list.items(): if name == 'webgl': if status != 'unavailable_off': self.fail('WebGL status for hardware GPU failed: %s' % status) return elif name == '2d_canvas': if status != 'enabled': self.fail('2D Canvas status for hardware GPU failed: %s' % status) return else: pass def _GpuProcess_one_extra_workaround(self, test_path): # Start this test by launching the browser with no command line # arguments. self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs([])) self._Navigate(test_path) self._VerifyGpuProcessPresent() recorded_workarounds, recorded_disabled_gl_extensions = ( self._CompareAndCaptureDriverBugWorkarounds()) # Relaunch the browser enabling test group 1 with entry 215, where # use_gpu_driver_workaround_for_testing is enabled. browser_args = ['--gpu-driver-bug-list-test-group=1'] # Add the testing workaround to the recorded workarounds. recorded_workarounds.append('use_gpu_driver_workaround_for_testing') browser_args.append('--disable-gl-extensions=' + recorded_disabled_gl_extensions) self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs(browser_args)) self._Navigate(test_path) self._VerifyGpuProcessPresent() new_workarounds, new_disabled_gl_extensions = ( self._CompareAndCaptureDriverBugWorkarounds()) diff = set(recorded_workarounds).symmetric_difference(new_workarounds) tab = self.tab if len(diff) > 0: print 'Test failed. Printing page contents:' print tab.EvaluateJavaScript('document.body.innerHTML') self.fail('GPU process and expected list of driver bug ' 'workarounds are not equal: %s != %s, diff: %s' % (recorded_workarounds, new_workarounds, list(diff))) if recorded_disabled_gl_extensions != new_disabled_gl_extensions: print 'Test failed. 
Printing page contents:' print tab.EvaluateJavaScript('document.body.innerHTML') self.fail('The expected disabled gl extensions are ' 'incorrect: %s != %s:' % (recorded_disabled_gl_extensions, new_disabled_gl_extensions)) def _GpuProcess_disable_gpu(self, test_path): # This test loads functional_webgl.html so that there is a # deliberate attempt to use an API which would start the GPU # process. if self._RunningOnAndroid(): # Chrome on Android doesn't support software fallback, skip it. # TODO(zmo): If this test runs on ChromeOS, we also need to skip it. return self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs(['--disable-gpu'])) self._NavigateAndWait(test_path) # On Windows, Linux or MacOS, SwiftShader is enabled, so GPU process # will still launch with SwiftShader. supports_swiftshader = self._SupportsSwiftShader() has_gpu_process = self.tab.EvaluateJavaScript( 'chrome.gpuBenchmarking.hasGpuProcess()') if supports_swiftshader and not has_gpu_process: self.fail('GPU process not detected') elif not supports_swiftshader and has_gpu_process: self.fail('GPU process detected') def _GpuProcess_disable_gpu_and_swiftshader(self, test_path): # Disable SwiftShader, so GPU process should not launch anywhere. if self._RunningOnAndroid(): # Chrome on Android doesn't support software fallback, skip it. # TODO(zmo): If this test runs on ChromeOS, we also need to skip it. return self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs(['--disable-gpu', '--disable-software-rasterizer'])) self._NavigateAndWait(test_path) # Windows will run the display compositor in the browser process if # accelerated GL and Swiftshader are both disabled. should_have_gpu_process = sys.platform != 'win32' has_gpu_process = self.tab.EvaluateJavaScript( 'chrome.gpuBenchmarking.hasGpuProcess()') if should_have_gpu_process and not has_gpu_process: self.fail('GPU process not detected') elif not should_have_gpu_process and has_gpu_process: self.fail('GPU process detected') def _GpuProcess_disable_swiftshader(self, test_path): # Disable SwiftShader, GPU process should be able to launch. self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs(['--disable-software-rasterizer'])) self._NavigateAndWait(test_path) if not self.tab.EvaluateJavaScript( 'chrome.gpuBenchmarking.hasGpuProcess()'): self.fail('GPU process not detected') def _GpuProcess_disabling_workarounds_works(self, test_path): # Hit exception from id 215 from kGpuDriverBugListEntries. self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs([ '--gpu-driver-bug-list-test-group=1', '--use_gpu_driver_workaround_for_testing=0' ])) self._Navigate(test_path) workarounds, _ = (self._CompareAndCaptureDriverBugWorkarounds()) if 'use_gpu_driver_workaround_for_testing' in workarounds: self.fail('use_gpu_driver_workaround_for_testing erroneously present') def _GpuProcess_swiftshader_for_webgl(self, test_path): # This test loads functional_webgl.html so that there is a deliberate # attempt to use an API which would start the GPU process. # On platforms where SwiftShader is not supported, skip this test. if not self._SupportsSwiftShader(): return args_list = ( # Triggering test_group 2 where WebGL is blacklisted. ['--gpu-blacklist-test-group=2'], # Explicitly disable GPU access. ['--disable-gpu']) for args in args_list: self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs(args)) self._NavigateAndWait(test_path) # Validate the WebGL unmasked renderer string. 
renderer = self.tab.EvaluateJavaScript('gl_renderer') if not renderer: self.fail('getParameter(UNMASKED_RENDERER_WEBGL) was null') if 'SwiftShader' not in renderer: self.fail('Expected SwiftShader renderer; instead got ' + renderer) # Validate GPU info. system_info = self.browser.GetSystemInfo() if not system_info: self.fail("Browser doesn't support GetSystemInfo") gpu = system_info.gpu if not gpu: self.fail('Target machine must have a GPU') if not gpu.aux_attributes: self.fail('Browser must support GPU aux attributes') if not gpu.aux_attributes['software_rendering']: self.fail("Software rendering was disabled") if 'SwiftShader' not in gpu.aux_attributes['gl_renderer']: self.fail("Expected 'SwiftShader' in GPU info GL renderer string") if 'Google' not in gpu.aux_attributes['gl_vendor']: self.fail("Expected 'Google' in GPU info GL vendor string") device = gpu.devices[0] if not device: self.fail("System Info doesn't have a device") # Validate extensions. ext_list = [ 'ANGLE_instanced_arrays', 'EXT_blend_minmax', 'EXT_texture_filter_anisotropic', 'WEBKIT_EXT_texture_filter_anisotropic', 'OES_element_index_uint', 'OES_standard_derivatives', 'OES_texture_float', 'OES_texture_float_linear', 'OES_texture_half_float', 'OES_texture_half_float_linear', 'OES_vertex_array_object', 'WEBGL_compressed_texture_etc1', 'WEBGL_debug_renderer_info', 'WEBGL_debug_shaders', 'WEBGL_depth_texture', 'WEBKIT_WEBGL_depth_texture', 'WEBGL_draw_buffers', 'WEBGL_lose_context', 'WEBKIT_WEBGL_lose_context', ] tab = self.tab for ext in ext_list: if tab.EvaluateJavaScript('!gl_context.getExtension("' + ext + '")'): self.fail("Expected " + ext + " support") def _GpuProcess_webgl_disabled_extension(self, test_path): # Hit exception from id 257 from kGpuDriverBugListEntries. self.RestartBrowserIfNecessaryWithArgs( self._AddDefaultArgs([ '--gpu-driver-bug-list-test-group=2', ])) self._NavigateAndWait(test_path) def _GpuProcess_mac_webgl_low_power(self, test_path): # Ensures that low-power WebGL content stays on the low-power GPU. if not self._IsDualGPUMacLaptop(): logging.info('Skipping test because not running on dual-GPU Mac laptop') return # Start with a clean browser instance to ensure the GPU process is in a # clean state. self.RestartBrowserWithArgs(self._AddDefaultArgs([])) # Wait a few seconds for the system to dispatch any GPU switched # notifications. time.sleep(3) self._NavigateAndWait(test_path) # Sleep for several seconds to ensure that any GPU switch is detected. time.sleep(6) if not self._IsIntelGPUActive(): self.fail( 'Low-power WebGL context incorrectly activated the high-performance ' 'GPU') def _GpuProcess_mac_webgl_high_performance(self, test_path): # Ensures that high-performance WebGL content activates the high-performance # GPU. if not self._IsDualGPUMacLaptop(): logging.info('Skipping test because not running on dual-GPU Mac laptop') return # Start with a clean browser instance to ensure the GPU process is in a # clean state. self.RestartBrowserWithArgs(self._AddDefaultArgs([])) # Wait a few seconds for the system to dispatch any GPU switched # notifications. time.sleep(3) self._NavigateAndWait(test_path) # Sleep for several seconds to ensure that any GPU switch is detected. time.sleep(6) if self._IsIntelGPUActive(): self.fail('High-performance WebGL context did not activate the ' 'high-performance GPU') def _GpuProcess_mac_webgl_backgrounded_high_performance(self, test_path): # Ensures that high-performance WebGL content in a background tab releases # the hold on the discrete GPU after 10 seconds. 
if not self._IsDualGPUMacLaptop(): logging.info('Skipping test because not running on dual-GPU Mac laptop') return # Start with a clean browser instance to ensure the GPU process is in a # clean state. self.RestartBrowserWithArgs(self._AddDefaultArgs([])) # Wait a few seconds for the system to dispatch any GPU switched # notifications. time.sleep(3) self._NavigateAndWait(test_path) blank_tab = self.tab # Create a new tab and navigate it to the high-power WebGL test. webgl_tab = self.browser.tabs.New() webgl_tab.Activate() webgl_url = self.UrlOfStaticFilePath( 'gpu/functional_webgl_high_performance.html') webgl_tab.action_runner.Navigate( webgl_url, script_to_evaluate_on_commit=test_harness_script) self._WaitForTestCompletion(webgl_tab) # Verify that the high-performance GPU is active. if self._IsIntelGPUActive(): self.fail('High-performance WebGL context did not activate the ' 'high-performance GPU') # Now activate the original tab. blank_tab.Activate() # Sleep for >10 seconds in order to wait for the hold on the # high-performance GPU to be released. time.sleep(15) if not self._IsIntelGPUActive(): self.fail( 'Backgrounded high-performance WebGL context did not release the ' 'hold on the high-performance GPU') def _GpuProcess_mac_webgl_terminated_high_performance(self, test_path): # Ensures that high-performance WebGL content in a background tab releases # the hold on the discrete GPU after 10 seconds. if not self._IsDualGPUMacLaptop(): logging.info('Skipping test because not running on dual-GPU Mac laptop') return # Start with a clean browser instance to ensure the GPU process is in a # clean state. self.RestartBrowserWithArgs(self._AddDefaultArgs([])) # Wait a few seconds for the system to dispatch any GPU switched # notifications. time.sleep(3) self._NavigateAndWait(test_path) # Create a new tab and navigate it to the high-power WebGL test. webgl_tab = self.browser.tabs.New() webgl_tab.Activate() webgl_url = self.UrlOfStaticFilePath( 'gpu/functional_webgl_high_performance.html') webgl_tab.action_runner.Navigate( webgl_url, script_to_evaluate_on_commit=test_harness_script) self._WaitForTestCompletion(webgl_tab) # Verify that the high-performance GPU is active. if self._IsIntelGPUActive(): self.fail('High-performance WebGL context did not activate the ' 'high-performance GPU') # Close the high-performance WebGL tab. webgl_tab.Close() # Sleep for >10 seconds in order to wait for the hold on the # high-performance GPU to be released. time.sleep(15) if not self._IsIntelGPUActive(): self.fail( 'Backgrounded high-performance WebGL context did not release the ' 'hold on the high-performance GPU') @classmethod def ExpectationsFiles(cls): return [ os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_expectations', 'gpu_process_expectations.txt') ] def load_tests(loader, tests, pattern): del loader, tests, pattern # Unused. return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
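# A minimal standalone sketch of the dispatch pattern used above: because the browser
# test runner synthesizes methods with the exact names yielded by GenerateGpuTests,
# each yielded entry also carries '_' + name as its argument and RunActualGpuTest
# looks the hand-written handler up with getattr(). The class and page path below are
# illustrative only; this is not a real gpu_integration_test subclass.
class _DispatchSketch(object):

  def GenerateGpuTests(self):
    tests = (('GpuProcess_example', 'gpu/functional_example.html'),)
    for name, path in tests:
      yield (name, path, ('_' + name,))

  def RunActualGpuTest(self, test_path, *args):
    test_name = args[0]
    getattr(self, test_name)(test_path)

  def _GpuProcess_example(self, test_path):
    print('would run the example test against ' + test_path)


if __name__ == '__main__':
  sketch = _DispatchSketch()
  for name, path, args in sketch.GenerateGpuTests():
    sketch.RunActualGpuTest(path, *args)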
# Copyright 2016 Pinterest, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -*- coding: utf-8 -*- from django.conf.urls import url import alarm_views import capacity_views import config_map_views import env_config_views import env_views import metrics_views import promote_views import webhook_views import deploy_views import build_views import host_views import util_views import security import user_views import docs_views import cluster_view import schedule_views urlpatterns = [ # deploy related url(r'^deploy/inline_update/$', deploy_views.inline_update), url(r'^deploy/(?P<deploy_id>[a-zA-Z0-9\-_]+)', deploy_views.DeployView.as_view()), url(r'^deploys/ongoing/$', deploy_views.get_ongoing_deploys), url(r'^deploys/dailycount', deploy_views.get_daily_deploy_count), url(r'^env/create/$', env_views.post_create_env), # envs related url(r'^envs/recent/$', env_views.get_recent_envs), url(r'^envs/names/$', env_views.get_env_names), url(r'^envs/deploys/$', env_views.get_all_deploys), url(r'^envs/search/(?P<filter>[a-zA-Z0-9\-_]+)/$', env_views.search_envs), url(r'^envs/search/$', env_views.EnvListView.as_view()), url(r'^envs/$', env_views.EnvListView.as_view()), url(r'^envs/enable/$', env_views.enable_all_env_change), url(r'^envs/disable/$', env_views.disable_all_env_change), url(r'^envs/get_tag_message/$', env_views.get_tag_message), # env related url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/add_stage/$', env_views.post_add_stage), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/get_users_config/$', user_views.get_users_config), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/update_users_config/$', user_views.update_users_config), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/get_users/$', user_views.get_users), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/get_user_token/(?P<user_name>[a-zA-Z0-9\-_]+)/$', user_views.get_user_token), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/deploy/$', env_views.EnvLandingView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/$', env_views.EnvLandingView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/$', env_views.EnvLandingView.as_view()), # stage related url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/new_deploy/$', env_views.EnvNewDeployView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/update_deploy_progress/$', env_views.update_deploy_progress), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/check_log_status/$', env_views.logging_status), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/check_log_status_action/$', env_views.check_logging_status), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/update_service_add_ons/$', env_views.update_service_add_ons), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/deploys/$', env_views.get_env_deploys), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/new_commits/$', env_views.get_new_commits), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/compare_deploys/$', env_views.compare_deploys), 
url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/compare_deploys_2/$', env_views.compare_deploys_2), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/pred_deploys/$', env_views.get_pred_deploys), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/(?P<buildId>[a-zA-Z0-9\-_]+)' r'/get_duplicate_commit_message/$', deploy_views.get_duplicate_commit_deploy_message), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/' r'(?P<buildId>[a-zA-Z0-9\-_]+)/warn_for_deploy/$', env_views.warn_for_deploy), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/builds/$', env_views.get_builds), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/upload_private_build/$', env_views.upload_private_build), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/groups/$', env_views.get_groups), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/enable/$', env_views.enable_env_change), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/disable/$', env_views.disable_env_change), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/restart/$', env_views.restart), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/pause/$', env_views.pause), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/resume/$', env_views.resume), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/rollback/$', env_views.rollback), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/rollback_to/' r'(?P<deploy_id>[a-zA-Z0-9\-_]+)', env_views.rollback_to), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/promote_to/' r'(?P<deploy_id>[a-zA-Z0-9\-_]+)$', env_views.promote_to), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/promote/' r'(?P<deploy_id>[a-zA-Z0-9\-_]+)$', env_views.promote), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/build/' r'(?P<build_id>[a-zA-Z0-9\-_]+)', env_views.deploy_build), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/commit/' r'(?P<commit>[a-zA-Z0-9\-_]+)', env_views.deploy_commit), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/deploy/' r'(?P<deploy_id>[a-zA-Z0-9\-_]+)', env_views.get_deploy), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/get_service_metrics/$', util_views.get_service_metrics), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/get_service_alarms/$', util_views.get_service_alarms), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config_history/$', env_views.get_env_config_history), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/get_config_comparison/', env_views.get_config_comparison), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/show_config_comparison/', env_views.show_config_comparison), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/remove/$', env_views.remove_stage), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/hosts/$', env_views.get_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/update_schedule/$', env_views.update_schedule), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/delete_schedule/$', env_views.delete_schedule), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/schedule/$', env_views.get_deploy_schedule), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/override_session/$', env_views.override_session), # environment configs 
url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/$', env_config_views.EnvConfigView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/auto_deploy/$', promote_views.EnvPromoteConfigView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/capacity/$', capacity_views.EnvCapacityConfigView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/map/$', config_map_views.EnvConfigMapView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/alarm/$', alarm_views.EnvAlarmView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/metrics/$', metrics_views.EnvMetricsView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/webhooks/$', webhook_views.EnvWebhooksView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/config/schedule/$', schedule_views.EnvScheduleView.as_view()), # host related url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/hosts/unknowns/$', env_views.get_unknown_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/hosts/provision/$', env_views.get_provisioning_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/hosts/all/$', env_views.get_all_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/hosts/failed/$', env_views.get_failed_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/' r'(?P<deploy_id>[a-zA-Z0-9\-_]+)/hosts/$', env_views.get_hosts_by_deploy), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/reset_failed_hosts/' r'(?P<deploy_id>[a-zA-Z0-9\-_]+)/$', env_views.reset_failed_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/addinstance/$', env_views.add_instance), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/reset_deploy/' r'(?P<host_id>[a-zA-Z0-9\-_]+)/$', env_views.reset_deploy), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/pause_deploy/' r'(?P<host_id>[a-zA-Z0-9\-_]+)/$', env_views.pause_deploy), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/resume_deploy/' r'(?P<host_id>[a-zA-Z0-9\-_]+)/$', env_views.resume_deploy), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/reset_hosts/$', env_views.reset_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/pause_hosts/$', env_views.pause_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/resume_hosts/$', env_views.resume_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/host/(?P<hostname>[a-zA-Z0-9\-_]+)', host_views.HostDetailView.as_view()), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/terminate_hosts/$', cluster_view.terminate_hosts), url(r'^env/(?P<name>[a-zA-Z0-9\-_]+)/(?P<stage>[a-zA-Z0-9\-_]+)/force_terminate_hosts/$', cluster_view.force_terminate_hosts), url(r'^hosts/$', host_views.hosts_list), # builds related url(r'^builds/get_all_builds/$', build_views.get_all_builds), url(r'^builds/search_commit/(?P<commit>[a-zA-Z0-9\-_]+)/$', build_views.search_commit), url(r'^builds/names/(?P<name>[a-zA-Z0-9\-_]+)/builds/$', build_views.list_builds), url(r'^builds/names/(?P<name>[a-zA-Z0-9\-_]+)/branches/$', build_views.list_build_branches), url(r'^builds/names/$', build_views.get_build_names), url(r'^builds/(?P<id>[a-zA-Z0-9\-_]+)/$', build_views.get_build), url(r'^builds/(?P<id>[a-zA-Z0-9\-_]+)/tags/$', build_views.tag_build), 
    url(r'^builds/$', build_views.builds_landing),
    # Commits related
    url(r'^commits/compare_commits/$', build_views.compare_commits),
    url(r'^commits/compare_commits_datatables/$', build_views.compare_commits_datatables),
    url(r'^commits/get_more_commits/$', build_views.get_more_commits),
    # metrics
    url(r'^get_site_health_metrics/$', util_views.get_site_health_metrics),
    url(r'^validate_metrics_url/$', util_views.validate_metrics_url),
    # mix
    url(r'^health_check/$', util_views.health_check),
    url(r'^auth/$', security.login_authorized),
    url(r'^logout/$', security.logout),
    url(r'^loggedout/$', util_views.loggedout),
    url(r'^api-docs/$', docs_views.SwaggerUIView.as_view()),
    url(r'^$', deploy_views.get_landing_page),
]
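
# Illustrative sketch (not part of the urls module above): how Django resolves a
# request path against these patterns. "myservice" and "prod" are made-up
# example values; the assertions mirror the named groups in the regexes.
# Assumes this module is the active ROOT_URLCONF and that django.urls.resolve
# is available (Django >= 1.10; older releases expose it via
# django.core.urlresolvers).
def _example_resolve_env_landing():
    from django.urls import resolve

    match = resolve('/env/myservice/prod/')
    # EnvLandingView.as_view() returns a wrapper function that keeps a
    # reference to the class in its view_class attribute.
    assert match.func.view_class is env_views.EnvLandingView
    assert match.kwargs == {'name': 'myservice', 'stage': 'prod'}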
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 Jacob Kaplan-Moss # Copyright 2011 OpenStack Foundation # Copyright 2012 Grid Dynamics # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base utilities to build API operation managers and objects on top of. """ # E1102: %s is not callable # pylint: disable=E1102 import abc import six import urllib from odlclient.openstack.common.apiclient import exceptions from odlclient.openstack.common.py3kcompat import urlutils from odlclient.openstack.common import strutils def getid(obj): """Return id if argument is a Resource. Abstracts the common pattern of allowing both an object or an object's ID (UUID) as a parameter when dealing with relationships. """ try: if obj.uuid: return obj.uuid except AttributeError: pass try: return obj.id except AttributeError: return obj # TODO(aababilov): call run_hooks() in HookableMixin's child classes class HookableMixin(object): """Mixin so classes can register and run hooks.""" _hooks_map = {} @classmethod def add_hook(cls, hook_type, hook_func): """Add a new hook of specified type. :param cls: class that registers hooks :param hook_type: hook type, e.g., '__pre_parse_args__' :param hook_func: hook function """ if hook_type not in cls._hooks_map: cls._hooks_map[hook_type] = [] cls._hooks_map[hook_type].append(hook_func) @classmethod def run_hooks(cls, hook_type, *args, **kwargs): """Run all hooks of specified type. :param cls: class that registers hooks :param hook_type: hook type, e.g., '__pre_parse_args__' :param **args: args to be passed to every hook function :param **kwargs: kwargs to be passed to every hook function """ hook_funcs = cls._hooks_map.get(hook_type) or [] for hook_func in hook_funcs: hook_func(*args, **kwargs) class BaseManager(HookableMixin): """Basic manager type providing common operations. Managers interact with a particular type of API (servers, flavors, images, etc.) and provide CRUD operations for them. """ resource_class = None def __init__(self, client): """Initializes BaseManager with `client`. :param client: instance of BaseClient descendant for HTTP requests """ super(BaseManager, self).__init__() self.client = client def _extract_data(self, data, response_key=None): """ Extract the data within response_key in data. :param data: The data to handle. :param response_key: The response key to extract. """ return data[response_key] if response_key else data def _make_obj(self, data, obj_class=None): """ Make a object out of obj_class. :param data: The data to handle. :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) """ cls = obj_class or self.resource_class return cls(self, data, loaded=True) def _list(self, url, response_key=None, obj_class=None, json=None, return_raw=False): """List the collection. 
:param url: a partial URL, e.g., '/servers' :param response_key: the key to be looked up in response dictionary, e.g., 'server' :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) :param return_raw: flag to force returning raw JSON instead of Python object of self.resource_class :param json: data that will be encoded as JSON and passed in POST request (GET will be sent by default) """ if json: body = self.client.post(url, json=json).json() else: body = self.client.get(url).json() data = self._extract_data(body, response_key=response_key) if return_raw: return data else: items = [] for i in data: items.append(self._make_obj(i, obj_class=obj_class)) return items def _get(self, url, response_key=None, return_raw=False, obj_class=None): """Get an object from collection. :param url: a partial URL, e.g., '/servers' :param response_key: the key to be looked up in response dictionary, e.g., 'server' :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) :param return_raw: flag to force returning raw JSON instead of Python object of self.resource_class """ # NOTE(ekarlso): If the response key is None, then we just return the # body. body = self.client.get(url).json() data = self._extract_data(body, response_key=response_key) if return_raw: return data else: return self._make_obj(data, obj_class=obj_class) def _head(self, url): """Retrieve request headers for an object. :param url: a partial URL, e.g., '/servers' """ resp = self.client.head(url) return resp.status_code == 204 def _post(self, url, json, response_key=None, return_raw=False, obj_class=None): """Create an object. :param url: a partial URL, e.g., '/servers' :param json: data that will be encoded as JSON and passed in POST request (GET will be sent by default) :param response_key: the key to be looked up in response dictionary, e.g., 'server' :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) :param return_raw: flag to force returning raw JSON instead of Python object of self.resource_class """ body = self.client.post(url, json=json).json() data = self._extract_data(body, response_key=response_key) if return_raw: return data else: return self._make_obj(data, obj_class=obj_class) def _put(self, url, json=None, response_key=None, return_raw=False, obj_class=None): """Update an object with PUT method. :param url: a partial URL, e.g., '/servers' :param json: data that will be encoded as JSON and passed in POST request (GET will be sent by default) :param response_key: the key to be looked up in response dictionary, e.g., 'server' :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) :param return_raw: flag to force returning raw JSON instead of Python object of self.resource_class """ resp = self.client.put(url, json=json) # PUT requests may not return a body if resp.content: body = resp.json() data = self._extract_data(body, response_key=response_key) if return_raw: return data else: return self._make_obj(data, obj_class=obj_class) def _patch(self, url, json=None, response_key=None, return_raw=False, obj_class=None): """Update an object with PATCH method. 
:param url: a partial URL, e.g., '/servers' :param json: data that will be encoded as JSON and passed in POST request (GET will be sent by default) :param response_key: the key to be looked up in response dictionary, e.g., 'server' :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) :param return_raw: flag to force returning raw JSON instead of Python object of self.resource_class """ body = self.client.patch(url, json=json).json() data = self._extract_data(body, response_key=response_key) if return_raw: return data else: return self._make_obj(data, obj_class=obj_class) def _delete(self, url): """Delete an object. :param url: a partial URL, e.g., '/servers/my-server' """ return self.client.delete(url) @six.add_metaclass(abc.ABCMeta) class ManagerWithFind(BaseManager): """Manager with additional `find()`/`findall()` methods.""" @abc.abstractmethod def list(self): pass def find(self, **kwargs): """Find a single item with attributes matching ``**kwargs``. This isn't very efficient: it loads the entire list then filters on the Python side. """ matches = self.findall(**kwargs) num_matches = len(matches) if num_matches == 0: msg = "No %s matching %s." % (self.resource_class.__name__, kwargs) raise exceptions.NotFound(msg) elif num_matches > 1: raise exceptions.NoUniqueMatch() else: return matches[0] def findall(self, **kwargs): """Find all items with attributes matching ``**kwargs``. This isn't very efficient: it loads the entire list then filters on the Python side. """ found = [] searches = kwargs.items() for obj in self.list(): try: if all(getattr(obj, attr) == value for (attr, value) in searches): found.append(obj) except AttributeError: continue return found class CrudManager(BaseManager): """Base manager class for manipulating entities. Children of this class are expected to define a `collection_key` and `key`. - `collection_key`: Usually a plural noun by convention (e.g. `entities`); used to refer collections in both URL's (e.g. `/v3/entities`) and JSON objects containing a list of member resources (e.g. `{'entities': [{}, {}, {}]}`). - `key`: Usually a singular noun by convention (e.g. `entity`); used to refer to an individual member of the collection. """ collection_key = None key = None def build_url(self, base_url=None, **kwargs): """Builds a resource URL for the given kwargs. Given an example collection where `collection_key = 'entities'` and `key = 'entity'`, the following URL's could be generated. By default, the URL will represent a collection of entities, e.g.:: /entities If kwargs contains an `entity_id`, then the URL will represent a specific member, e.g.:: /entities/{entity_id} :param base_url: if provided, the generated URL will be appended to it """ url = base_url if base_url is not None else '' url += '/%s' % self.collection_key # do we have a specific entity? 
entity_id = kwargs.get('%s_id' % self.key) if entity_id is not None: url += '/%s' % entity_id return url def _filter_kwargs(self, kwargs): """Drop null values and handle ids.""" for key, ref in kwargs.copy().iteritems(): if ref is None: kwargs.pop(key) else: if isinstance(ref, Resource): kwargs.pop(key) kwargs['%s_id' % key] = getid(ref) return kwargs def create(self, **kwargs): kwargs = self._filter_kwargs(kwargs) return self._post( self.build_url(**kwargs), {self.key: kwargs}, self.key) def get(self, **kwargs): kwargs = self._filter_kwargs(kwargs) return self._get( self.build_url(**kwargs), self.key) def head(self, **kwargs): kwargs = self._filter_kwargs(kwargs) return self._head(self.build_url(**kwargs)) def list(self, base_url=None, **kwargs): """List the collection. :param base_url: if provided, the generated URL will be appended to it """ kwargs = self._filter_kwargs(kwargs) return self._list( '%(base_url)s%(query)s' % { 'base_url': self.build_url(base_url=base_url, **kwargs), 'query': '?%s' % urlutils.urlencode(kwargs) if kwargs else '', }, self.collection_key) def put(self, base_url=None, **kwargs): """Update an element. :param base_url: if provided, the generated URL will be appended to it """ kwargs = self._filter_kwargs(kwargs) return self._put(self.build_url(base_url=base_url, **kwargs)) def update(self, **kwargs): kwargs = self._filter_kwargs(kwargs) params = kwargs.copy() params.pop('%s_id' % self.key) return self._patch( self.build_url(**kwargs), {self.key: params}, self.key) def delete(self, **kwargs): kwargs = self._filter_kwargs(kwargs) return self._delete( self.build_url(**kwargs)) def find(self, base_url=None, **kwargs): """Find a single item with attributes matching ``**kwargs``. :param base_url: if provided, the generated URL will be appended to it """ kwargs = self._filter_kwargs(kwargs) rl = self._list( '%(base_url)s%(query)s' % { 'base_url': self.build_url(base_url=base_url, **kwargs), 'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '', }, self.collection_key) num = len(rl) if num == 0: msg = "No %s matching %s." % (self.resource_class.__name__, kwargs) raise exceptions.NotFound(404, msg) elif num > 1: raise exceptions.NoUniqueMatch else: return rl[0] class Extension(HookableMixin): """Extension descriptor.""" SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__') manager_class = None def __init__(self, name, module): super(Extension, self).__init__() self.name = name self.module = module self._parse_extension_module() def _parse_extension_module(self): self.manager_class = None for attr_name, attr_value in self.module.__dict__.items(): if attr_name in self.SUPPORTED_HOOKS: self.add_hook(attr_name, attr_value) else: try: if issubclass(attr_value, BaseManager): self.manager_class = attr_value except TypeError: pass def __repr__(self): return "<Extension '%s'>" % self.name class Resource(object): """Base class for OpenStack resources (tenant, user, etc.). This is pretty much just a bag for attributes. """ HUMAN_ID = False NAME_ATTR = 'name' def __init__(self, manager, info, loaded=False): """Populate and bind to a manager. 
:param manager: BaseManager object :param info: dictionary representing resource attributes :param loaded: prevent lazy-loading if set to True """ self.manager = manager self._info = info self._add_details(info) self._loaded = loaded def __repr__(self): reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and k != 'manager') info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) return "<%s %s>" % (self.__class__.__name__, info) @property def human_id(self): """Human-readable ID which can be used for bash completion. """ if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID: return strutils.to_slug(getattr(self, self.NAME_ATTR)) return None def _add_details(self, info): for (k, v) in info.iteritems(): try: setattr(self, k, v) self._info[k] = v except AttributeError: # In this case we already defined the attribute on the class pass def __getattr__(self, k): if k not in self.__dict__: #NOTE(bcwaldon): disallow lazy-loading if already loaded once if not self.is_loaded(): self.get() return self.__getattr__(k) raise AttributeError(k) else: return self.__dict__[k] def get(self): # set_loaded() first ... so if we have to bail, we know we tried. self.set_loaded(True) if not hasattr(self.manager, 'get'): return new = self.manager.get(self.id) if new: self._add_details(new._info) def __eq__(self, other): if not isinstance(other, Resource): return NotImplemented # two resources of different types are not equal if not isinstance(other, self.__class__): return False if hasattr(self, 'id') and hasattr(other, 'id'): return self.id == other.id return self._info == other._info def is_loaded(self): return self._loaded def set_loaded(self, val): self._loaded = val
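
# Illustrative sketch (not part of the module above): what a concrete manager
# built on CrudManager/Resource could look like. "Flow", "flows" and the
# /flows endpoint are hypothetical names chosen for illustration; they are not
# defined by odlclient.
class _ExampleFlow(Resource):
    """Plain attribute bag for a single 'flow' entry."""
    pass


class _ExampleFlowManager(CrudManager):
    resource_class = _ExampleFlow
    collection_key = 'flows'   # build_url() -> '/flows'
    key = 'flow'               # build_url(flow_id=...) -> '/flows/{flow_id}'

# Given a client exposing get/post/put/patch/delete/head (a BaseClient
# descendant), the CRUD calls map onto the helpers above, e.g.:
#   manager = _ExampleFlowManager(client)
#   flow = manager.create(name='f1')   # POST /flows with {'flow': {'name': 'f1'}}
#   manager.get(flow=flow)             # GET  /flows/{flow.id} via _filter_kwargs
#   manager.list()                     # GET  /flows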
# Copyright 2006-2009 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Dictionary that is passed as defines for js2c.py. # Used for defines that must be defined for all native JS files. define NONE = 0; define READ_ONLY = 1; define DONT_ENUM = 2; define DONT_DELETE = 4; define NEW_ONE_BYTE_STRING = true; define NEW_TWO_BYTE_STRING = false; # Constants used for getter and setter operations. define GETTER = 0; define SETTER = 1; # Safe maximum number of arguments to push to stack, when multiplied by # pointer size. Used by Function.prototype.apply(), Reflect.apply() and # Reflect.construct(). define kSafeArgumentsLength = 0x800000; # 2^53 - 1 define kMaxSafeInteger = 9007199254740991; # 2^32 - 1 define kMaxUint32 = 4294967295; # Strict mode flags for passing to %SetProperty define kSloppyMode = 0; define kStrictMode = 1; # Native cache ids. define STRING_TO_REGEXP_CACHE_ID = 0; # Type query macros. # # Note: We have special support for typeof(foo) === 'bar' in the compiler. # It will *not* generate a runtime typeof call for the most important # values of 'bar'. 
macro IS_ARRAY(arg) = (%_IsArray(arg)); macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer'); macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean'); macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean'); macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView'); macro IS_DATE(arg) = (%IsDate(arg)); macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error'); macro IS_FUNCTION(arg) = (%IsFunction(arg)); macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator'); macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global'); macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map'); macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator'); macro IS_NULL(arg) = (arg === null); macro IS_NULL_OR_UNDEFINED(arg) = (arg == null); macro IS_NUMBER(arg) = (typeof(arg) === 'number'); macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number'); macro IS_OBJECT(arg) = (typeof(arg) === 'object'); macro IS_PROXY(arg) = (%_IsJSProxy(arg)); macro IS_REGEXP(arg) = (%_IsRegExp(arg)); macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script'); macro IS_SET(arg) = (%_ClassOf(arg) === 'Set'); macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator'); macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer'); macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg)); macro IS_STRING(arg) = (typeof(arg) === 'string'); macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String'); macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol'); macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol'); macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg)); macro IS_UNDEFINED(arg) = (arg === (void 0)); macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap'); macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet'); # Macro for ES queries of the type: "Type(O) is Object." macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg)); # Macro for ES queries of the type: "IsCallable(O)" macro IS_CALLABLE(arg) = (typeof(arg) === 'function'); # Macro for ES6 CheckObjectCoercible # Will throw a TypeError of the form "[functionName] called on null or undefined". macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName); # Inline macros. Use %IS_VAR to make sure arg is evaluated only once. macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg)); macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0))); macro TO_BOOLEAN(arg) = (!!(arg)); macro TO_INTEGER(arg) = (%_ToInteger(arg)); macro TO_INT32(arg) = ((arg) | 0); macro TO_UINT32(arg) = ((arg) >>> 0); macro INVERT_NEG_ZERO(arg) = ((arg) + 0); macro TO_LENGTH(arg) = (%_ToLength(arg)); macro TO_STRING(arg) = (%_ToString(arg)); macro TO_NUMBER(arg) = (%_ToNumber(arg)); macro TO_OBJECT(arg) = (%_ToObject(arg)); macro TO_PRIMITIVE(arg) = (%_ToPrimitive(arg)); macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg)); macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg)); macro TO_NAME(arg) = (%_ToName(arg)); macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null"); macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key)); # Private names. macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym)); macro HAS_PRIVATE(obj, key) = HAS_OWN_PROPERTY(obj, key); macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym])); macro GET_PRIVATE(obj, sym) = (obj[sym]); macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val); # To avoid ES2015 Function name inference. 
macro ANONYMOUS_FUNCTION(fn) = (0, (fn)); # Constants. The compiler constant folds them. define INFINITY = (1/0); define UNDEFINED = (void 0); # Macros implemented in Python. python macro CHAR_CODE(str) = ord(str[1]); # Constants used on an array to implement the properties of the RegExp object. define REGEXP_NUMBER_OF_CAPTURES = 0; define REGEXP_FIRST_CAPTURE = 3; # Macros for internal slot access. macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1); macro REGEXP_IGNORE_CASE(regexp) = (%_RegExpFlags(regexp) & 2); macro REGEXP_MULTILINE(regexp) = (%_RegExpFlags(regexp) & 4); macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8); macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16); macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp)); # We can't put macros in macros so we use constants here. # REGEXP_NUMBER_OF_CAPTURES macro NUMBER_OF_CAPTURES(array) = ((array)[0]); # Last input and last subject of regexp matches. define LAST_SUBJECT_INDEX = 1; macro LAST_SUBJECT(array) = ((array)[1]); macro LAST_INPUT(array) = ((array)[2]); # REGEXP_FIRST_CAPTURE macro CAPTURE(index) = (3 + (index)); define CAPTURE0 = 3; define CAPTURE1 = 4; # For the regexp capture override array. This has the same # format as the arguments to a function called from # String.prototype.replace. macro OVERRIDE_MATCH(override) = ((override)[0]); macro OVERRIDE_POS(override) = ((override)[(override).length - 2]); macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]); # 1-based so index of 1 returns the first capture macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]); # For messages.js # Matches Script::Type from objects.h define TYPE_NATIVE = 0; define TYPE_EXTENSION = 1; define TYPE_NORMAL = 2; # Matches Script::CompilationType from objects.h define COMPILATION_TYPE_HOST = 0; define COMPILATION_TYPE_EVAL = 1; define COMPILATION_TYPE_JSON = 2; # Matches Messages::kNoLineNumberInfo from v8.h define kNoLineNumberInfo = 0; # Must match PropertyFilter in property-details.h define PROPERTY_FILTER_NONE = 0; define PROPERTY_FILTER_ONLY_ENUMERABLE = 2; define PROPERTY_FILTER_SKIP_STRINGS = 8; define PROPERTY_FILTER_SKIP_SYMBOLS = 16; # Use for keys, values and entries iterators. define ITERATOR_KIND_KEYS = 1; define ITERATOR_KIND_VALUES = 2; define ITERATOR_KIND_ENTRIES = 3; macro FIXED_ARRAY_GET(array, index) = (%_FixedArrayGet(array, (index) | 0)); macro FIXED_ARRAY_SET(array, index, value) = (%_FixedArraySet(array, (index) | 0, value)); # TODO(adamk): Find a more robust way to force Smi representation. 
macro FIXED_ARRAY_SET_SMI(array, index, value) = (FIXED_ARRAY_SET(array, index, (value) | 0)); macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 0)); macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 1)); macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count)); macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 2)); macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 2, count)); macro ORDERED_HASH_TABLE_BUCKET_AT(table, bucket) = (FIXED_ARRAY_GET(table, 3 + (bucket))); macro ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry) = (FIXED_ARRAY_SET(table, 3 + (bucket), entry)); macro ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets) = (hash & ((numBuckets) - 1)); macro ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) << 1)); macro ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets))); macro ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) + 1)); macro ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) * 3)); macro ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets))); macro ORDERED_HASH_MAP_VALUE_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 1)); macro ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 2)); # Must match OrderedHashTable::kNotFound. define NOT_FOUND = -1; # Check whether debug is active. define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0); # SharedFlag equivalents define kNotShared = false; define kShared = true; # UseCounters from include/v8.h define kUseAsm = 0; define kBreakIterator = 1; define kLegacyConst = 2; define kMarkDequeOverflow = 3; define kStoreBufferOverflow = 4; define kSlotsBufferOverflow = 5; define kForcedGC = 7; define kSloppyMode = 8; define kStrictMode = 9; define kRegExpPrototypeStickyGetter = 11; define kRegExpPrototypeToString = 12; define kRegExpPrototypeUnicodeGetter = 13; define kIntlV8Parse = 14; define kIntlPattern = 15; define kIntlResolved = 16; define kPromiseChain = 17; define kPromiseAccept = 18; define kPromiseDefer = 19; define kHtmlCommentInExternalScript = 20; define kHtmlComment = 21; define kSloppyModeBlockScopedFunctionRedefinition = 22; define kForInInitializer = 23; define kArrayProtectorDirtied = 24; define kArraySpeciesModified = 25; define kArrayPrototypeConstructorModified = 26; define kArrayInstanceProtoModified = 27; define kArrayInstanceConstructorModified = 28; define kLegacyFunctionDeclaration = 29; define kRegExpPrototypeSourceGetter = 30; define kRegExpPrototypeOldFlagGetter = 31;
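
# Illustrative sketch (not part of the V8 macros file above): the rough effect
# of a "define" line is textual substitution of the name with its literal value
# in the native JS sources before compilation. The snippet below is a
# simplified Python stand-in written for this document, not js2c.py's actual
# implementation; the two constants are copied from the defines above.
import re as _re

_EXAMPLE_DEFINES = {
    "kMaxSafeInteger": "9007199254740991",
    "kMaxUint32": "4294967295",
}

def _example_expand_defines(js_source):
    # Whole-word replacement of each define name with its literal value.
    for name, value in _EXAMPLE_DEFINES.items():
        js_source = _re.sub(r"\b%s\b" % name, value, js_source)
    return js_source

# _example_expand_defines("if (index > kMaxUint32) return;")
# -> "if (index > 4294967295) return;"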
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Rally specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to tests/unit/test_hacking.py """ import functools import re import tokenize re_assert_true_instance = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " r"(\w|\.|\'|\"|\[|\])+\)\)") re_assert_equal_type = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " r"(\w|\.|\'|\"|\[|\])+\)") re_assert_equal_end_with_none = re.compile(r"assertEqual\(.*?,\s+None\)$") re_assert_equal_start_with_none = re.compile(r"assertEqual\(None,") re_assert_true_false_with_in_or_not_in = re.compile( r"assert(True|False)\(" r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)") re_assert_true_false_with_in_or_not_in_spaces = re.compile( r"assert(True|False)\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+" r"[\[|'|\"](, .*)?\)") re_assert_equal_in_end_with_true_or_false = re.compile( r"assertEqual\((\w|[][.'\"])+( not)? in (\w|[][.'\", ])+, (True|False)\)") re_assert_equal_in_start_with_true_or_false = re.compile( r"assertEqual\((True|False), (\w|[][.'\"])+( not)? in (\w|[][.'\", ])+\)") re_no_construct_dict = re.compile( r"\sdict\(\)") re_no_construct_list = re.compile( r"\slist\(\)") re_str_format = re.compile(r""" % # start of specifier \(([^)]+)\) # mapping key, in group 1 [#0 +\-]? # optional conversion flag (?:-?\d*)? # optional minimum field width (?:\.\d*)? # optional precision [hLl]? 
# optional length modifier [A-z%] # conversion modifier """, re.X) re_raises = re.compile( r"\s:raise[^s] *.*$|\s:raises *:.*$|\s:raises *[^:]+$") re_db_import = re.compile(r"^from rally.common import db") re_objects_import = re.compile(r"^from rally.common import objects") re_old_type_class = re.compile(r"^\s*class \w+(\(\))?:") re_datetime_alias = re.compile(r"^(from|import) datetime(?!\s+as\s+dt$)") def skip_ignored_lines(func): @functools.wraps(func) def wrapper(logical_line, physical_line, filename): line = physical_line.strip() if not line or line.startswith("#") or line.endswith("# noqa"): return yield next(func(logical_line, physical_line, filename)) return wrapper def _parse_assert_mock_str(line): point = line.find(".assert_") if point == -1: point = line.find(".called_once_with(") if point != -1: end_pos = line[point:].find("(") + point return point, line[point + 1: end_pos], line[: point] else: return None, None, None @skip_ignored_lines def check_assert_methods_from_mock(logical_line, physical_line, filename): """Ensure that ``assert_*`` methods from ``mock`` library is used correctly N301 - base error number N302 - related to nonexistent "assert_called" N303 - related to nonexistent "assert_called_once" N304 - related to nonexistent "called_once_with" """ correct_names = ["assert_any_call", "assert_called_once_with", "assert_called_with", "assert_has_calls", "assert_not_called"] ignored_files = ["./tests/unit/test_hacking.py"] if filename.startswith("./tests") and filename not in ignored_files: pos, method_name, obj_name = _parse_assert_mock_str(logical_line) if pos: if method_name not in correct_names: error_number = "N301" msg = ("%(error_number)s:'%(method)s' is not present in `mock`" " library. %(custom_msg)s For more details, visit " "http://www.voidspace.org.uk/python/mock/ .") if method_name == "assert_called": error_number = "N302" custom_msg = ("Maybe, you should try to use " "'assertTrue(%s.called)' instead." % obj_name) elif method_name == "assert_called_once": # For more details, see a bug in Rally: # https://bugs.launchpad.net/rally/+bug/1305991 error_number = "N303" custom_msg = ("Maybe, you should try to use " "'assertEqual(1, %s.call_count)' " "or '%s.assert_called_once_with()'" " instead." % (obj_name, obj_name)) elif method_name == "called_once_with": error_number = "N304" custom_msg = ("Maybe, you should try to use " "'%s.assert_called_once_with()'" " instead." % obj_name) else: custom_msg = ("Correct 'assert_*' methods: '%s'." % "', '".join(correct_names)) yield (pos, msg % { "error_number": error_number, "method": method_name, "custom_msg": custom_msg}) @skip_ignored_lines def check_import_of_logging(logical_line, physical_line, filename): """Check correctness import of logging module N310 """ excluded_files = ["./rally/common/logging.py", "./tests/unit/test_logging.py", "./tests/ci/rally_verify.py"] forbidden_imports = ["from oslo_log", "import oslo_log", "import logging"] if filename not in excluded_files: for forbidden_import in forbidden_imports: if logical_line.startswith(forbidden_import): yield (0, "N310 Wrong module for logging is imported. Please " "use `rally.common.logging` instead.") @skip_ignored_lines def no_translate_debug_logs(logical_line, physical_line, filename): """Check for "LOG.debug(_(" As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. 
* Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. N311 """ if logical_line.startswith("LOG.debug(_("): yield(0, "N311 Don't translate debug level logs") @skip_ignored_lines def no_use_conf_debug_check(logical_line, physical_line, filename): """Check for "cfg.CONF.debug" Rally has two DEBUG level: - Full DEBUG, which include all debug-messages from all OpenStack services - Rally DEBUG, which include only Rally debug-messages so we should use custom check to know debug-mode, instead of CONF.debug N312 """ excluded_files = ["./rally/common/logging.py"] point = logical_line.find("CONF.debug") if point != -1 and filename not in excluded_files: yield(point, "N312 Don't use `CONF.debug`. " "Function `rally.common.logging.is_debug` " "should be used instead.") @skip_ignored_lines def assert_true_instance(logical_line, physical_line, filename): """Check for assertTrue(isinstance(a, b)) sentences N320 """ if re_assert_true_instance.match(logical_line): yield (0, "N320 assertTrue(isinstance(a, b)) sentences not allowed, " "you should use assertIsInstance(a, b) instead.") @skip_ignored_lines def assert_equal_type(logical_line, physical_line, filename): """Check for assertEqual(type(A), B) sentences N321 """ if re_assert_equal_type.match(logical_line): yield (0, "N321 assertEqual(type(A), B) sentences not allowed, " "you should use assertIsInstance(a, b) instead.") @skip_ignored_lines def assert_equal_none(logical_line, physical_line, filename): """Check for assertEqual(A, None) or assertEqual(None, A) sentences N322 """ res = (re_assert_equal_start_with_none.search(logical_line) or re_assert_equal_end_with_none.search(logical_line)) if res: yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) " "sentences not allowed, you should use assertIsNone(A) " "instead.") @skip_ignored_lines def assert_true_or_false_with_in(logical_line, physical_line, filename): """Check assertTrue/False(A in/not in B) with collection contents Check for assertTrue/False(A in B), assertTrue/False(A not in B), assertTrue/False(A in B, message) or assertTrue/False(A not in B, message) sentences. N323 """ res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or re_assert_true_false_with_in_or_not_in_spaces.search(logical_line)) if res: yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not " "allowed, you should use assertIn(A, B) or assertNotIn(A, B)" " instead.") @skip_ignored_lines def assert_equal_in(logical_line, physical_line, filename): """Check assertEqual(A in/not in B, True/False) with collection contents Check for assertEqual(A in B, True/False), assertEqual(True/False, A in B), assertEqual(A not in B, True/False) or assertEqual(True/False, A not in B) sentences. N324 """ res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or re_assert_equal_in_start_with_true_or_false.search(logical_line)) if res: yield (0, "N324: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in/not in B, True/False) when checking " "collection contents.") @skip_ignored_lines def check_no_direct_rally_objects_import(logical_line, physical_line, filename): """Check if rally.common.objects are properly imported. If you import "from rally.common import objects" you are able to use objects directly like objects.Task. 
N340 """ if filename == "./rally/common/objects/__init__.py": return if filename == "./rally/common/objects/endpoint.py": return if (logical_line.startswith("from rally.common.objects") or logical_line.startswith("import rally.common.objects.")): yield (0, "N340: Import objects module:" "`from rally.common import objects`. " "After that you can use directly objects e.g. objects.Task") @skip_ignored_lines def check_no_oslo_deprecated_import(logical_line, physical_line, filename): """Check if oslo.foo packages are not imported instead of oslo_foo ones. Libraries from oslo.foo namespace are deprecated because of namespace problems. N341 """ if (logical_line.startswith("from oslo.") or logical_line.startswith("import oslo.")): yield (0, "N341: Import oslo module: `from oslo_xyz import ...`. " "The oslo.xyz namespace was deprecated, use oslo_xyz " "instead") @skip_ignored_lines def check_quotes(logical_line, physical_line, filename): """Check that single quotation marks are not used N350 """ in_string = False in_multiline_string = False single_quotas_are_used = False check_tripple = ( lambda line, i, char: ( i + 2 < len(line) and (char == line[i] == line[i + 1] == line[i + 2]) ) ) i = 0 while i < len(logical_line): char = logical_line[i] if in_string: if char == "\"": in_string = False if char == "\\": i += 1 # ignore next char elif in_multiline_string: if check_tripple(logical_line, i, "\""): i += 2 # skip next 2 chars in_multiline_string = False elif char == "#": break elif char == "'": single_quotas_are_used = True break elif char == "\"": if check_tripple(logical_line, i, "\""): in_multiline_string = True i += 3 continue in_string = True i += 1 if single_quotas_are_used: yield (i, "N350 Remove Single quotes") @skip_ignored_lines def check_no_constructor_data_struct(logical_line, physical_line, filename): """Check that data structs (lists, dicts) are declared using literals N351 """ match = re_no_construct_dict.search(logical_line) if match: yield (0, "N351 Remove dict() construct and use literal {}") match = re_no_construct_list.search(logical_line) if match: yield (0, "N351 Remove list() construct and use literal []") def check_dict_formatting_in_string(logical_line, tokens): """Check that strings do not use dict-formatting with a single replacement N352 """ # NOTE(stpierre): Can't use @skip_ignored_lines here because it's # a stupid decorator that only works on functions that take # (logical_line, filename) as arguments. if (not logical_line or logical_line.startswith("#") or logical_line.endswith("# noqa")): return current_string = "" in_string = False for token_type, text, start, end, line in tokens: if token_type == tokenize.STRING: if not in_string: current_string = "" in_string = True current_string += text.strip("\"") elif token_type == tokenize.OP: if not current_string: continue # NOTE(stpierre): The string formatting operator % has # lower precedence than +, so we assume that the logical # string has concluded whenever we hit an operator of any # sort. (Most operators don't work for strings anyway.) 
# Some string operators do have higher precedence than %, # though, so you can technically trick this check by doing # things like: # # "%(foo)s" * 1 % {"foo": 1} # "%(foo)s"[:] % {"foo": 1} # # It also will produce false positives if you use explicit # parenthesized addition for two strings instead of # concatenation by juxtaposition, e.g.: # # ("%(foo)s" + "%(bar)s") % vals # # But if you do any of those things, then you deserve all # of the horrible things that happen to you, and probably # many more. in_string = False if text == "%": format_keys = set() for match in re_str_format.finditer(current_string): format_keys.add(match.group(1)) if len(format_keys) == 1: yield (0, "N353 Do not use mapping key string formatting " "with a single key") if text != ")": # NOTE(stpierre): You can have a parenthesized string # followed by %, so a closing paren doesn't obviate # the possibility for a substitution operator like # every other operator does. current_string = "" elif token_type in (tokenize.NL, tokenize.COMMENT): continue else: in_string = False if token_type == tokenize.NEWLINE: current_string = "" @skip_ignored_lines def check_using_unicode(logical_line, physical_line, filename): """Check crosspython unicode usage N353 """ if re.search(r"\bunicode\(", logical_line): yield (0, "N353 'unicode' function is absent in python3. Please " "use 'six.text_type' instead.") def check_raises(physical_line, filename): """Check raises usage N354 """ ignored_files = ["./tests/unit/test_hacking.py", "./tests/hacking/checks.py"] if filename not in ignored_files: if re_raises.search(physical_line): return (0, "N354 ':Please use ':raises Exception: conditions' " "in docstrings.") @skip_ignored_lines def check_old_type_class(logical_line, physical_line, filename): """Use new-style Python classes N355 """ if re_old_type_class.search(logical_line): yield (0, "N355 This class does not inherit from anything and thus " "will be an old-style class by default. 
Try to inherit from " "``object`` or another new-style class.") @skip_ignored_lines def check_datetime_alias(logical_line, physical_line, filename): """Ensure using ``dt`` as alias for ``datetime`` N356 """ if re_datetime_alias.search(logical_line): yield (0, "N356 Please use ``dt`` as alias for ``datetime``.") @skip_ignored_lines def check_db_imports_in_cli(logical_line, physical_line, filename): """Ensure that CLI modules do not use ``rally.common.db`` N360 """ if (not filename.startswith("./rally/cli") or filename == "./rally/cli/manage.py"): return if re_db_import.search(logical_line): yield (0, "N360 CLI modules do not allow to work with " "`rally.common.db``.") @skip_ignored_lines def check_objects_imports_in_cli(logical_line, physical_line, filename): """Ensure that CLI modules do not use ``rally.common.objects`` N361 """ if (not filename.startswith("./rally/cli") or filename == "./rally/cli/commands/show.py"): return if re_objects_import.search(logical_line): yield (0, "N361 CLI modules do not allow to work with " "`rally.common.objects``.") def factory(register): register(check_assert_methods_from_mock) register(check_import_of_logging) register(no_translate_debug_logs) register(no_use_conf_debug_check) register(assert_true_instance) register(assert_equal_type) register(assert_equal_none) register(assert_true_or_false_with_in) register(assert_equal_in) register(check_no_direct_rally_objects_import) register(check_no_oslo_deprecated_import) register(check_quotes) register(check_no_constructor_data_struct) register(check_dict_formatting_in_string) register(check_using_unicode) register(check_raises) register(check_datetime_alias) register(check_db_imports_in_cli) register(check_objects_imports_in_cli) register(check_old_type_class)
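
# Illustrative sketch (not part of the checks module above): how a single check
# behaves when driven the way flake8/hacking drives it. The sample source line
# and filename are made up for illustration.
def _example_run_assert_equal_none():
    bad_line = "self.assertEqual(None, task.status)"
    # Decorated checks are generators over (offset, message) tuples.
    errors = list(assert_equal_none(bad_line, bad_line, "./tests/unit/foo.py"))
    assert errors and errors[0][1].startswith("N322")
    return errors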
#!/usr/bin/env python # Copyright (c) 2011-2021, wradlib developers. # Distributed under the MIT License. See LICENSE.txt for more info. import tempfile from dataclasses import dataclass import numpy as np import pytest from wradlib import georef, io, zonalstats from . import osr, requires_gdal, requires_geos np.set_printoptions( edgeitems=3, infstr="inf", linewidth=75, nanstr="nan", precision=8, suppress=False, threshold=1000, formatter=None, ) @pytest.fixture def data_base(): @dataclass(init=False, repr=False, eq=False) class Data: # GK3-Projection proj = osr.SpatialReference() proj.ImportFromEPSG(31466) # create synthetic box box0 = np.array( [ [2600000.0, 5630000.0], [2600000.0, 5640000.0], [2610000.0, 5640000.0], [2610000.0, 5630000.0], [2600000.0, 5630000.0], ] ) box1 = np.array( [ [2610000.0, 5630000.0], [2610000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5630000.0], [2610000.0, 5630000.0], ] ) box3 = np.array( [ [2595000.0, 5625000.0], [2595000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5625000.0], [2595000.0, 5625000.0], ] ) box4 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5645000.0], [2625000.0, 5645000.0], [2625000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box5 = np.array( [ # [2600000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5630000.0], [2600000.0, 5630000.0], [2600000.0, 5635000.0], [2605000.0, 5635000.0], ] ) box6 = np.array( [ # [2615000.0, 5635000.0], [2615000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5635000.0], [2615000.0, 5635000.0], [2615000.0, 5640000.0], ] ) box7 = np.array( [ [2715000.0, 5635000.0], [2715000.0, 5640000.0], [2720000.0, 5640000.0], [2720000.0, 5635000.0], [2715000.0, 5635000.0], ] ) src = np.array([box0, box1]) trg = np.array([box3, box4]) dst = np.array([[box5], [box6]]) zdb = zonalstats.ZonalDataBase(src, trg, srs=proj) f = tempfile.NamedTemporaryFile(mode="w+b").name zdb.dump_vector(f) yield Data @requires_geos class TestZonalDataBase: @requires_gdal def test___init__(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) assert isinstance(zdb.src, io.VectorSource) assert isinstance(zdb.trg, io.VectorSource) assert isinstance(zdb.dst, io.VectorSource) assert zdb._count_intersections == 2 zd = io.VectorSource(data_base.src, name="src", srs=data_base.proj) zdb = zonalstats.ZonalDataBase(zd, data_base.trg, srs=data_base.proj) assert isinstance(zdb.src, io.VectorSource) assert isinstance(zdb.trg, io.VectorSource) assert isinstance(zdb.dst, io.VectorSource) assert zdb._count_intersections == 2 zd1 = io.VectorSource(data_base.src, name="src", srs=data_base.proj) zd2 = io.VectorSource(data_base.trg, name="trg", srs=data_base.proj) zdb = zonalstats.ZonalDataBase(zd1, zd2, srs=data_base.proj) assert isinstance(zdb.src, io.VectorSource) assert isinstance(zdb.trg, io.VectorSource) assert isinstance(zdb.dst, io.VectorSource) assert zdb._count_intersections == 2 @requires_gdal def test_count_intersections(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) assert zdb.count_intersections == 2 @requires_gdal def test_srs(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) assert zdb.srs == data_base.proj @requires_gdal def test_isecs(self, data_base): # todo: Normalize Polygons before comparison. 
zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) np.testing.assert_equal(zdb.isecs, data_base.dst) @requires_gdal def test_get_isec(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) np.testing.assert_equal(zdb.get_isec(0), [data_base.box5]) np.testing.assert_equal(zdb.get_isec(1), [data_base.box6]) @requires_gdal def test_get_source_index(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) assert zdb.get_source_index(0) == 0 assert zdb.get_source_index(1) == 1 @requires_gdal def test_dump_vector(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) f = tempfile.NamedTemporaryFile(mode="w+b").name zdb.dump_vector(f) @requires_gdal def test_load_vector(self, data_base): zonalstats.ZonalDataBase(data_base.f) @requires_gdal def test__get_intersection(self, data_base): zdb = zonalstats.ZonalDataBase(data_base.src, data_base.trg, srs=data_base.proj) with pytest.raises(TypeError): zdb._get_intersection() np.testing.assert_equal( zdb._get_intersection(trg=data_base.box3), [data_base.box5] ) np.testing.assert_equal(zdb._get_intersection(idx=0), [data_base.box5]) with pytest.raises(TypeError): zdb._get_intersection(idx=2) zdb = zonalstats.ZonalDataBase( data_base.src, [data_base.box7], srs=data_base.proj ) zdb.trg = None with pytest.raises(TypeError): zdb._get_intersection(idx=0) @pytest.fixture def data_poly(): @dataclass(init=False, repr=False, eq=False) class Data: # GK3-Projection proj = osr.SpatialReference() proj.ImportFromEPSG(31466) # create synthetic box box0 = np.array( [ [2600000.0, 5630000.0], [2600000.0, 5640000.0], [2610000.0, 5640000.0], [2610000.0, 5630000.0], [2600000.0, 5630000.0], ] ) box1 = np.array( [ [2610000.0, 5630000.0], [2610000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5630000.0], [2610000.0, 5630000.0], ] ) box3 = np.array( [ [2595000.0, 5625000.0], [2595000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5625000.0], [2595000.0, 5625000.0], ] ) box4 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5645000.0], [2625000.0, 5645000.0], [2625000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box5 = np.array( [ [2600000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5630000.0], [2600000.0, 5630000.0], [2600000.0, 5635000.0], ] ) box6 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box7 = np.array( [ [2715000.0, 5635000.0], [2715000.0, 5640000.0], [2720000.0, 5640000.0], [2720000.0, 5635000.0], [2715000.0, 5635000.0], ] ) src = np.array([box0, box1]) trg = np.array([box3, box4]) dst = np.array([[box5], [box6]]) zdb = zonalstats.ZonalDataBase(src, trg, srs=proj) f = tempfile.NamedTemporaryFile(mode="w+b").name zdb.dump_vector(f) yield Data @requires_geos class TestZonalDataPoly: @requires_gdal def test__get_idx_weights(self, data_poly): zdp = zonalstats.ZonalDataPoly(data_poly.src, data_poly.trg, srs=data_poly.proj) assert zdp._get_idx_weights() == ( [np.array([0]), np.array([1])], [np.array([25000000.0]), np.array([25000000.0])], ) @pytest.fixture def data_point(): @dataclass(init=False, repr=False, eq=False) class Data: # GK3-Projection proj = osr.SpatialReference() proj.ImportFromEPSG(31466) # create synthetic box point0 = np.array([2600000.0, 5630000.0]) point1 = np.array([2620000.0, 5640000.0]) box3 = np.array( [ [2595000.0, 5625000.0], [2595000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 
5625000.0], [2595000.0, 5625000.0], ] ) box4 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5645000.0], [2625000.0, 5645000.0], [2625000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box5 = np.array( [ [2600000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5630000.0], [2600000.0, 5630000.0], [2600000.0, 5635000.0], ] ) box6 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box7 = np.array( [ [2715000.0, 5635000.0], [2715000.0, 5640000.0], [2720000.0, 5640000.0], [2720000.0, 5635000.0], [2715000.0, 5635000.0], ] ) src = np.array([point0, point1]) trg = np.array([box3, box4]) dst = np.array([[point0], [point1]]) zdb = zonalstats.ZonalDataBase(src, trg, srs=proj) f = tempfile.NamedTemporaryFile(mode="w+b").name zdb.dump_vector(f) yield Data @requires_geos class TestZonalDataPoint: @requires_gdal def test__get_idx_weights(self, data_point): zdp = zonalstats.ZonalDataPoint( data_point.src, data_point.trg, srs=data_point.proj ) assert zdp._get_idx_weights() == ( [np.array([0]), np.array([1])], [np.array([1.0]), np.array([1.0])], ) @pytest.fixture def stats_base(): @dataclass(init=False, repr=False, eq=False) class Data: # GK3-Projection proj = osr.SpatialReference() proj.ImportFromEPSG(31466) # create synthetic box box0 = np.array( [ [2600000.0, 5630000.0], [2600000.0, 5640000.0], [2610000.0, 5640000.0], [2610000.0, 5630000.0], [2600000.0, 5630000.0], ] ) box1 = np.array( [ [2610000.0, 5630000.0], [2610000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5630000.0], [2610000.0, 5630000.0], ] ) box3 = np.array( [ [2595000.0, 5625000.0], [2595000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5625000.0], [2595000.0, 5625000.0], ] ) box4 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5645000.0], [2625000.0, 5645000.0], [2625000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box5 = np.array( [ [2600000.0, 5635000.0], [2605000.0, 5635000.0], [2605000.0, 5630000.0], [2600000.0, 5630000.0], [2600000.0, 5635000.0], ] ) box6 = np.array( [ [2615000.0, 5635000.0], [2615000.0, 5640000.0], [2620000.0, 5640000.0], [2620000.0, 5635000.0], [2615000.0, 5635000.0], ] ) box7 = np.array( [ [2715000.0, 5635000.0], [2715000.0, 5640000.0], [2720000.0, 5640000.0], [2720000.0, 5635000.0], [2715000.0, 5635000.0], ] ) src = np.array([box0, box1]) trg = np.array([box3, box4]) dst = np.array([[box5], [box6]]) zdb = zonalstats.ZonalDataBase(src, trg, srs=proj) zdp = zonalstats.ZonalDataPoly(src, trg, srs=proj) yield Data @requires_geos class TestZonalStatsBase: @requires_gdal def test__init__(self, stats_base): with pytest.raises(NotImplementedError): zonalstats.ZonalStatsBase(stats_base.zdb) zonalstats.ZonalStatsBase(stats_base.zdp) with pytest.raises(TypeError): zonalstats.ZonalStatsBase("test") with pytest.raises(TypeError): zonalstats.ZonalStatsBase() with pytest.raises(TypeError): zonalstats.ZonalStatsBase(ix=np.arange(10), w=np.arange(11)) @requires_gdal def test_w(self, stats_base): zdp = zonalstats.ZonalStatsBase(stats_base.zdp) np.testing.assert_equal(zdp.w, np.array([[25000000.0], [25000000.0]])) np.testing.assert_equal(zdp.ix, np.array([[0], [1]])) @requires_gdal def test__check_vals(self, stats_base): zdp = zonalstats.ZonalStatsBase(stats_base.zdp) with pytest.raises(AssertionError): zdp._check_vals(np.arange(3)) @requires_gdal def test_mean(self, stats_base): zdp = zonalstats.ZonalStatsBase(stats_base.zdp) np.testing.assert_equal(zdp.mean(np.arange(10, 21, 10)), np.array([10, 20])) @requires_gdal def test_var(self, 
stats_base): zdp = zonalstats.ZonalStatsBase(stats_base.zdp) np.testing.assert_equal(zdp.var(np.arange(10, 21, 10)), np.array([0, 0])) @pytest.fixture def zonal_data(): @dataclass(init=False, repr=False, eq=False) class Data: # setup test grid and catchment lon = 7.071664 lat = 50.730521 r = np.array(range(50, 100 * 1000 + 50, 100)) a = np.array(range(0, 360, 1)) rays = a.shape[0] bins = r.shape[0] # setup OSR objects proj_gk = osr.SpatialReference() proj_gk.ImportFromEPSG(31466) proj_ll = osr.SpatialReference() proj_ll.ImportFromEPSG(4326) # create polar grid polygon vertices in lat,lon coords = georef.spherical_to_polyvert(r, a, 0, (lon, lat), proj=proj_ll) radar_ll = coords[..., 0:2] # create polar grid centroids in lat,lon coords = georef.spherical_to_centroids(r, a, 0, (lon, lat), proj=proj_ll) radar_llc = coords[..., 0:2] # project ll grids to GK2 radar_gk = georef.reproject( radar_ll, projection_source=proj_ll, projection_target=proj_gk ) radar_gkc = georef.reproject( radar_llc, projection_source=proj_ll, projection_target=proj_gk ) # reshape radar_gk.shape = (rays, bins, 5, 2) radar_gkc.shape = (rays, bins, 2) box0 = np.array( [ [2600000.0, 5630000.0], [2600000.0, 5630100.0], [2600100.0, 5630100.0], [2600100.0, 5630000.0], [2600000.0, 5630000.0], ] ) box1 = np.array( [ [2600100.0, 5630000.0], [2600100.0, 5630100.0], [2600200.0, 5630100.0], [2600200.0, 5630000.0], [2600100.0, 5630000.0], ] ) data = np.array([box0, box1]) # create catchment bounding box buffer = 5000.0 bbox = zonalstats.get_bbox(data[..., 0], data[..., 1]) bbox = dict( left=bbox["left"] - buffer, right=bbox["right"] + buffer, bottom=bbox["bottom"] - buffer, top=bbox["top"] + buffer, ) mask, shape = zonalstats.mask_from_bbox( radar_gkc[..., 0], radar_gkc[..., 1], bbox, polar=True ) radar_gkc = radar_gkc[mask, :] radar_gk = radar_gk[mask] zdpoly = zonalstats.ZonalDataPoly(radar_gk, data, srs=proj_gk) # zdpoly.dump_vector('test_zdpoly') zdpoint = zonalstats.ZonalDataPoint(radar_gkc, data, srs=proj_gk) # zdpoint.dump_vector('test_zdpoint') isec_poly0 = np.array( [ np.array( [ # [2600000.0, 5630000.0], [2600000.0, 5630100.0], [2600009.61157242, 5630100.0], [2600041.77844048, 5630000.0], [2600000.0, 5630000.0], [2600000.0, 5630100.0], ] ), np.array( [ # [2600009.61157242, 5630100.0], [2600100.0, 5630100.0], [2600100.0, 5630000.0], [2600041.77844048, 5630000.0], [2600009.61157242, 5630100.0], [2600100.0, 5630100.0], ] ), np.array( [ # [2600091.80406488, 5630100.0], [2600100.0, 5630100.0], [2600100.0, 5630074.58501104], [2600091.80406488, 5630100.0], [2600100.0, 5630100.0], ] ), ], dtype=object, ) isec_poly1 = np.array( [ np.array( [ # [2600100.0, 5630000.0], [2600100.0, 5630100.0], [2600114.66582085, 5630100.0], [2600146.83254704, 5630000.0], [2600100.0, 5630000.0], [2600100.0, 5630100.0], ] ), np.array( [ # [2600114.66582085, 5630100.0], [2600200.0, 5630100.0], [2600200.0, 5630000.0], [2600146.83254704, 5630000.0], [2600114.66582085, 5630100.0], [2600200.0, 5630100.0], ] ), np.array( [ # [2600197.20644071, 5630100.0], [2600200.0, 5630100.0], [2600200.0, 5630091.33737992], [2600197.20644071, 5630100.0], [2600200.0, 5630100.0], ] ), ], dtype=object, ) isec_point0 = np.array([[2600077.2899581, 5630056.0874306]]) isec_point1 = np.array([[2600172.498418, 5630086.7127034]]) isec_poly = np.array([isec_poly0, isec_poly1]) isec_point = np.array([isec_point0, isec_point1]) yield Data @requires_geos class TestZonalData: @requires_gdal def test_srs(self, zonal_data): assert zonal_data.zdpoly.srs == zonal_data.proj_gk assert 
zonal_data.zdpoint.srs == zonal_data.proj_gk @requires_gdal def test_isecs(self, zonal_data): # need to iterate over nested array for correct testing for i, ival in enumerate(zonal_data.zdpoly.isecs): for k, kval in enumerate(ival): np.testing.assert_allclose( kval.astype(float), zonal_data.isec_poly[i, k], rtol=1e-6 ) np.testing.assert_allclose( zonal_data.zdpoint.isecs.astype(float), zonal_data.isec_point, rtol=1e-6 ) @requires_gdal def test_get_isec(self, zonal_data): for i in [0, 1]: for k, arr in enumerate(zonal_data.zdpoly.get_isec(i)): np.testing.assert_allclose( arr.astype(float), zonal_data.isec_poly[i, k], rtol=1e-6 ) np.testing.assert_allclose( zonal_data.zdpoint.get_isec(i).astype(float), zonal_data.isec_point[i], rtol=1e-6, ) @requires_gdal def test_get_source_index(self, zonal_data): np.testing.assert_array_equal( zonal_data.zdpoly.get_source_index(0), np.array([2254, 2255]) ) np.testing.assert_array_equal( zonal_data.zdpoly.get_source_index(1), np.array([2255, 2256]) ) np.testing.assert_array_equal( zonal_data.zdpoint.get_source_index(0), np.array([2255]) ) np.testing.assert_array_equal( zonal_data.zdpoint.get_source_index(1), np.array([2256]) ) @pytest.fixture def npobj(): yield np.array( [ [2600000.0, 5630000.0], [2600000.0, 5630100.0], [2600100.0, 5630100.0], [2600100.0, 5630000.0], [2600000.0, 5630000.0], ] ) @pytest.fixture def ogrobj(npobj): yield georef.numpy_to_ogr(npobj, "Polygon") class TestZonalStatsUtil: def test_angle_between(self): assert zonalstats.angle_between(355.0, 5.0) == pytest.approx(10.0) assert zonalstats.angle_between(5.0, 355.0) == pytest.approx(-10.0) @requires_gdal def test_get_clip_mask(self, npobj): proj_gk = osr.SpatialReference() proj_gk.ImportFromEPSG(31466) coords = np.array( [ [2600020.0, 5630020.0], [2600030.0, 5630030.0], [2600040.0, 5630040.0], [2700100.0, 5630030.0], [2600040.0, 5640000.0], ] ) mask = zonalstats.get_clip_mask(coords, npobj, proj_gk) out = np.array([True, True, True, False, False]) np.testing.assert_array_equal(mask, out)
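# A minimal sketch, not the wradlib implementation, of the signed wrap-around
# angle difference that test_angle_between above asserts. The helper name
# signed_angle_between is hypothetical and used only for illustration.
def signed_angle_between(start, end):
    """Signed difference end - start in degrees, wrapped into [-180, 180)."""
    return ((end - start + 180.0) % 360.0) - 180.0

assert signed_angle_between(355.0, 5.0) == 10.0    # clockwise across north
assert signed_angle_between(5.0, 355.0) == -10.0   # counter-clockwise across north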
""" Management utility to create superusers. """ import getpass import sys from django.contrib.auth import get_user_model from django.contrib.auth.management import get_default_username from django.contrib.auth.password_validation import validate_password from django.core import exceptions from django.core.management.base import BaseCommand, CommandError from django.db import DEFAULT_DB_ALIAS from django.utils.text import capfirst class NotRunningInTTYException(Exception): pass class Command(BaseCommand): help = 'Used to create a superuser.' requires_migrations_checks = True stealth_options = ('stdin',) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.UserModel = get_user_model() self.username_field = self.UserModel._meta.get_field(self.UserModel.USERNAME_FIELD) def add_arguments(self, parser): parser.add_argument( '--%s' % self.UserModel.USERNAME_FIELD, dest=self.UserModel.USERNAME_FIELD, default=None, help='Specifies the login for the superuser.', ) parser.add_argument( '--noinput', '--no-input', action='store_false', dest='interactive', help=( 'Tells Django to NOT prompt the user for input of any kind. ' 'You must use --%s with --noinput, along with an option for ' 'any other required field. Superusers created with --noinput will ' 'not be able to log in until they\'re given a valid password.' % self.UserModel.USERNAME_FIELD ), ) parser.add_argument( '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".', ) for field in self.UserModel.REQUIRED_FIELDS: parser.add_argument( '--%s' % field, dest=field, default=None, help='Specifies the %s for the superuser.' % field, ) def execute(self, *args, **options): self.stdin = options.get('stdin', sys.stdin) # Used for testing return super().execute(*args, **options) def handle(self, *args, **options): username = options[self.UserModel.USERNAME_FIELD] database = options['database'] # If not provided, create the user with an unusable password password = None user_data = {} # Same as user_data but with foreign keys as fake model instances # instead of raw IDs. fake_user_data = {} verbose_field_name = self.username_field.verbose_name # Do quick and dirty validation if --noinput if not options['interactive']: try: if not username: raise CommandError("You must use --%s with --noinput." % self.UserModel.USERNAME_FIELD) username = self.username_field.clean(username, None) for field_name in self.UserModel.REQUIRED_FIELDS: if options[field_name]: field = self.UserModel._meta.get_field(field_name) user_data[field_name] = field.clean(options[field_name], None) else: raise CommandError("You must use --%s with --noinput." % field_name) except exceptions.ValidationError as e: raise CommandError('; '.join(e.messages)) else: # Prompt for username/password, and any other required fields. # Enclose this whole thing in a try/except to catch # KeyboardInterrupt and exit gracefully. 
default_username = get_default_username() try: if hasattr(self.stdin, 'isatty') and not self.stdin.isatty(): raise NotRunningInTTYException("Not running in a TTY") # Get a username while username is None: input_msg = capfirst(verbose_field_name) if default_username: input_msg += " (leave blank to use '%s')" % default_username username_rel = self.username_field.remote_field input_msg = '%s%s: ' % ( input_msg, ' (%s.%s)' % ( username_rel.model._meta.object_name, username_rel.field_name ) if username_rel else '' ) username = self.get_input_data(self.username_field, input_msg, default_username) if not username: continue if self.username_field.unique: try: self.UserModel._default_manager.db_manager(database).get_by_natural_key(username) except self.UserModel.DoesNotExist: pass else: self.stderr.write("Error: That %s is already taken." % verbose_field_name) username = None if not username: raise CommandError('%s cannot be blank.' % capfirst(verbose_field_name)) for field_name in self.UserModel.REQUIRED_FIELDS: field = self.UserModel._meta.get_field(field_name) user_data[field_name] = options[field_name] while user_data[field_name] is None: message = '%s%s: ' % ( capfirst(field.verbose_name), ' (%s.%s)' % ( field.remote_field.model._meta.object_name, field.remote_field.field_name, ) if field.remote_field else '', ) input_value = self.get_input_data(field, message) user_data[field_name] = input_value fake_user_data[field_name] = input_value # Wrap any foreign keys in fake model instances if field.remote_field: fake_user_data[field_name] = field.remote_field.model(input_value) # Get a password while password is None: password = getpass.getpass() password2 = getpass.getpass('Password (again): ') if password != password2: self.stderr.write("Error: Your passwords didn't match.") password = None # Don't validate passwords that don't match. continue if password.strip() == '': self.stderr.write("Error: Blank passwords aren't allowed.") password = None # Don't validate blank passwords. continue try: validate_password(password2, self.UserModel(**fake_user_data)) except exceptions.ValidationError as err: self.stderr.write('\n'.join(err.messages)) response = input('Bypass password validation and create user anyway? [y/N]: ') if response.lower() != 'y': password = None except KeyboardInterrupt: self.stderr.write("\nOperation cancelled.") sys.exit(1) except NotRunningInTTYException: self.stdout.write( "Superuser creation skipped due to not running in a TTY. " "You can run `manage.py createsuperuser` in your project " "to create one manually." ) if username: user_data[self.UserModel.USERNAME_FIELD] = username user_data['password'] = password self.UserModel._default_manager.db_manager(database).create_superuser(**user_data) if options['verbosity'] >= 1: self.stdout.write("Superuser created successfully.") def get_input_data(self, field, message, default=None): """ Override this method if you want to customize data inputs or validation exceptions. """ raw_value = input(message) if default and raw_value == '': raw_value = default try: val = field.clean(raw_value, None) except exceptions.ValidationError as e: self.stderr.write("Error: %s" % '; '.join(e.messages)) val = None return val
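# A hedged usage sketch: driving the command above non-interactively from code
# via django.core.management.call_command. The 'email' keyword assumes the
# default auth.User model, whose REQUIRED_FIELDS contains 'email'; adjust the
# option names for a custom user model.
from django.core.management import call_command

call_command(
    'createsuperuser',
    interactive=False,          # same code path as --noinput
    username='admin',           # maps to --<USERNAME_FIELD>
    email='admin@example.com',  # one option per entry in REQUIRED_FIELDS
)
# As the --noinput help text above notes, the account gets an unusable password
# until one is assigned explicitly.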
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project # All rights reserved. # # This file is part of NeuroM <https://github.com/BlueBrain/NeuroM> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of # its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Test neurom.io.utils.""" from pathlib import Path import sys from io import StringIO from pathlib import Path import warnings import numpy as np from nose import tools as nt from neurom import get from neurom.core import Neuron, SomaError from neurom.exceptions import NeuroMError, RawDataError, SomaError from neurom.features import neuritefunc as _nf from neurom.io import utils DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data' SWC_PATH = DATA_PATH / 'swc' VALID_DATA_PATH = DATA_PATH / 'valid_set' NRN_NAMES = ('Neuron', 'Neuron_h5v1', 'Neuron_h5v2') FILES = [Path(SWC_PATH, f) for f in ['Neuron.swc', 'Single_apical_no_soma.swc', 'Single_apical.swc', 'Single_basal.swc', 'Single_axon.swc', 'sequential_trunk_off_0_16pt.swc', 'sequential_trunk_off_1_16pt.swc', 'sequential_trunk_off_42_16pt.swc', 'Neuron_no_missing_ids_no_zero_segs.swc']] FILENAMES = [Path(VALID_DATA_PATH, f) for f in ['Neuron.swc', 'Neuron_h5v1.h5', 'Neuron_h5v2.h5']] NO_SOMA_FILE = Path(SWC_PATH, 'Single_apical_no_soma.swc') DISCONNECTED_POINTS_FILE = Path(SWC_PATH, 'Neuron_disconnected_components.swc') MISSING_PARENTS_FILE = Path(SWC_PATH, 'Neuron_missing_parents.swc') INVALID_ID_SEQUENCE_FILE = Path(SWC_PATH, 'non_increasing_trunk_off_1_16pt.swc') def _mock_load_neuron(filename): class MockNeuron(object): def __init__(self, name): self.soma = 42 self.neurites = list() self.name = name return MockNeuron(Path(filename).stem) def _check_neurites_have_no_parent(nrn): for n in nrn.neurites: nt.assert_true(n.root_node.parent is None) def test_get_morph_files(): ref = set(['Neuron_h5v2.h5', 'Neuron_2_branch_h5v2.h5', 'Neuron_slice.h5', 'Neuron.swc', 'Neuron_h5v1.h5', 'Neuron_2_branch_h5v1.h5']) files = set(f.name for f in utils.get_morph_files(VALID_DATA_PATH)) nt.assert_equal(ref, files) def test_load_neurons(): # List of strings nrns = utils.load_neurons(map(str, FILES), 
neuron_loader=_mock_load_neuron) for i, nrn in enumerate(nrns): nt.assert_equal(nrn.name, FILES[i].stem) # Single string nrns = utils.load_neurons(str(FILES[0]), neuron_loader=_mock_load_neuron) nt.assert_equal(nrns[0].name, FILES[0].stem) # Single Path nrns = utils.load_neurons(FILES[0], neuron_loader=_mock_load_neuron) nt.assert_equal(nrns[0].name, FILES[0].stem) # sequence of strings nrns = utils.load_neurons(map(str, FILES), neuron_loader=_mock_load_neuron) for i, nrn in enumerate(nrns): nt.assert_equal(nrn.name, FILES[i].stem) # sequence of Path objects nrns = utils.load_neurons(FILES, neuron_loader=_mock_load_neuron) for nrn, file in zip(nrns, FILES): nt.assert_equal(nrn.name, file.stem) # string path to a directory nrns = utils.load_neurons(str(SWC_PATH), neuron_loader=_mock_load_neuron) # is subset so that if new morpho are added to SWC_PATH, the test does not break nt.assert_true({f.stem for f in FILES}.issubset({nrn.name for nrn in nrns})) # Path path to a directory nrns = utils.load_neurons(SWC_PATH, neuron_loader=_mock_load_neuron) # is subset so that if new morpho are added to SWC_PATH, the test does not break nt.assert_true({f.stem for f in FILES}.issubset({nrn.name for nrn in nrns})) nt.assert_raises(SomaError, utils.load_neurons, NO_SOMA_FILE) def test_load_neuron(): nrn = utils.load_neuron(FILENAMES[0]) nt.assert_true(isinstance(NRN, Neuron)) nt.assert_equal(NRN.name, 'Neuron') _check_neurites_have_no_parent(nrn) neuron_str = u""" 1 1 0 0 0 1. -1 2 3 0 0 0 1. 1 3 3 0 5 0 1. 2 4 3 -5 5 0 0. 3 5 3 6 5 0 0. 3 6 2 0 0 0 1. 1 7 2 0 -4 0 1. 6 8 2 6 -4 0 0. 7 9 2 -5 -4 0 0. 7 """ utils.load_neuron(StringIO(neuron_str), reader='swc') def test_neuron_name(): for fn, nn in zip(FILENAMES, NRN_NAMES): nrn = utils.load_neuron(fn) nt.eq_(nrn.name, nn) @nt.raises(SomaError) def test_load_bifurcating_soma_points_raises_SomaError(): utils.load_neuron(Path(SWC_PATH, 'soma', 'bifurcating_soma.swc')) def test_load_neuromorpho_3pt_soma(): with warnings.catch_warnings(record=True): nrn = utils.load_neuron(Path(SWC_PATH, 'soma', 'three_pt_soma.swc')) nt.eq_(len(nrn.neurites), 4) nt.eq_(len(nrn.soma.points), 3) nt.eq_(nrn.soma.radius, 2) _check_neurites_have_no_parent(nrn) NRN = utils.load_neuron(FILENAMES[0]) def test_neuron_section_ids(): # check section IDs for i, sec in enumerate(NRN.sections): nt.eq_(i, sec.id) def test_neurites_have_no_parent(): _check_neurites_have_no_parent(NRN) def test_neuron_sections(): all_nodes = set(NRN.sections) neurite_nodes = set(_nf.iter_sections(NRN.neurites)) # check no duplicates nt.assert_true(len(all_nodes) == len(NRN.sections)) # check all neurite tree nodes are # in sections attribute nt.assert_true(len(set(NRN.sections) - neurite_nodes) > 0) def test_neuron_sections_are_connected(): # check traversal by counting number of sections un trees for nrt in NRN.neurites: root_node = nrt.root_node nt.assert_equal(sum(1 for _ in root_node.ipreorder()), sum(1 for _ in NRN.sections[root_node.id].ipreorder())) def test_load_neuron_soma_only(): nrn = utils.load_neuron(Path(DATA_PATH, 'swc', 'Soma_origin.swc')) nt.eq_(len(nrn.neurites), 0) nt.assert_equal(nrn.name, 'Soma_origin') @nt.raises(SomaError) def test_load_neuron_no_soma_raises_SomaError(): utils.load_neuron(NO_SOMA_FILE) # TODO: decide if we want to check for this in fst. 
@nt.nottest @nt.raises(RawDataError) def test_load_neuron_disconnected_points_raises(): utils.load_neuron(DISCONNECTED_POINTS_FILE) @nt.raises(RawDataError) def test_load_neuron_missing_parents_raises(): utils.load_neuron(MISSING_PARENTS_FILE) # TODO: decide if we want to check for this in fst. @nt.nottest @nt.raises(RawDataError) def test_load_neuron_invalid_id_sequence_raises(): utils.load_neuron(INVALID_ID_SEQUENCE_FILE) def test_load_neurons_directory(): pop = utils.load_neurons(VALID_DATA_PATH) nt.assert_equal(len(pop.neurons), 6) nt.assert_equal(len(pop), 6) nt.assert_equal(pop.name, 'valid_set') for nrn in pop: nt.assert_true(isinstance(nrn, Neuron)) def test_load_neurons_directory_name(): pop = utils.load_neurons(VALID_DATA_PATH, name='test123') nt.assert_equal(len(pop.neurons), 6) nt.assert_equal(len(pop), 6) nt.assert_equal(pop.name, 'test123') for nrn in pop: nt.assert_true(isinstance(nrn, Neuron)) def test_load_neurons_filenames(): pop = utils.load_neurons(FILENAMES, name='test123') nt.assert_equal(len(pop.neurons), 3) nt.assert_equal(pop.name, 'test123') for nrn, name in zip(pop.neurons, NRN_NAMES): nt.assert_true(isinstance(nrn, Neuron)) nt.assert_equal(nrn.name, name) SWC_ORD_PATH = Path(DATA_PATH, 'swc', 'ordering') SWC_ORD_REF = utils.load_neuron(Path(SWC_ORD_PATH, 'sample.swc')) def assert_items_equal(a, b): nt.eq_(sorted(a), sorted(b)) def test_load_neuron_mixed_tree_swc(): nrn_mix = utils.load_neuron(Path(SWC_ORD_PATH, 'sample_mixed_tree_sections.swc')) assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), [5, 3]) assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), get('number_of_sections_per_neurite', SWC_ORD_REF)) assert_items_equal(get('number_of_segments', nrn_mix), get('number_of_segments', SWC_ORD_REF)) assert_items_equal(get('total_length', nrn_mix), get('total_length', SWC_ORD_REF)) def test_load_neuron_section_order_break_swc(): nrn_mix = utils.load_neuron(Path(SWC_ORD_PATH, 'sample_disordered.swc')) assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), [5, 3]) assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), get('number_of_sections_per_neurite', SWC_ORD_REF)) assert_items_equal(get('number_of_segments', nrn_mix), get('number_of_segments', SWC_ORD_REF)) assert_items_equal(get('total_length', nrn_mix), get('total_length', SWC_ORD_REF)) H5_PATH = Path(DATA_PATH, 'h5', 'v1', 'ordering') H5_ORD_REF = utils.load_neuron(Path(H5_PATH, 'sample.h5')) def test_load_neuron_mixed_tree_h5(): nrn_mix = utils.load_neuron(Path(H5_PATH, 'sample_mixed_tree_sections.h5')) assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), [5, 3]) assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), get('number_of_sections_per_neurite', H5_ORD_REF)) def test_load_h5_trunk_points_regression(): # regression test for issue encountered while # implementing PR #479, related to H5 unpacking # of files with non-standard soma structure. # See #480. 
nrn = utils.load_neuron(Path(DATA_PATH, 'h5', 'v1', 'Neuron.h5')) nt.ok_(np.allclose(nrn.neurites[0].root_node.points[1], [0., 0., 0.1, 0.31646374, 4., 4., 3.])) nt.ok_(np.allclose(nrn.neurites[1].root_node.points[1], [0., 0., 0.1, 1.84130445e-01, 3.0, 235., 234.])) nt.ok_(np.allclose(nrn.neurites[2].root_node.points[1], [0., 0., 0.1, 5.62225521e-01, 3., 466, 465])) nt.ok_(np.allclose(nrn.neurites[3].root_node.points[1], [0., 0., 0.1, 7.28555262e-01, 2., 697, 696])) def test_load_unknown_type(): nt.assert_raises(NeuroMError, utils.load_data, Path('fake.file')) def test_NeuronLoader(): dirpath = Path(DATA_PATH, 'h5', 'v2') loader = utils.NeuronLoader(dirpath, file_ext='.h5', cache_size=5) nrn = loader.get('Neuron') nt.ok_(isinstance(nrn, Neuron)) # check caching nt.ok_(nrn == loader.get('Neuron')) nt.ok_(nrn != loader.get('Neuron_2_branch')) def test_NeuronLoader_mixed_file_extensions(): loader = utils.NeuronLoader(VALID_DATA_PATH) loader.get('Neuron') loader.get('Neuron_h5v1') nt.assert_raises(NeuroMError, loader.get, 'NoSuchNeuron') def test_ignore_exceptions(): pop = utils.load_neurons((NO_SOMA_FILE, ), ignored_exceptions=(SomaError, )) nt.eq_(len(pop), 0) pop = utils.load_neurons((NO_SOMA_FILE, ), ignored_exceptions=(SomaError, RawDataError, )) nt.eq_(len(pop), 0) def test_get_files_by_path(): single_neurom = utils.get_files_by_path(NO_SOMA_FILE) nt.eq_(len(single_neurom), 1) neuron_dir = utils.get_files_by_path(VALID_DATA_PATH) nt.eq_(len(neuron_dir), 6) nt.assert_raises(IOError, utils.get_files_by_path, Path('this/is/a/fake/path'))
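# Illustrative sketch only (an assumption, not neurom's implementation) of the
# behaviour test_ignore_exceptions above relies on: files whose loader raises
# one of the listed exception types are skipped instead of aborting the load.
def load_many(files, loader, ignored_exceptions=()):
    loaded = []
    for f in files:
        try:
            loaded.append(loader(f))
        except ignored_exceptions:   # an empty tuple ignores nothing
            continue
    return loaded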
## debug.py ## ## Copyright (C) 2003 Jacob Lundqvist ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published ## by the Free Software Foundation; either version 2, or (at your option) ## any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. _version_ = '1.4.0' import sys from traceback import format_exception as traceback_format_exception import time import os import types if os.environ.has_key('TERM'): colors_enabled=True else: colors_enabled=False color_none = chr(27) + "[0m" color_black = chr(27) + "[30m" color_red = chr(27) + "[31m" color_green = chr(27) + "[32m" color_brown = chr(27) + "[33m" color_blue = chr(27) + "[34m" color_magenta = chr(27) + "[35m" color_cyan = chr(27) + "[36m" color_light_gray = chr(27) + "[37m" color_dark_gray = chr(27) + "[30;1m" color_bright_red = chr(27) + "[31;1m" color_bright_green = chr(27) + "[32;1m" color_yellow = chr(27) + "[33;1m" color_bright_blue = chr(27) + "[34;1m" color_purple = chr(27) + "[35;1m" color_bright_cyan = chr(27) + "[36;1m" color_white = chr(27) + "[37;1m" class NoDebug: def __init__( self, *args, **kwargs ): self.debug_flags = [] def show( self, *args, **kwargs): pass def Show( self, *args, **kwargs): pass def is_active( self, flag ): pass colors={} active_set = lambda self, active_flags = None: 0 LINE_FEED = '\n' class Debug: def __init__(self, active_flags = None, log_file = sys.stderr, prefix = 'DEBUG: ', sufix = '\n', time_stamp = 0, flag_show = None, validate_flags = 1, welcome = -1): self.debug_flags = [] if welcome == -1: if active_flags and len(active_flags): welcome = 1 else: welcome = 0 self._remove_dupe_flags() if log_file: if type( log_file ) is type(''): try: self._fh = open(log_file,'w') except: print 'ERROR: can open %s for writing' sys.exit(0) else: ## assume its a stream type object self._fh = log_file else: self._fh = sys.stdout if time_stamp not in (0,1,2): msg2 = '%s' % time_stamp raise 'Invalid time_stamp param', msg2 self.prefix = prefix self.sufix = sufix self.time_stamp = time_stamp self.flag_show = None # must be initialised after possible welcome self.validate_flags = validate_flags self.active_set( active_flags ) if welcome: self.show('') caller = sys._getframe(1) # used to get name of caller try: mod_name= ":%s" % caller.f_locals['__name__'] except: mod_name = "" self.show('Debug created for %s%s' % (caller.f_code.co_filename, mod_name )) self.show(' flags defined: %s' % ','.join( self.active )) if type(flag_show) in (type(''), type(None)): self.flag_show = flag_show else: msg2 = '%s' % type(flag_show ) raise 'Invalid type for flag_show!', msg2 def show( self, msg, flag = None, prefix = None, sufix = None, lf = 0 ): """ flag can be of folowing types: None - this msg will always be shown if any debugging is on flag - will be shown if flag is active (flag1,flag2,,,) - will be shown if any of the given flags are active if prefix / sufix are not given, default ones from init will be used lf = -1 means strip linefeed if pressent lf = 1 means add linefeed if not pressent """ if self.validate_flags: self._validate_flag( flag ) if not self.is_active(flag): return if prefix: pre = prefix else: pre = self.prefix if sufix: suf = sufix else: suf = self.sufix if self.time_stamp == 2: output = '%s%s ' % ( pre, 
time.strftime('%b %d %H:%M:%S', time.localtime(time.time() )), ) elif self.time_stamp == 1: output = '%s %s' % ( time.strftime('%b %d %H:%M:%S', time.localtime(time.time() )), pre, ) else: output = pre if self.flag_show: if flag: output = '%s%s%s' % ( output, flag, self.flag_show ) else: # this call uses the global default, # dont print "None", just show the separator output = '%s %s' % ( output, self.flag_show ) output = '%s%s%s' % ( output, msg, suf ) if lf: # strip/add lf if needed last_char = output[-1] if lf == 1 and last_char != LINE_FEED: output = output + LINE_FEED elif lf == -1 and last_char == LINE_FEED: output = output[:-1] try: self._fh.write( output ) except: # unicode strikes again ;) s=u'' for i in range(len(output)): if ord(output[i]) < 128: c = output[i] else: c = '?' s=s+c self._fh.write( '%s%s%s' % ( pre, s, suf )) self._fh.flush() def is_active( self, flag ): 'If given flag(s) should generate output.' # try to abort early to quicken code if not self.active: return 0 if not flag or flag in self.active: return 1 else: # check for multi flag type: if type( flag ) in ( type(()), type([]) ): for s in flag: if s in self.active: return 1 return 0 def active_set( self, active_flags = None ): "returns 1 if any flags where actually set, otherwise 0." r = 0 ok_flags = [] if not active_flags: #no debuging at all self.active = [] elif type( active_flags ) in ( types.TupleType, types.ListType ): flags = self._as_one_list( active_flags ) for t in flags: if t not in self.debug_flags: sys.stderr.write('Invalid debugflag given: %s\n' % t ) ok_flags.append( t ) self.active = ok_flags r = 1 else: # assume comma string try: flags = active_flags.split(',') except: self.show( '***' ) self.show( '*** Invalid debug param given: %s' % active_flags ) self.show( '*** please correct your param!' ) self.show( '*** due to this, full debuging is enabled' ) self.active = self.debug_flags for f in flags: s = f.strip() ok_flags.append( s ) self.active = ok_flags self._remove_dupe_flags() return r active_get = lambda self: self.active "returns currently active flags." def _as_one_list( self, items ): """ init param might contain nested lists, typically from group flags. This code organises lst and remves dupes """ if type( items ) <> type( [] ) and type( items ) <> type( () ): return [ items ] r = [] for l in items: if type( l ) == type([]): lst2 = self._as_one_list( l ) for l2 in lst2: self._append_unique_str(r, l2 ) elif l == None: continue else: self._append_unique_str(r, l ) return r def _append_unique_str( self, lst, item ): """filter out any dupes.""" if type(item) <> type(''): msg2 = '%s' % item raise 'Invalid item type (should be string)',msg2 if item not in lst: lst.append( item ) return lst def _validate_flag( self, flags ): 'verify that flag is defined.' 
if flags: for f in self._as_one_list( flags ): if not f in self.debug_flags: msg2 = '%s' % f raise 'Invalid debugflag given', msg2 def _remove_dupe_flags( self ): """ if multiple instances of Debug is used in same app, some flags might be created multiple time, filter out dupes """ unique_flags = [] for f in self.debug_flags: if f not in unique_flags: unique_flags.append(f) self.debug_flags = unique_flags colors={} def Show(self, flag, msg, prefix=''): msg=msg.replace('\r','\\r').replace('\n','\\n').replace('><','>\n <') if not colors_enabled: pass elif self.colors.has_key(prefix): msg=self.colors[prefix]+msg+color_none else: msg=color_none+msg if not colors_enabled: prefixcolor='' elif self.colors.has_key(flag): prefixcolor=self.colors[flag] else: prefixcolor=color_none if prefix=='error': _exception = sys.exc_info() if _exception[0]: msg=msg+'\n'+''.join(traceback_format_exception(_exception[0], _exception[1], _exception[2])).rstrip() prefix= self.prefix+prefixcolor+(flag+' '*12)[:12]+' '+(prefix+' '*6)[:6] self.show(msg, flag, prefix) def is_active( self, flag ): if not self.active: return 0 if not flag or flag in self.active and DBG_ALWAYS not in self.active or flag not in self.active and DBG_ALWAYS in self.active : return 1 return 0 DBG_ALWAYS='always' ##Uncomment this to effectively disable all debugging and all debugging overhead. #Debug=NoDebug
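# A hedged usage sketch for the Debug class above; the flag names 'socket' and
# 'dispatcher' are illustrative, not part of the module.
d = Debug(prefix='xmpp: ', time_stamp=1)                # no flags active yet
d.debug_flags = ['always', 'socket', 'dispatcher']      # declare known flags
d.active_set(['socket'])                                # activate a subset
d.show('connection opened', 'socket')                   # emitted: flag is active
d.show('stanza routed', 'dispatcher')                   # suppressed: flag inactive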
#!/usr/bin/env python from optparse import OptionParser, OptionGroup from bs_utils.utils import * try : import pysam except ImportError : print "[Error] Cannot import \"pysam\" package. Have you installed it?" exit(-1) # import gzip """ def context_calling(seq, position): word=seq[position] word=word.upper() context="--" context_CH="--" if position + 2 < len(seq) and position - 2 >= 0: if word == "C": word2 = seq[position+1] context_CH = word + word2 if word2 == "G": context = "CG" elif word2 in ['A','C','T']: word3 = seq[position+2] if word3 == "G": context = "CHG" elif word3 in ['A','C','T']: context="CHH" elif word == "G": word2 = seq[position-1] context_CH = word + word2 context_CH = context_CH.translate(string.maketrans("ATCG", "TAGC")) if word2 == "C": context = "CG" elif word2 in ['A','G','T']: word3 = seq[position-2] if word3 == "C": context = "CHG" elif word3 in ['A','G','T']: context = "CHH" return word, context, context_CH """ if __name__ == '__main__': # parser = OptionParser() parser.add_option("-i", "--input", type="string", dest="infilename",help="BAM output from bs_seeker2-align.py", metavar="INFILE") parser.add_option("-d", "--db", type="string", dest="dbpath", help="Path to the reference genome library (generated in preprocessing genome) " "[Default: %default]" , metavar="DBPATH", default = reference_genome_path) parser.add_option("-o", "--output-prefix", type="string", dest="output_prefix", default = None, help="The output prefix to create ATCGmap and wiggle files. " "Three files (ATCGmap, CGmap, wig) will be generated if specified. " "Omit this if only to generate specific format.", metavar="OUTFILE") parser.add_option("--sorted", action="store_true", dest="sorted", help="Specify when the input bam file is already sorted, the sorting step will be skipped " "[Default: %default]", default = False) parser.add_option("--wig", type="string", dest="wig_file", help="Filename for wig file. Ex: output.wig, or output.wig.gz. Can be overwritten by \"-o\".", metavar="OUTFILE", default = None) parser.add_option("--CGmap", type="string", dest="CGmap_file", default = None, help="Filename for CGmap file. Ex: output.CGmap, or output.CGmap.gz. " "Can be overwritten by \"-o\".", metavar="OUTFILE") parser.add_option("--ATCGmap", type="string", dest="ATCGmap_file", default = None, help="Filename for ATCGmap file. Ex: output.ATCGmap, or output.ATCGmap.gz. 
" "Can be overwritten by \"-o\".", metavar="OUTFILE") parser.add_option("-x", "--rm-SX", action="store_true", dest="RM_SX", help="Removed reads with tag \'XS:i:1\', which would be considered as not fully converted " "by bisulfite treatment [Default: %default]", default = False) parser.add_option("--rm-CCGG", action="store_true", dest="RM_CCGG", help="Removed sites located in CCGG, avoiding the bias introduced by artificial" " DNA methylation status \'XS:i:1\', which would be considered as not fully converted " "by bisulfite treatment [Default: %default]", default = False) parser.add_option("--rm-overlap", action="store_true", dest="RM_OVERLAP", help="Removed one mate if two mates are overlapped, for paired-end data" " [Default: %default]", default = False) parser.add_option("--txt", action="store_true", dest="text", help="When specified, output file will be stored in plain text instead of compressed version (.gz)", default = False) parser.add_option("-r", "--read-no",type = "int", dest="read_no", help="The least number of reads covering one site to be shown in wig file " "[Default: %default]", default = 1) parser.add_option("-D", "--pileup-maxdepth",type = "int", dest="PileupMaxDepth", help="The max number of read depth can be called for each position. Parameter passing to pysam. " "Large number costs more time." "[Default: %default]", default = 8000) parser.add_option("-v", "--version", action="store_true", dest="version", help="show version of BS-Seeker2", metavar="version", default = False) # (options, args) = parser.parse_args() # # # if no options were given by the user, print help and exit if len(sys.argv) == 1: parser.print_help() exit(0) # if options.version : show_version() exit (-1) else : show_version() # # if options.infilename is None: error('-i option is required') if not os.path.isfile(options.infilename): error('Cannot find input file: %s' % options.infilename) # open_log(options.infilename+'.call_methylation_log') db_d = lambda fname: os.path.join( os.path.expanduser(options.dbpath), fname) # bug fixed, weilong # if options.RM_OVERLAP : logm("The option \"--rm-overlap\" is specified, thus overlap regions of two mates would be discarded.") # if options.sorted : logm('The option \"--sorted\" is specified, thus sorting step is skipped') sorted_input_filename = options.infilename else : logm('sorting BS-Seeker2 alignments') sorted_input_filename = options.infilename+'_sorted' if [int(i) for i in pysam.__version__.split(".")] < [0, 7, 7] : pysam.sort(options.infilename, sorted_input_filename) else : pysam.sort("-o", sorted_input_filename + '.bam', "-T", sorted_input_filename, options.infilename) # sorted_input_filename += '.bam' # end_of if logm('indexing sorted alignments') pysam.index(sorted_input_filename) if options.output_prefix is not None : if options.text : options.ATCGmap_file = options.output_prefix + '.ATCGmap' options.CGmap_file = options.output_prefix + '.CGmap' options.wig_file = options.output_prefix + '.wig' else : options.ATCGmap_file = options.output_prefix + '.ATCGmap.gz' options.CGmap_file = options.output_prefix + '.CGmap.gz' options.wig_file = options.output_prefix + '.wig.gz' # else : if (options.ATCGmap_file is None) and (options.CGmap_file is None) and (options.wig_file is None) : if options.text : options.ATCGmap_file = options.infilename + '.ATCGmap' options.CGmap_file = options.infilename + '.CGmap' options.wig_file = options.infilename + '.wig' else : options.ATCGmap_file = options.infilename + '.ATCGmap.gz' options.CGmap_file = options.infilename + 
'.CGmap.gz' options.wig_file = options.infilename + '.wig.gz' # # # logm('calculating methylation levels') if options.ATCGmap_file is not None : if options.ATCGmap_file.endswith(".gz") : ATCGmap = gzip.open(options.ATCGmap_file, 'wb') else : ATCGmap = open(options.ATCGmap_file, 'w') # # if options.CGmap_file is not None : if options.CGmap_file.endswith(".gz") : CGmap = gzip.open(options.CGmap_file, 'wb') else : CGmap = open(options.CGmap_file, 'w') # # if options.wig_file is not None : if options.wig_file.endswith(".gz") : wiggle = gzip.open(options.wig_file, 'wb') else : wiggle = open(options.wig_file, 'w') # # # to improve the performance options_RM_CCGG = options.RM_CCGG options_read_no = options.read_no options_RM_SX = options.RM_SX options_RM_OVERLAP = options.RM_OVERLAP PileupMaxDepth = options.PileupMaxDepth # if options.wig_file is not None : wiggle.write('track type=wiggle_0\n') # sorted_input = pysam.Samfile(sorted_input_filename, 'rb') # chrom = None nucs = ['A', 'T', 'C', 'G', 'N'] ATCG_fwd = dict((n, 0) for n in nucs) ATCG_rev = dict((n, 0) for n in nucs) # # Define the context and subcontext exchanging dictionary ContextTable={"CAA":"CHH", "CAC":"CHH", "CAG":"CHG", "CAT":"CHH", "CCA":"CHH", "CCC":"CHH", "CCG":"CHG", "CCT":"CHH", "CGA":"CG", "CGC":"CG", "CGG":"CG", "CGT":"CG", "CTA":"CHH", "CTC":"CHH", "CTG":"CHG", "CTT":"CHH"} # SubContextTable={"CAA":"CA", "CAC":"CA", "CAG":"CA", "CAT":"CA", "CCA":"CC", "CCC":"CC", "CCG":"CC", "CCT":"CC", "CGA":"CG", "CGC":"CG", "CGG":"CG", "CGT":"CG", "CTA":"CT", "CTC":"CT", "CTG":"CT", "CTT":"CT"} # AntisenseContextTable=\ {"TTG":"CHH", "TGG":"CHH", "TCG":"CG", "TAG":"CHH", "GTG":"CHH", "GGG":"CHH", "GCG":"CG", "GAG":"CHH", "CTG":"CHG", "CGG":"CHG", "CCG":"CG", "CAG":"CHG", "ATG":"CHH", "AGG":"CHH", "ACG":"CG", "AAG":"CHH"} # AntisenseSubContextTable=\ {"TTG":"CA", "TGG":"CC", "TCG":"CG", "TAG":"CT", "GTG":"CA", "GGG":"CC", "GCG":"CG", "GAG":"CT", "CTG":"CA", "CGG":"CC", "CCG":"CG", "CAG":"CT", "ATG":"CA", "AGG":"CC", "ACG":"CG", "AAG":"CT"} # cnts = lambda d: '\t'.join(str(d[n]) for n in nucs) # for col in sorted_input.pileup(max_depth=PileupMaxDepth): col_chrom = sorted_input.getrname(col.tid) col_pos = col.pos if chrom != col_chrom: chrom = col_chrom chrom_seq = deserialize(db_d(chrom)) if options.wig_file is not None : wiggle.write('variableStep chrom=%s\n' % chrom) # logm('Processing chromosome: %s' % chrom) # for n in nucs: ATCG_fwd[n] = 0 ATCG_rev[n] = 0 # if 1 < col_pos < len(chrom_seq) - 2 : FiveMer = chrom_seq[(col_pos-2):(col_pos+3)].upper() nuc = FiveMer[2] if nuc == "C" : ThreeMer = FiveMer[2:5] subcontext = SubContextTable.get(ThreeMer, "--") context = ContextTable.get(ThreeMer, "--") elif nuc == "G" : ThreeMer = FiveMer[0:3] subcontext = AntisenseSubContextTable.get(ThreeMer, "--") context = AntisenseContextTable.get(ThreeMer, "--") else : context = "--" subcontext = "--" # else : nuc = chrom_seq[col_pos].upper() context = "--" subcontext = "--" # total_reads = 0 # if options_RM_CCGG : # To validate the outputs #print chrom_seq[ (max(col_pos-3, 0)):(col_pos+4) ].upper() if "CCGG" in chrom_seq[ (max(col_pos-3, 0)):(col_pos+4) ].upper() : #print "Removed ============" continue # ---321X123--- # ---CCGG===--- # ---===CCGG--- #check_start_pos = (col_pos - 3) if col_pos>3 else 0 #check_end_pos = (col_pos + 4) if col_pos+4<len(chrom_seq) else len(chrom_seq) #check_seq = chrom_seq[check_start_pos:check_end_pos].upper() #if "CCGG" in SevenMer : #print "Remove\t%s\n" % check_seq # for debug # continue # if options_RM_OVERLAP : 
qname_pool = [] del qname_pool[:] # for pr in col.pileups: # print pr #if pysam.__version__ > "0.8.0" : if [int(i) for i in pysam.__version__.split(".")] > [0, 7, 7] : pr_qpos = pr.query_position else : pr_qpos =pr.qpos # if (not pr.indel) : # skip indels pr_alignment = pr.alignment #print pr.alignment #if ( (options_RM_SX) and (pr.alignment.tags[1][1] == 1) ): ##=== Fixed error reported by Roberto #print options_RM_SX, dict(pr.alignment.tags)["XS"] #if ( (options_RM_SX) and (dict(pr.alignment.tags)["XS"] == 1) ): #if ( (options_RM_SX) and (dict(pr_alignment.tags).get("XS",0) == 1) ): if ( (options_RM_SX) and ( ('XS', 1) in pr_alignment.tags) ) : # faster # print "Debug: ", options_RM_SX, pr.alignment.tags[1] # when need to filter and read with tag (XS==1), then remove the reads continue #print pr_alignment.tags #if pr.qpos >= len(pr_alignment.seq): #if pr.query_position >= len(pr_alignment.seq): if pr_qpos >= len(pr_alignment.seq): print 'WARNING: read %s has an invalid alignment. Discarding.. ' % pr_alignment.qname continue # #print "qname= %s" % pr.alignment.qname #qname = pr_alignment.qname.replace( "#1", "").replace(".1", "") if options_RM_OVERLAP : #qname = re.sub( "[\.\#]1$","",pr_alignment.qname) #qname = pr_alignment.qname.replace( "#1", "").replace(".1", "") pr_alignment_qname = pr_alignment.qname qname = pr_alignment_qname[:-2] if pr_alignment_qname[-2] in ['.', '#'] else pr_alignment_qname # "SRR121545.1" or "SRR121545#1" to "SRR121545" if (qname in qname_pool) : # remove the 2nd mate (the same qname with 1st qname) #print "Remove the read with duplicate qname : %s" % qname continue qname_pool.append(qname) # try : read_nuc = pr_alignment.seq[pr_qpos] except : continue # if pr_alignment.is_reverse: ATCG_rev[read_nuc] += 1 else: ATCG_fwd[read_nuc] += 1 # if read_nuc != 'N': total_reads += 1 #print col_pos, qname_pool # #cnts = lambda d: '\t'.join(str(d[n]) for n in nucs) fwd_counts = cnts(ATCG_fwd) rev_counts = cnts(ATCG_rev) # meth_level = None meth_cytosines = 0 unmeth_cytosines = 0 # if nuc == 'C': # plus strand: take the ratio of C's to T's from reads that come from the forward strand meth_cytosines = ATCG_fwd['C'] unmeth_cytosines = ATCG_fwd['T'] elif nuc == 'G': # minus strand: take the ratio of G's to A's from reads that come from the reverse strand meth_cytosines = ATCG_rev['G'] unmeth_cytosines = ATCG_rev['A'] #print("%s\t%d\t%d" % (nuc, ATCG_rev['G'], ATCG_rev['A'] ) ) # all_cytosines = meth_cytosines + unmeth_cytosines if all_cytosines > 0: meth_level = float(meth_cytosines)/all_cytosines # pos = col_pos + 1 # meth_level_string = str(round(meth_level, 2)) if meth_level is not None else 'na' # if options.ATCGmap_file is not None: ATCGmap.write('%(chrom)s\t%(nuc)s\t%(pos)d\t%(context)s\t%(subcontext)s\t%(fwd_counts)s\t%(rev_counts)s\t%(meth_level_string)s\n' % locals()) # try : #if (meth_level is not None) and (all_cytosines >= options_read_no): if (all_cytosines >= options_read_no): # print all_cytosines if options.wig_file is not None: if nuc == 'C': wiggle.write('%d\t%.2f\n' % (pos, meth_level)) else : wiggle.write('%d\t-%.2f\n' % (pos, meth_level)) # # if options.CGmap_file is not None: CGmap.write('%(chrom)s\t%(nuc)s\t%(pos)d\t%(context)s\t%(subcontext)s\t%(meth_level_string)s\t%(meth_cytosines)s\t%(all_cytosines)s\n' % locals()) # CGmap file only show CG sites except TypeError : continue # # if options.ATCGmap_file is not None: ATCGmap.close() # if options.CGmap_file is not None: CGmap.close() # if options.wig_file is not None: wiggle.close() # logm('Call 
methylation is finished. ') logm('==============================') logm('Files are saved as:') if options.wig_file is not None: logm(' Wiggle: %s'% options.wig_file) if options.ATCGmap_file is not None: logm(' ATCGMap: %s' % options.ATCGmap_file) if options.CGmap_file is not None: logm(' CGmap: %s' % options.CGmap_file) # #
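# Illustrative helper (an assumption, not part of BS-Seeker2) showing how the
# context tables above resolve the methylation context of one reference
# position from the 5-mer centred on it; it mirrors the in-line logic of the
# pileup loop and assumes the four dictionaries are in scope.
def call_context(five_mer):
    """Return (nuc, context, subcontext) for the centre base of a 5-mer."""
    nuc = five_mer[2]
    if nuc == "C":        # forward-strand cytosine: read the 3' neighbours
        three_mer = five_mer[2:5]
        return nuc, ContextTable.get(three_mer, "--"), SubContextTable.get(three_mer, "--")
    if nuc == "G":        # reverse-strand cytosine: read the 5' neighbours
        three_mer = five_mer[0:3]
        return nuc, AntisenseContextTable.get(three_mer, "--"), AntisenseSubContextTable.get(three_mer, "--")
    return nuc, "--", "--"
# e.g. call_context("AACGT") -> ('C', 'CG', 'CG'); call_context("TACAT") -> ('C', 'CHH', 'CA')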
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import prefix_limit class l3vpn_ipv6_multicast(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/l3vpn-ipv6-multicast. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Multicast IPv6 L3VPN configuration options """ __slots__ = ("_path_helper", "_extmethods", "__prefix_limit") _yang_name = "l3vpn-ipv6-multicast" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__prefix_limit = YANGDynClass( base=prefix_limit.prefix_limit, is_container="container", yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "bgp", "global", "afi-safis", "afi-safi", "l3vpn-ipv6-multicast", ] def _get_prefix_limit(self): """ Getter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit (container) YANG Description: Configure the maximum number of prefixes that will be accepted from a peer """ return self.__prefix_limit def _set_prefix_limit(self, v, load=False): """ Setter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit (container) If this variable is read-only (config: false) in the source YANG file, then _set_prefix_limit is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prefix_limit() directly. 
YANG Description: Configure the maximum number of prefixes that will be accepted from a peer """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=prefix_limit.prefix_limit, is_container="container", yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """prefix_limit must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=prefix_limit.prefix_limit, is_container='container', yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__prefix_limit = t if hasattr(self, "_set"): self._set() def _unset_prefix_limit(self): self.__prefix_limit = YANGDynClass( base=prefix_limit.prefix_limit, is_container="container", yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) prefix_limit = __builtin__.property(_get_prefix_limit, _set_prefix_limit) _pyangbind_elements = OrderedDict([("prefix_limit", prefix_limit)]) from . import prefix_limit class l3vpn_ipv6_multicast(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/l3vpn-ipv6-multicast. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Multicast IPv6 L3VPN configuration options """ __slots__ = ("_path_helper", "_extmethods", "__prefix_limit") _yang_name = "l3vpn-ipv6-multicast" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__prefix_limit = YANGDynClass( base=prefix_limit.prefix_limit, is_container="container", yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "bgp", "global", "afi-safis", "afi-safi", "l3vpn-ipv6-multicast", ] def _get_prefix_limit(self): """ Getter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit (container) YANG Description: Configure the maximum number of prefixes that will be accepted from a peer """ return self.__prefix_limit def _set_prefix_limit(self, v, load=False): """ Setter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit (container) If this variable is read-only (config: false) in the source YANG file, then _set_prefix_limit is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prefix_limit() directly. 
YANG Description: Configure the maximum number of prefixes that will be accepted from a peer """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=prefix_limit.prefix_limit, is_container="container", yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """prefix_limit must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=prefix_limit.prefix_limit, is_container='container', yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__prefix_limit = t if hasattr(self, "_set"): self._set() def _unset_prefix_limit(self): self.__prefix_limit = YANGDynClass( base=prefix_limit.prefix_limit, is_container="container", yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) prefix_limit = __builtin__.property(_get_prefix_limit, _set_prefix_limit) _pyangbind_elements = OrderedDict([("prefix_limit", prefix_limit)])
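# A hedged usage sketch for the generated bindings above: each container is a
# plain Python object, its child containers are exposed as properties, and
# _path() (defined above) reports the node's position in the schema tree.
afi = l3vpn_ipv6_multicast()
limit = afi.prefix_limit   # the auto-instantiated prefix-limit child container
print(afi._path())         # ['network-instances', ..., 'l3vpn-ipv6-multicast']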
""" twemproxy/nutcracker sharded Redis library for Python. - Capable of dealing with twemproxy sharded Redis configuration schemes. - Talks to the Sentinels to obtain the Redis shards """ import collections import hashlib import re from redis.sentinel import Sentinel import yaml __all__ = ['TwemRedis'] class TwemRedis: """ A redis wrapper library for using twemproxy sharded Redis. Allows running operations on individual shards or across all shards. Attributes: disallowed_sharded_operations: These Redis functions cannot be called indirectly on a TwemRedis instance. If they are to be used, then the operations must be performed on the shards directly. """ disallowed_sharded_operations = ['hscan', 'scan', 'sscan', 'zscan'] def __init__(self, config_file): self._config = self._parse_config(config_file) self._shard_name_format = self._config['shard_name_format'] self._hash_tag = self._config['hash_tag'] self._hash_start = self._hash_tag[0] self._hash_stop = self._hash_tag[1] if 'sentinels' in self._config: self._sentinels = self._config['sentinels'] self._num_shards = int(self._config['num_shards']) self._masters = None elif 'masters' in self._config: self._masters = self._config['masters'] self._num_shards = len(self._masters) self._sentinels = None if 'timeout' in self._config: self._timeout = self._config['timeout'] else: self._timeout = 2.0 self._canonical_keys = self.compute_canonical_key_ids() self._init_redis_shards() def _parse_config(self, config_file): """ _parse_config takes a path to a yml file and returns the parsed object representation of the file. This is a convenient method to override in unit tests. """ return yaml.load(open(config_file, 'r')) def _init_redis_shards(self): """ init_redis_shards is used internally to connect to the Redis sentinels and populate self.shards with the redis.StrictRedis instances. This is a convenient method to override / stub out in unit tests. """ self._shards = {} if self._sentinels is not None: self.init_shards_from_sentinel() elif self._masters is not None: self.init_shards_from_masters() else: raise Exception("You must either specify sentinels or masters") def init_shards_from_sentinel(self): sentinel_client = Sentinel( [(h, 8422) for h in self._sentinels], socket_timeout=self._timeout) # Connect to all the shards with the names specified per # shard_name_format. The names are important since it's getting # the instances from Redis Sentinel. for shard_num in range(0, self.num_shards()): shard_name = self.get_shard_name(shard_num) self._shards[shard_num] = sentinel_client.master_for( shard_name, socket_timeout=self._timeout) # Just in case we need it later. self._sentinel_client = sentinel_client def init_shards_from_masters(self): for shard_num in range(0, self.num_shards()): master = self._masters[shard_num] (host, port) = master.split(' ') shard = redis.StrictRedis(host, port, socket_timeout=self._timeout) self._shards[shard_num] = shard def num_shards(self): """ num_shards returns the number of Redis shards in this cluster. """ return self._num_shards def get_shard_name(self, shard_num): """ get_shard_name returns the name of the shard given its number. It uses the 'shard_name_format' from the configuration file to format the name from the number. Keyword arguments: shard_num - The shard number (e.g. 0) Returns the shard name (e.g. tdb001) """ return self._shard_name_format.format(shard_num) def get_shard_names(self): """ get_shard_names returns an array containing the names of the shards in the cluster. 
This is determined with num_shards and shard_name_format """ results = [] for shard_num in range(0, self.num_shards()): shard_name = self.get_shard_name(shard_num) results.append(shard_name) return results def get_key(self, key_type, key_id): """ get_key constructs a key given a key type and a key id. Keyword arguments: key_type -- the type of key (e.g.: 'friend_request') key_id -- the key id (e.g.: '12345') returns a string representing the key (e.g.: 'friend_request:{12345}') """ return "{0}:{1}{2}{3}".format(key_type, self._hash_start, key_id, self._hash_stop) def get_shard_by_key(self, key): """ get_shard_by_key returns the Redis shard given a key. Keyword arguments: key -- the key (e.g. 'friend_request:{12345}') If the key contains curly braces as in the example, then portion inside the curly braces will be used as the key id. Otherwise, the entire key is the key id. returns a redis.StrictRedis connection """ key_id = self._get_key_id_from_key(key) return self.get_shard_by_key_id(key_id) def get_shard_num_by_key(self, key): """ get_shard_num_by_key returns the Redis shard number givne a key. Keyword arguments: key -- the key (e.g. 'friend_request:{12345}') See get_shard_by_key for more details as this method behaves the same way. """ key_id = self._get_key_id_from_key(key) return self.get_shard_num_by_key_id(key_id) def get_shard_by_key_id(self, key_id): """ get_shard_by_key_id returns the Redis shard given a key id. Keyword arguments: key_id -- the key id (e.g. '12345') This is similar to get_shard_by_key(key) except that it will not search for a key id within the curly braces. returns a redis.StrictRedis connection """ shard_num = self.get_shard_num_by_key_id(key_id) return self.get_shard_by_num(shard_num) def get_shard_num_by_key_id(self, key_id): """ get_shard_num_by_key_id returns the Redis shard number (zero-indexed) given a key id. Keyword arguments: key_id -- the key id (e.g. '12345' or 'anythingcangohere') This method is critical in how the Redis cluster sharding works. We emulate twemproxy's md5 distribution algorithm. """ # TODO: support other hash functions? m = hashlib.md5(str(key_id).encode('ascii')).hexdigest() # Below is borrowed from # https://github.com/twitter/twemproxy/blob/master/src/hashkit/nc_md5.c val = (int(m[0:2], 16) | int(m[2:4], 16) << 8 | int(m[4:6], 16) << 16 | int(m[6:8], 16) << 24) return val % self.num_shards() def get_canonical_key(self, key_type, key_id): """ get_canonical_key returns the canonical form of a key given a key id. For example, '12345' maps to shard 6. The canonical key at index 6 (say '12') is the canonical key id given the key id of '12345'. This is useful for sets that need to exist on all shards. See compute_canonical_key_ids for how these are calculated. Keyword arguments: key_type -- the type of key (e.g. 'canceled') key_id -- the key id (e.g. '12345') returns the canonical key string (e.g. 'canceled:{12}') """ canonical_key_id = self.get_canonical_key_id(key_id) return self.get_key(key_type, canonical_key_id) def get_canonical_key_id(self, key_id): """ get_canonical_key_id is used by get_canonical_key, see the comment for that method for more explanation. Keyword arguments: key_id -- the key id (e.g. '12345') returns the canonical key id (e.g. '12') """ shard_num = self.get_shard_num_by_key_id(key_id) return self._canonical_keys[shard_num] def get_canonical_key_id_for_shard(self, shard_num): """ get_canonical_key_id_for_shard is a utility method. It will return the canonical key id for a given shard number. 
Keyword arguments: shard_num -- the shard number (e.g. 0) returns the canonical key id (e.g. '12') """ return self._canonical_keys[shard_num] def get_shard_by_num(self, shard_num): """ get_shard_by_num returns the shard at index shard_num. Keyword arguments: shard_num -- The shard index Returns a redis.StrictRedis connection or raises a ValueError. """ if shard_num < 0 or shard_num >= self.num_shards(): raise ValueError("requested invalid shard# {0}".format(shard_num)) return self._shards[shard_num] def _get_key_id_from_key(self, key): """ _get_key_id_from_key returns the key id from a key, if found. otherwise it just returns the key to be used as the key id. Keyword arguments: key -- The key to derive the ID from. If curly braces are found in the key, then the contents of the curly braces are used as the key id for the key. Returns the key id portion of the key, or the whole key if no hash tags are present. """ key_id = key regex = '{0}([^{1}]*){2}'.format(self._hash_start, self._hash_stop, self._hash_stop) m = re.search(regex, key) if m is not None: # Use what's inside the hash tags as the key id, if present. # Otherwise the whole key will be used as the key id. key_id = m.group(1) return key_id def compute_canonical_key_ids(self, search_amplifier=100): """ A canonical key id is the lowest integer key id that maps to a particular shard. The mapping to canonical key ids depends on the number of shards. Returns a dictionary mapping from shard number to canonical key id. This method will throw an exception if it fails to compute all of the canonical key ids. """ canonical_keys = {} num_shards = self.num_shards() # Guarantees enough to find all keys without running forever num_iterations = (num_shards**2) * search_amplifier for key_id in range(1, num_iterations): shard_num = self.get_shard_num_by_key(str(key_id)) if shard_num in canonical_keys: continue canonical_keys[shard_num] = str(key_id) if len(canonical_keys) == num_shards: break if len(canonical_keys) != num_shards: raise ValueError("Failed to compute enough keys. " + "Wanted %d, got %d (search_amp=%d).".format( num_shards, len(canonical_keys), search_amplifier)) return canonical_keys def execute_on_all_shards(self, closure): results = {} for shard_num in range(0, self.num_shards()): shard = self.get_shard_by_num(shard_num) results[shard_num] = closure(shard) return results def run_on_all_shards(self, func, *args, **kwargs): results = {} for shard_num in range(0, self.num_shards()): shard = self.get_shard_by_num(shard_num) method = getattr(shard, func) if method is not None: results[shard_num] = getattr(shard, func)(*args, **kwargs) else: raise Exception("undefined method '%s' on shard %d".format( method, shard_num)) return results def keys(self, args): """ keys wrapper that queries every shard. This is an expensive operation. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """ results = {} # TODO: parallelize for shard_num in range(0, self.num_shards()): shard = self.get_shard_by_num(shard_num) results[shard_num] = shard.keys(args) return results def mget(self, args): """ mget wrapper that batches keys per shard and execute as few mgets as necessary to fetch the keys from all the shards involved. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. 
""" key_map = collections.defaultdict(list) results = {} for key in args: shard_num = self.get_shard_num_by_key(key) key_map[shard_num].append(key) # TODO: parallelize for shard_num in key_map.keys(): shard = self.get_shard_by_num(shard_num) results[shard_num] = shard.mget(key_map[shard_num]) return results def mset(self, args): """ mset wrapper that batches keys per shard and execute as few msets as necessary to set the keys in all the shards involved. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """ key_map = collections.defaultdict(dict) result_count = 0 for key in args.keys(): value = args[key] shard_num = self.get_shard_num_by_key(key) key_map[shard_num][key] = value # TODO: parallelize for shard_num in key_map.keys(): shard = self.get_shard_by_num(shard_num) result_count += shard.mset(key_map[shard_num]) return result_count def __getattr__(self, func_name): """ Allow directly calling StrictRedis operations on a TwemRedis instance. This will take the key and apply it to the appropriate shard. Certain operations like KEYS and MGET are supported but are handled in their own wrapper methods. """ def func(key, *args, **kwargs): if (func_name in self.disallowed_sharded_operations): raise Exception("Cannot call '%s' on sharded Redis".format( func_name)) shard = self.get_shard_by_key(key) return getattr(shard, func_name)(key, *args, **kwargs) return func
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from unittest import mock from oslo_config import cfg from oslo_context import context from oslo_serialization import jsonutils from oslotest import base as test_base import requests import webob.dec import webob.exc from ec2api import api as ec2 from ec2api import exception from ec2api.tests.unit import tools from ec2api import wsgi CONF = cfg.CONF @webob.dec.wsgify def conditional_forbid(req): """Helper wsgi app returns 403 if param 'die' is 1.""" if 'die' in req.params and req.params['die'] == '1': raise webob.exc.HTTPForbidden() return 'OK' class ExecutorTestCase(test_base.BaseTestCase): def setUp(self): super(ExecutorTestCase, self).setUp() self.executor = ec2.Executor() def _execute(self, invoke): class Fake(object): pass fake_ec2_request = Fake() fake_ec2_request.invoke = invoke fake_wsgi_request = Fake() fake_wsgi_request.environ = { 'ec2api.context': mock.Mock( request_id=context.generate_request_id()), 'ec2.request': fake_ec2_request, } return self.executor(fake_wsgi_request) def _extract_message(self, result): tree = etree.fromstring(result.body) return tree.findall('./Errors')[0].find('Error/Message').text def _extract_code(self, result): tree = etree.fromstring(result.body) return tree.findall('./Errors')[0].find('Error/Code').text def test_instance_not_found(self): def not_found(context): raise exception.InvalidInstanceIDNotFound(id='i-01') result = self._execute(not_found) self.assertIn('i-01', self._extract_message(result)) self.assertEqual('InvalidInstanceID.NotFound', self._extract_code(result)) def test_instance_not_found_none(self): def not_found(context): raise exception.InvalidInstanceIDNotFound(id=None) # NOTE(mikal): we want no exception to be raised here, which was what # was happening in bug/1080406 result = self._execute(not_found) self.assertIn('None', self._extract_message(result)) self.assertEqual('InvalidInstanceID.NotFound', self._extract_code(result)) def test_snapshot_not_found(self): def not_found(context): raise exception.InvalidSnapshotNotFound(id='snap-01') result = self._execute(not_found) self.assertIn('snap-01', self._extract_message(result)) self.assertEqual('InvalidSnapshot.NotFound', self._extract_code(result)) def test_volume_not_found(self): def not_found(context): raise exception.InvalidVolumeNotFound(id='vol-01') result = self._execute(not_found) self.assertIn('vol-01', self._extract_message(result)) self.assertEqual('InvalidVolume.NotFound', self._extract_code(result)) class FakeResponse(object): reason = "Test Reason" def __init__(self, status_code=400): self.status_code = status_code def json(self): return {} class KeystoneAuthTestCase(test_base.BaseTestCase): def setUp(self): super(KeystoneAuthTestCase, self).setUp() self.kauth = ec2.EC2KeystoneAuth(conditional_forbid) def _validate_ec2_error(self, response, http_status, 
ec2_code): self.assertEqual(response.status_code, http_status, 'Expected HTTP status %s' % http_status) root_e = etree.XML(response.body) self.assertEqual(root_e.tag, 'Response', "Top element must be Response.") errors_e = root_e.find('Errors') error_e = errors_e[0] code_e = error_e.find('Code') self.assertIsNotNone(code_e, "Code element must be present.") self.assertEqual(code_e.text, ec2_code) def test_no_signature(self): req = wsgi.Request.blank('/test') resp = self.kauth(req) self._validate_ec2_error(resp, 400, 'AuthFailure') def test_no_key_id(self): req = wsgi.Request.blank('/test') req.GET['Signature'] = 'test-signature' resp = self.kauth(req) self._validate_ec2_error(resp, 400, 'AuthFailure') @mock.patch.object(requests, 'request', return_value=FakeResponse()) def test_communication_failure(self, mock_request): req = wsgi.Request.blank('/test') req.GET['Signature'] = 'test-signature' req.GET['AWSAccessKeyId'] = 'test-key-id' resp = self.kauth(req) self._validate_ec2_error(resp, 400, 'AuthFailure') mock_request.assert_called_with('POST', CONF.keystone_ec2_tokens_url, data=mock.ANY, headers=mock.ANY) @tools.screen_all_logs @mock.patch.object(requests, 'request', return_value=FakeResponse(200)) def test_no_result_data(self, mock_request): req = wsgi.Request.blank('/test') req.GET['Signature'] = 'test-signature' req.GET['AWSAccessKeyId'] = 'test-key-id' resp = self.kauth(req) self._validate_ec2_error(resp, 400, 'AuthFailure') mock_request.assert_called_with('POST', CONF.keystone_ec2_tokens_url, data=mock.ANY, headers=mock.ANY) fake_request = mock.NonCallableMock(status_code=200, headers={}) fake_request.json.return_value = {'token': {}} mock_request.return_value = fake_request resp = self.kauth(req) self._validate_ec2_error(resp, 400, 'AuthFailure') fake_request.json.return_value = {'access': {}} resp = self.kauth(req) self._validate_ec2_error(resp, 400, 'AuthFailure') @tools.screen_unexpected_exception_logs @mock.patch.object(requests, 'request', return_value=FakeResponse(200)) def test_params_for_keystone_call(self, mock_request): req = wsgi.Request.blank('/test') req.GET['Signature'] = 'test-signature' req.GET['AWSAccessKeyId'] = 'test-key-id' self.kauth(req) mock_request.assert_called_with( 'POST', CONF.keystone_ec2_tokens_url, data=mock.ANY, headers=mock.ANY) data = jsonutils.loads(mock_request.call_args[1]['data']) expected_data = { 'ec2Credentials': { 'access': 'test-key-id', 'headers': {'Host': 'localhost:80'}, 'host': 'localhost:80', 'verb': 'GET', 'params': {'AWSAccessKeyId': 'test-key-id'}, 'signature': 'test-signature', 'path': '/test', 'body_hash': 'e3b0c44298fc1c149afbf4c8996fb924' '27ae41e4649b934ca495991b7852b855'}} self.assertEqual(expected_data, data)
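# Hedged illustration (not one of the test cases above; all values are made
# up): the XML document shape that _validate_ec2_error walks with lxml -- a
# Response root whose Errors element holds an Error carrying the EC2 code.
def _example_ec2_error_document():
    sample = (b'<Response>'
              b'<Errors><Error>'
              b'<Code>AuthFailure</Code>'
              b'<Message>Signature could not be verified</Message>'
              b'</Error></Errors>'
              b'<RequestID>example-request-id</RequestID>'
              b'</Response>')
    root = etree.XML(sample)
    assert root.tag == 'Response'
    assert root.find('Errors')[0].find('Code').text == 'AuthFailure'
    return root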
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ImageNet preprocessing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf IMAGE_SIZE = 224 CROP_PADDING = 32 class Preprocessor(object): """Preprocessing for image classification.""" def __init__(self, input_shape, num_classes, mean_rgb, stddev_rgb, use_augmentation=False): self.input_shape = input_shape self.num_classes = num_classes self.mean_rgb = mean_rgb self.stddev_rgb = stddev_rgb self.use_augmentation = use_augmentation def __call__(self, image, label, is_training=True): if self.use_augmentation: return self._preprocess_with_augmentation(image, label, is_training) return self._preprocess_without_augmentation(image, label) def _preprocess_with_augmentation(self, image, label, is_training): """Image preprocessing method with data augmentation.""" image_size = self.input_shape[0] if is_training: image = preprocess_for_train(image, image_size) else: image = preprocess_for_eval(image, image_size) image -= tf.constant(self.mean_rgb, shape=[1, 1, 3], dtype=image.dtype) image /= tf.constant(self.stddev_rgb, shape=[1, 1, 3], dtype=image.dtype) label = tf.one_hot(label, depth=self.num_classes) return image, label # TODO(yuqili): Changes to preprocess to support batch input. def _preprocess_without_augmentation(self, image, label): """Image preprocessing method without data augmentation.""" image = tf.cast(image, tf.float32) image -= tf.constant(self.mean_rgb, shape=[1, 1, 3], dtype=image.dtype) image /= tf.constant(self.stddev_rgb, shape=[1, 1, 3], dtype=image.dtype) image = tf.compat.v1.image.resize(image, self.input_shape) label = tf.one_hot(label, depth=self.num_classes) return image, label def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100): """Generates cropped_image using one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. 
Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. Returns: cropped image `Tensor` """ with tf.name_scope('distorted_bounding_box_crop'): shape = tf.shape(image) sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, _ = sample_distorted_bounding_box # Crop the image to the specified bounding box. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) image = tf.image.crop_to_bounding_box(image, offset_y, offset_x, target_height, target_width) return image def _at_least_x_are_equal(a, b, x): """At least `x` of `a` and `b` `Tensors` are equal.""" match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x) def _resize_image(image, image_size, method=None): if method is not None: tf.compat.v1.logging.info('Use customized resize method {}'.format(method)) return tf.compat.v1.image.resize([image], [image_size, image_size], method)[0] tf.compat.v1.logging.info('Use default resize_bicubic.') return tf.compat.v1.image.resize_bicubic([image], [image_size, image_size])[0] def _decode_and_random_crop(original_image, image_size, resize_method=None): """Make a random crop of image_size.""" bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) image = distorted_bounding_box_crop( original_image, bbox, min_object_covered=0.1, aspect_ratio_range=(3. / 4, 4. / 3.), area_range=(0.08, 1.0), max_attempts=10) original_shape = tf.shape(original_image) bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) image = tf.cond(bad, lambda: _decode_and_center_crop(original_image, image_size), lambda: _resize_image(image, image_size, resize_method)) return image def _decode_and_center_crop(image, image_size, resize_method=None): """Crops to center of image with padding then scales image_size.""" shape = tf.shape(image) image_height = shape[0] image_width = shape[1] padded_center_crop_size = tf.cast( ((image_size / (image_size + CROP_PADDING)) * tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32) offset_height = ((image_height - padded_center_crop_size) + 1) // 2 offset_width = ((image_width - padded_center_crop_size) + 1) // 2 image = tf.image.crop_to_bounding_box(image, offset_height, offset_width, padded_center_crop_size, padded_center_crop_size) image = _resize_image(image, image_size, resize_method) return image def _flip(image): """Random horizontal image flip.""" image = tf.image.random_flip_left_right(image) return image def preprocess_for_train(image, image_size=IMAGE_SIZE, resize_method=tf.image.ResizeMethod.BILINEAR): """Preprocesses the given image for evaluation. Args: image: 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. image_size: image size. resize_method: resize method. If none, use bicubic. Returns: A preprocessed image `Tensor`. 
""" image = _decode_and_random_crop(image, image_size, resize_method) image = _flip(image) image = tf.reshape(image, [image_size, image_size, 3]) image = tf.image.convert_image_dtype(image, dtype=tf.float32) return image def preprocess_for_eval(image, image_size=IMAGE_SIZE, resize_method=tf.image.ResizeMethod.BILINEAR): """Preprocesses the given image for evaluation. Args: image: 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. image_size: image size. resize_method: if None, use bicubic. Returns: A preprocessed image `Tensor`. """ image = _decode_and_center_crop(image, image_size, resize_method) image = tf.reshape(image, [image_size, image_size, 3]) image = tf.image.convert_image_dtype(image, dtype=tf.float32) return image
#!/usr/bin/env python # -*- coding: UTF-8 -*- # # Copyright 2017 University of Westminster. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ It is an interface for reading and writing Comma Separated Values (CSV) files. """ from typing import Dict, List, TypeVar, Any, Callable import sys import pandas as pd import os import csv import logging from Configs.CONSTANTS import CONSTANTS PandasDataFrame = TypeVar('DataFrame') __author__ = "Mohsen Mesgarpour" __copyright__ = "Copyright 2016, https://github.com/mesgarpour" __credits__ = ["Mohsen Mesgarpour"] __license__ = "GPL" __version__ = "1.1" __maintainer__ = "Mohsen Mesgarpour" __email__ = "[email protected]" __status__ = "Release" class CsvFile: def __init__(self): """Initialise the objects and constants. """ self.__logger = logging.getLogger(CONSTANTS.app_name) self.__logger.debug(__name__) self.__path = None def set(self, path: str, title: str, ext: str = "csv"): """Set the CSV file for reading or writing. :param path: the directory path of the CSV file. :param title: the file name of the CSV file. :param ext: the extension of the CSV file (default: 'csv'). """ self.__logger.debug("Set the CSV file.") self.__path = os.path.join(path, title + "." + ext) def reset(self): """Reset the CSV file reader/writer. """ self.__logger.debug("Reset the CSV File.") try: open(self.__path, 'w').close() except (OSError, IOError) as e: self.__logger.error(__name__ + " - Can not open the file: " + self.__path + "\n" + str(e)) sys.exit() except(): self.__logger.error(__name__ + " - Could not create the file: \n" + self.__path) sys.exit() def exists(self) -> bool: """Check if the CSV file exists. :return: indicates if the file exists. """ self.__logger.debug("Check if the CSV file exists.") return os.path.isfile(self.__path) def exists_column(self, column: str, skip: int = 0) -> bool: """Check if the CSV file exists. :param column: name of the column. :param skip: lines to skip before reading or writing. :return: indicates if the column exists. """ self.__logger.debug("Check if a column exists in the CSV File.") i = 0 try: with open(self.__path, "r") as f: for line in f: if i > skip: if column not in set(line.split(",")): return False else: return True except (OSError, IOError) as e: self.__logger.error(__name__ + " - Can not open the file: " + self.__path + "\n" + str(e)) sys.exit() except(): self.__logger.error(__name__ + " - Can not read the file: \n" + self.__path) sys.exit() def read(self, skip: int=0, dataframing: bool=True, **kwargs: Any) -> Callable[[List, PandasDataFrame], None]: """Read the CSV file into dataframe or list. :param skip: lines to skip before reading. :param dataframing: indicates if the outputs must be saved into dataframe. :param kwargs: any other arguments that the selected reader may accept. :return: the read file contents. 
""" self.__logger.debug("Read the CSV File.") if dataframing: rows = self.__read_dataframe(skip, **kwargs) else: rows = self.__read_array(skip) return rows def __read_dataframe(self, skip: int=0, **kwargs: Any) -> PandasDataFrame: """Read the CSV file into dataframe. :param skip: lines to skip before reading. :param kwargs: any other arguments that the selected reader may accept. :return: the read file contents. """ self.__logger.debug("Read the CSV File into Dataframe.") try: rows = pd.read_csv(self.__path, skiprows=skip, **kwargs) except(): self.__logger.error(__name__ + " - Can not read the file into a dataframe: \n" + self.__path) sys.exit() return rows def __read_array(self, skip: int = 0) -> List: """Read the CSV file into array. :param skip: lines to skip before reading. :return: the read file contents. """ self.__logger.debug("Read the CSV File into array.") rows = [] i = 0 with open(self.__path, "r") as f: try: for line in f: i += 1 if i > skip: rows.append(line.split(",")) except (OSError, IOError) as e: self.__logger.error(__name__ + " - Can not open the file: " + self.__path + "\n" + str(e)) sys.exit() except(): self.__logger.error(__name__ + " - Can not read the file: \n" + self.__path) sys.exit() return rows def append(self, data: Callable[[List, Dict, PandasDataFrame], None], **kwargs: Any): """Append to CSV file using dataframe, dictionary or list. :param data: the data to write. :param kwargs: any other arguments that the selected writer may accept. """ self.__logger.debug("Append to the CSV file.") if isinstance(data, pd.DataFrame): self.__append_dataframe(data, **kwargs) elif isinstance(data, list): self.__append_array(data) elif isinstance(data, dict): self.__append_dict(data) else: self.__logger.error(__name__ + " - Invalid object to write into file!\n" + str(type(data))) sys.exit() return True def __append_dataframe(self, data: PandasDataFrame, max_line_width: int = 100000000, **kwargs: Any): """Append to CSV file using dataframe. :param data: the dataframe to write. :param max_line_width: max line width (PANDAS: display.width). :param kwargs: any other arguments that the selected writer may accept. """ self.__logger.debug("Append a Dataframe to the CSV file.") kwargs["header"] = False if "header" not in kwargs.keys() else kwargs["header"] kwargs["index"] = False if "index" not in kwargs.keys() else kwargs["index"] try: with open(self.__path, 'a') as f: pd.set_option("display.width", max_line_width) pd.set_option("display.max_rows", data.shape[0]) pd.set_option("display.max_columns", data.shape[1]) data.to_csv(f, header=kwargs["header"], index=kwargs["index"]) pd.reset_option("display.width") pd.reset_option("display.max_rows") pd.reset_option("display.max_columns") except(): self.__logger.error(__name__ + " - Can not append dataframe to file: \n" + self.__path) sys.exit() def __append_dict(self, data: Dict[str, str]): """Append to CSV file using dictionary. :param data: the dictionary to write. """ self.__logger.debug("Append a Dictionary to the CSV file.") try: with open(self.__path, 'a') as f: w = csv.DictWriter(f, data.keys()) w.writeheader() w.writerow(data) except (OSError, IOError) as e: self.__logger.error(__name__ + " - Can not open the file: " + self.__path + "\n" + str(e)) sys.exit() except(): self.__logger.error(__name__ + " - Can not append dictionary to file: \n" + self.__path) sys.exit() def __append_array(self, data: List): """Append to CSV file using list. :param data: the list to write. 
""" self.__logger.debug("Append an Array to the CSV file.") if data is None or data == "" or data == []: return elif not isinstance(data, list): data = [[data]] elif not isinstance(data[0], list): data = [data] # write try: with open(self.__path, 'a+b') as f: # flatten 2D list for row in data: f.write((",".join(row) + "\n").encode()) except (OSError, IOError) as e: self.__logger.error(__name__ + " - Can not open the file: " + self.__path + "\n" + str(e)) sys.exit() except(): self.__logger.error(__name__ + " - Can not write a row into the file: \n" + self.__path) sys.exit() def size(self) -> int: """Check number of lines in the CSV file. :return: number of lines in the file. """ self.__logger.debug("Check number of lines in the CSV file.") cnt_lines = 0 try: with open(self.__path, "r") as f: for _ in f: cnt_lines += 1 except (OSError, IOError) as e: self.__logger.error(__name__ + " - Can not open the file: " + self.__path + "\n" + str(e)) sys.exit() except(): self.__logger.error(__name__ + " - Can not read the file: " + self.__path) sys.exit() return cnt_lines
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # from __future__ import print_function import curses import sys import signal import processInput import usageStrings import output import logger import format from charCodeMapping import CODE_TO_CHAR from colorPrinter import ColorPrinter def signal_handler(signal, frame): # from http://stackoverflow.com/a/1112350/948126 # Lets just quit rather than signal.SIGINT printing the stack sys.exit(0) signal.signal(signal.SIGINT, signal_handler) CHROME_MIN_X = 5 CHROME_MIN_Y = 0 SELECT_MODE = 'SELECT' COMMAND_MODE = 'COMMAND_MODE' X_MODE = 'X_MODE' lbls = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890~!@#$%^&*()_+<>?{}|;'" SHORT_NAV_USAGE = '[f|A] selection, [down|j|up|k|space|b] navigation, [enter] open, [x] quick select mode, [c] command mode' SHORT_COMMAND_USAGE = 'command examples: | git add | git checkout HEAD~1 -- | mv $F ../here/ |' SHORT_COMMAND_PROMPT = 'Type a command below! Files will be appended or replace $F' SHORT_COMMAND_PROMPT2 = 'Enter a blank line to go back to the selection process' SHORT_FILES_HEADER = 'Files you have selected:' INVISIBLE_CURSOR = 0 BLOCK_CURSOR = 2 class HelperChrome(object): def __init__(self, printer, screenControl): self.printer = printer self.screenControl = screenControl self.WIDTH = 50 if self.getIsSidebarMode(): logger.addEvent('init_wide_mode') else: logger.addEvent('init_narrow_mode') def output(self, mode): self.mode = mode for func in [self.outputSide, self.outputBottom, self.toggleCursor]: try: func() except curses.error: pass def toggleCursor(self): # only include cursor when in command mode if self.mode == COMMAND_MODE: curses.curs_set(BLOCK_CURSOR) else: curses.curs_set(INVISIBLE_CURSOR) def reduceMaxY(self, maxy): if self.getIsSidebarMode(): return maxy return maxy - 4 def reduceMaxX(self, maxx): if not self.getIsSidebarMode(): return maxx return maxx - self.WIDTH def getMinX(self): if self.mode == COMMAND_MODE: return 0 return self.screenControl.getChromeBoundaries()[0] def getMinY(self): return self.screenControl.getChromeBoundaries()[1] def getIsSidebarMode(self): (maxy, maxx) = self.screenControl.getScreenDimensions() return maxx > 200 def outputSide(self): if not self.getIsSidebarMode(): return (maxy, maxx) = self.screenControl.getScreenDimensions() borderX = maxx - self.WIDTH if (self.mode == COMMAND_MODE): borderX = len(SHORT_COMMAND_PROMPT) + 20 usageLines = usageStrings.USAGE_PAGE.split('\n') if self.mode == COMMAND_MODE: usageLines = usageStrings.USAGE_COMMAND.split('\n') for index, usageLine in enumerate(usageLines): self.printer.addstr(self.getMinY() + index, borderX + 2, usageLine) for y in range(self.getMinY(), maxy): self.printer.addstr(y, borderX, '|') def outputBottom(self): if self.getIsSidebarMode(): return (maxy, maxx) = self.screenControl.getScreenDimensions() borderY = maxy - 2 # first output text since we might throw an exception during border usageStr = { SELECT_MODE: SHORT_NAV_USAGE, X_MODE: SHORT_NAV_USAGE, COMMAND_MODE: SHORT_COMMAND_USAGE }[self.mode] borderStr = '_' * (maxx - self.getMinX() - 0) self.printer.addstr(borderY, self.getMinX(), borderStr) self.printer.addstr(borderY + 1, self.getMinX(), usageStr) class ScrollBar(object): def __init__(self, printer, lines, screenControl): self.printer = printer self.screenControl = 
screenControl self.numLines = len(lines) self.boxStartFraction = 0.0 self.boxStopFraction = 0.0 self.calcBoxFractions() # see if we are activated self.activated = True (maxy, maxx) = self.screenControl.getScreenDimensions() if (self.numLines < maxy): self.activated = False logger.addEvent('no_scrollbar') else: logger.addEvent('needed_scrollbar') def getIsActivated(self): return self.activated def calcBoxFractions(self): # what we can see is basically the fraction of our screen over # total num lines (maxy, maxx) = self.screenControl.getScreenDimensions() fracDisplayed = min(1.0, (maxy / float(self.numLines))) self.boxStartFraction = -self.screenControl.getScrollOffset() / float( self.numLines) self.boxStopFraction = self.boxStartFraction + fracDisplayed def output(self): if not self.activated: return for func in [self.outputCaps, self.outputBase, self.outputBox, self.outputBorder]: try: func() except curses.error: pass def getMinY(self): return self.screenControl.getChromeBoundaries()[1] + 1 def getX(self): return 0 def outputBorder(self): x = self.getX() + 4 (maxy, maxx) = self.screenControl.getScreenDimensions() for y in range(0, maxy): self.printer.addstr(y, x, ' ') def outputBox(self): (maxy, maxx) = self.screenControl.getScreenDimensions() topY = maxy - 2 minY = self.getMinY() diff = topY - minY x = self.getX() boxStartY = int(diff * self.boxStartFraction) + minY boxStopY = int(diff * self.boxStopFraction) + minY self.printer.addstr(boxStartY, x, '/-\\') for y in range(boxStartY + 1, boxStopY): self.printer.addstr(y, x, '|-|') self.printer.addstr(boxStopY, x, '\-/') def outputCaps(self): x = self.getX() (maxy, maxx) = self.screenControl.getScreenDimensions() for y in [self.getMinY() - 1, maxy - 1]: self.printer.addstr(y, x, '===') def outputBase(self): x = self.getX() (maxy, maxx) = self.screenControl.getScreenDimensions() for y in range(self.getMinY(), maxy - 1): self.printer.addstr(y, x, ' . 
') class Controller(object): def __init__(self, flags, stdscr, lineObjs, cursesAPI): self.stdscr = stdscr self.cursesAPI = cursesAPI self.cursesAPI.useDefaultColors() self.colorPrinter = ColorPrinter(self.stdscr, cursesAPI) self.flags = flags self.lineObjs = lineObjs self.hoverIndex = 0 self.scrollOffset = 0 self.scrollBar = ScrollBar(self.colorPrinter, lineObjs, self) self.helperChrome = HelperChrome(self.colorPrinter, self) (self.oldmaxy, self.oldmaxx) = self.getScreenDimensions() self.mode = SELECT_MODE # lets loop through and split self.lineMatches = [] for lineObj in self.lineObjs.values(): lineObj.controller = self if not lineObj.isSimple(): self.lineMatches.append(lineObj) self.numLines = len(lineObjs.keys()) self.numMatches = len(self.lineMatches) # begin tracking dirty state self.resetDirty() self.setHover(self.hoverIndex, True) # the scroll offset might not start off # at 0 if our first real match is WAY # down the screen -- so lets init it to # a valid value after we have all our line objects self.updateScrollOffset() logger.addEvent('init') def getScrollOffset(self): return self.scrollOffset def getScreenDimensions(self): return self.stdscr.getmaxyx() def getChromeBoundaries(self): (maxy, maxx) = self.stdscr.getmaxyx() minx = CHROME_MIN_X if self.scrollBar.getIsActivated( ) or self.mode == X_MODE else 0 maxy = self.helperChrome.reduceMaxY(maxy) maxx = self.helperChrome.reduceMaxX(maxx) # format of (MINX, MINY, MAXX, MAXY) return (minx, CHROME_MIN_Y, maxx, maxy) def getViewportHeight(self): (minx, miny, maxx, maxy) = self.getChromeBoundaries() return maxy - miny def setHover(self, index, val): self.lineMatches[index].setHover(val) def toggleSelect(self): self.lineMatches[self.hoverIndex].toggleSelect() def toggleSelectAll(self): files = set() for line in self.lineMatches: if line.getFile() not in files: files.add(line.getFile()) line.toggleSelect() def setSelect(self, val): self.lineMatches[self.hoverIndex].setSelect(val) def control(self): # we start out by printing everything we need to self.printAll() self.resetDirty() self.moveCursor() while True: inKey = self.getKey() self.checkResize() self.processInput(inKey) self.processDirty() self.resetDirty() self.moveCursor() self.stdscr.refresh() def checkResize(self): (maxy, maxx) = self.getScreenDimensions() if (maxy is not self.oldmaxy or maxx is not self.oldmaxx): # we resized so print all! self.printAll() self.resetDirty() self.stdscr.refresh() logger.addEvent('resize') (self.oldmaxy, self.oldmaxx) = self.getScreenDimensions() def updateScrollOffset(self): """ yay scrolling logic! 
we will start simple here and basically just center the viewport to current matched line """ windowHeight = self.getViewportHeight() halfHeight = int(round(windowHeight / 2.0)) # important, we need to get the real SCREEN position # of the hover index, not its index within our matches hovered = self.lineMatches[self.hoverIndex] desiredTopRow = hovered.getScreenIndex() - halfHeight oldOffset = self.scrollOffset desiredTopRow = max(desiredTopRow, 0) newOffset = -desiredTopRow # lets add in some leeway -- dont bother repositioning # if the old offset is within 1/2 of the window height # of our desired (unless we absolutely have to) if abs(newOffset - oldOffset) > halfHeight / 2 or self.hoverIndex + oldOffset < 0: # need to reassign now we have gone too far self.scrollOffset = newOffset if oldOffset is not self.scrollOffset: self.dirtyAll() # also update our scroll bar self.scrollBar.calcBoxFractions() def pageDown(self): pageHeight = (int)(self.getViewportHeight() * 0.5) self.moveIndex(pageHeight) def pageUp(self): pageHeight = (int)(self.getViewportHeight() * 0.5) self.moveIndex(-pageHeight) def moveIndex(self, delta): newIndex = (self.hoverIndex + delta) % self.numMatches self.jumpToIndex(newIndex) def jumpToIndex(self, newIndex): self.setHover(self.hoverIndex, False) self.hoverIndex = newIndex self.setHover(self.hoverIndex, True) self.updateScrollOffset() def processInput(self, key): if key == 'UP' or key == 'k': self.moveIndex(-1) elif key == 'DOWN' or key == 'j': self.moveIndex(1) elif key == 'x': self.toggleXMode() elif key == 'c': self.beginEnterCommand() elif key == ' ' or key == 'PAGE_DOWN': self.pageDown() elif key == 'b' or key == 'PAGE_UP': self.pageUp() elif key == 'g': self.jumpToIndex(0) elif key == 'G' and not self.mode == X_MODE: self.jumpToIndex(self.numMatches - 1) elif key == 'f': self.toggleSelect() elif key == 'F': self.toggleSelect() self.moveIndex(1) elif key == 'A' and not self.mode == X_MODE: self.toggleSelectAll() elif key == 'ENTER': self.onEnter() elif key == 'q': output.outputNothing() # this will get the appropriate selection and save it to a file for reuse # before exiting the program self.getFilesToUse() self.cursesAPI.exit() elif self.mode == X_MODE and key in lbls: self.selectXMode(key) pass def getFilesToUse(self): # if we have select files, those, otherwise hovered toUse = self.getSelectedFiles() if not toUse: toUse = self.getHoveredFiles() # save the selection we are using if self.cursesAPI.allowFileOutput(): output.outputSelection(toUse) return toUse def getSelectedFiles(self): return [lineObj for (index, lineObj) in enumerate(self.lineMatches) if lineObj.getSelected()] def getHoveredFiles(self): return [lineObj for (index, lineObj) in enumerate(self.lineMatches) if index == self.hoverIndex] def showAndGetCommand(self): fileObjs = self.getFilesToUse() files = [fileObj.getFile() for fileObj in fileObjs] (maxy, maxx) = self.getScreenDimensions() halfHeight = int(round(maxy / 2) - len(files) / 2.0) borderLine = '=' * len(SHORT_COMMAND_PROMPT) promptLine = '.' 
* len(SHORT_COMMAND_PROMPT) # from helper chrome code maxFileLength = maxx - 5 if self.helperChrome.getIsSidebarMode(): # need to be shorter to not go into side bar maxFileLength = len(SHORT_COMMAND_PROMPT) + 18 # first lets print all the files startHeight = halfHeight - 1 - len(files) try: self.colorPrinter.addstr(startHeight - 3, 0, borderLine) self.colorPrinter.addstr(startHeight - 2, 0, SHORT_FILES_HEADER) self.colorPrinter.addstr(startHeight - 1, 0, borderLine) for index, file in enumerate(files): self.colorPrinter.addstr(startHeight + index, 0, file[0:maxFileLength]) except curses.error: pass # first print prompt try: self.colorPrinter.addstr(halfHeight, 0, SHORT_COMMAND_PROMPT) self.colorPrinter.addstr(halfHeight + 1, 0, SHORT_COMMAND_PROMPT2) except curses.error: pass # then line to distinguish and prompt line try: self.colorPrinter.addstr(halfHeight - 1, 0, borderLine) self.colorPrinter.addstr(halfHeight + 2, 0, borderLine) self.colorPrinter.addstr(halfHeight + 3, 0, promptLine) except curses.error: pass self.stdscr.refresh() self.cursesAPI.echo() maxX = int(round(maxx - 1)) command = self.stdscr.getstr(halfHeight + 3, 0, maxX) return command def beginEnterCommand(self): self.stdscr.erase() # first check if they are trying to enter command mode # but already have a command... if len(self.flags.getPresetCommand()): self.helperChrome.output(self.mode) (minX, minY, _, maxY) = self.getChromeBoundaries() yStart = (maxY + minY) / 2 - 3 self.printProvidedCommandWarning(yStart, minX) self.stdscr.refresh() self.getKey() self.mode = SELECT_MODE self.dirtyAll() return self.mode = COMMAND_MODE self.helperChrome.output(self.mode) logger.addEvent('enter_command_mode') command = self.showAndGetCommand() if len(command) == 0: # go back to selection mode and repaint self.mode = SELECT_MODE self.cursesAPI.noecho() self.dirtyAll() logger.addEvent('exit_command_mode') return lineObjs = self.getFilesToUse() output.execComposedCommand(command, lineObjs) sys.exit(0) def onEnter(self): lineObjs = self.getFilesToUse() if not lineObjs: # nothing selected, assume we want hovered lineObjs = self.getHoveredFiles() logger.addEvent('selected_num_files', len(lineObjs)) # commands passed from the command line get used immediately presetCommand = self.flags.getPresetCommand() if len(presetCommand) > 0: output.execComposedCommand(presetCommand, lineObjs) else: output.editFiles(lineObjs) sys.exit(0) def resetDirty(self): # reset all dirty state for our components self.dirty = False self.dirtyIndexes = [] def dirtyLine(self, index): self.dirtyIndexes.append(index) def dirtyAll(self): self.dirty = True def processDirty(self): if self.dirty: self.printAll() return (minx, miny, maxx, maxy) = self.getChromeBoundaries() didClearLine = False for index in self.dirtyIndexes: y = miny + index + self.getScrollOffset() if y >= miny or y < maxy: didClearLine = True self.clearLine(y) self.lineObjs[index].output(self.colorPrinter) if didClearLine and self.helperChrome.getIsSidebarMode(): # now we need to output the chrome again since on wide # monitors we will have cleared out a line of the chrome self.helperChrome.output(self.mode) def clearLine(self, y): '''Clear a line of content, excluding the chrome''' (minx, _, _, _) = self.getChromeBoundaries() (_, maxx) = self.stdscr.getmaxyx() charsToDelete = range(minx, maxx) # we go in the **reverse** order since the original documentation # of delchar (http://dell9.ma.utexas.edu/cgi-bin/man-cgi?delch+3) # mentions that delchar actually moves all the characters to the right # of the 
cursor for x in reversed(charsToDelete): self.stdscr.delch(y, x) def printAll(self): self.stdscr.erase() self.printLines() self.printScroll() self.printXMode() self.printChrome() def printLines(self): for lineObj in self.lineObjs.values(): lineObj.output(self.colorPrinter) def printScroll(self): self.scrollBar.output() def printProvidedCommandWarning(self, yStart, xStart): self.colorPrinter.addstr(yStart, xStart, 'Oh no! You already provided a command so ' + 'you cannot enter command mode.', self.colorPrinter.getAttributes(curses.COLOR_WHITE, curses.COLOR_RED, 0)) self.colorPrinter.addstr( yStart + 1, xStart, 'The command you provided was "%s" ' % self.flags.getPresetCommand()) self.colorPrinter.addstr( yStart + 2, xStart, 'Press any key to go back to selecting files.') def printChrome(self): self.helperChrome.output(self.mode) def moveCursor(self): x = CHROME_MIN_X if self.scrollBar.getIsActivated() else 0 y = self.lineMatches[ self.hoverIndex].getScreenIndex() + self.scrollOffset self.stdscr.move(y, x) def getKey(self): charCode = self.stdscr.getch() return CODE_TO_CHAR.get(charCode, '') def toggleXMode(self): self.mode = X_MODE if self.mode != X_MODE else SELECT_MODE self.printAll() def printXMode(self): if self.mode == X_MODE: (maxy, _) = self.scrollBar.screenControl.getScreenDimensions() topY = maxy - 2 minY = self.scrollBar.getMinY() - 1 for i in range(minY, topY + 1): self.colorPrinter.addstr(i, 1, lbls[i - minY]) def selectXMode(self, key): lineObj = self.lineObjs[ lbls.index(key) - self.scrollOffset] if type(lineObj) == format.LineMatch: lineMatchIndex = self.lineMatches.index(lineObj) self.hoverIndex = lineMatchIndex self.toggleSelect()
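# Hedged sketch (illustrative, not part of fpp): the viewport-centering rule
# from Controller.updateScrollOffset, pulled out as a pure function so the
# math can be followed (and tested) without a curses screen.
def _centeredScrollOffset(hoveredScreenIndex, viewportHeight):
    halfHeight = int(round(viewportHeight / 2.0))
    desiredTopRow = max(hoveredScreenIndex - halfHeight, 0)
    return -desiredTopRow

# e.g. hovering the match drawn on screen row 40 inside a 20-row viewport
# yields an offset of -30, scrolling the view so the match sits mid-screen.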
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Serialized DAG table in database.""" import hashlib import logging from datetime import datetime, timedelta from typing import Any, Dict, List, Optional import sqlalchemy_jsonfield from sqlalchemy import BigInteger, Column, Index, String, and_ from sqlalchemy.orm import Session, backref, foreign, relationship from sqlalchemy.sql.expression import func, literal from airflow.models.base import ID_LEN, Base from airflow.models.dag import DAG, DagModel from airflow.models.dagcode import DagCode from airflow.models.dagrun import DagRun from airflow.serialization.serialized_objects import DagDependency, SerializedDAG from airflow.settings import MIN_SERIALIZED_DAG_UPDATE_INTERVAL, json from airflow.utils import timezone from airflow.utils.session import provide_session from airflow.utils.sqlalchemy import UtcDateTime log = logging.getLogger(__name__) class SerializedDagModel(Base): """A table for serialized DAGs. serialized_dag table is a snapshot of DAG files synchronized by scheduler. This feature is controlled by: * ``[core] min_serialized_dag_update_interval = 30`` (s): serialized DAGs are updated in DB when a file gets processed by scheduler, to reduce DB write rate, there is a minimal interval of updating serialized DAGs. * ``[scheduler] dag_dir_list_interval = 300`` (s): interval of deleting serialized DAGs in DB when the files are deleted, suggest to use a smaller interval such as 60 It is used by webserver to load dags because reading from database is lightweight compared to importing from files, it solves the webserver scalability issue. """ __tablename__ = 'serialized_dag' dag_id = Column(String(ID_LEN), primary_key=True) fileloc = Column(String(2000), nullable=False) # The max length of fileloc exceeds the limit of indexing. 
fileloc_hash = Column(BigInteger, nullable=False) data = Column(sqlalchemy_jsonfield.JSONField(json=json), nullable=False) last_updated = Column(UtcDateTime, nullable=False) dag_hash = Column(String(32), nullable=False) __table_args__ = (Index('idx_fileloc_hash', fileloc_hash, unique=False),) dag_runs = relationship( DagRun, primaryjoin=dag_id == foreign(DagRun.dag_id), backref=backref('serialized_dag', uselist=False, innerjoin=True), ) dag_model = relationship( DagModel, primaryjoin=dag_id == DagModel.dag_id, # type: ignore foreign_keys=dag_id, uselist=False, innerjoin=True, backref=backref('serialized_dag', uselist=False, innerjoin=True), ) load_op_links = True def __init__(self, dag: DAG): self.dag_id = dag.dag_id self.fileloc = dag.full_filepath self.fileloc_hash = DagCode.dag_fileloc_hash(self.fileloc) self.data = SerializedDAG.to_dict(dag) self.last_updated = timezone.utcnow() self.dag_hash = hashlib.md5(json.dumps(self.data, sort_keys=True).encode("utf-8")).hexdigest() def __repr__(self): return f"<SerializedDag: {self.dag_id}>" @classmethod @provide_session def write_dag(cls, dag: DAG, min_update_interval: Optional[int] = None, session: Session = None) -> bool: """Serializes a DAG and writes it into database. If the record already exists, it checks if the Serialized DAG changed or not. If it is changed, it updates the record, ignores otherwise. :param dag: a DAG to be written into database :param min_update_interval: minimal interval in seconds to update serialized DAG :param session: ORM Session :returns: Boolean indicating if the DAG was written to the DB """ # Checks if (Current Time - Time when the DAG was written to DB) < min_update_interval # If Yes, does nothing # If No or the DAG does not exists, updates / writes Serialized DAG to DB if min_update_interval is not None: if ( session.query(literal(True)) .filter( and_( cls.dag_id == dag.dag_id, (timezone.utcnow() - timedelta(seconds=min_update_interval)) < cls.last_updated, ) ) .first() is not None ): # TODO: .first() is not None can be changed to .scalar() once we update to sqlalchemy 1.4+ # as the associated sqlalchemy bug for MySQL was fixed # related issue : https://github.com/sqlalchemy/sqlalchemy/issues/5481 return False log.debug("Checking if DAG (%s) changed", dag.dag_id) new_serialized_dag = cls(dag) serialized_dag_hash_from_db = session.query(cls.dag_hash).filter(cls.dag_id == dag.dag_id).scalar() if serialized_dag_hash_from_db == new_serialized_dag.dag_hash: log.debug("Serialized DAG (%s) is unchanged. Skipping writing to DB", dag.dag_id) return False log.debug("Writing Serialized DAG: %s to the DB", dag.dag_id) session.merge(new_serialized_dag) log.debug("DAG: %s written to the DB", dag.dag_id) return True @classmethod @provide_session def read_all_dags(cls, session: Session = None) -> Dict[str, 'SerializedDAG']: """Reads all DAGs in serialized_dag table. :param session: ORM Session :returns: a dict of DAGs read from database """ serialized_dags = session.query(cls) dags = {} for row in serialized_dags: log.debug("Deserializing DAG: %s", row.dag_id) dag = row.dag # Sanity check. 
if dag.dag_id == row.dag_id: dags[row.dag_id] = dag else: log.warning( "dag_id Mismatch in DB: Row with dag_id '%s' has Serialised DAG with '%s' dag_id", row.dag_id, dag.dag_id, ) return dags @property def dag(self): """The DAG deserialized from the ``data`` column""" SerializedDAG._load_operator_extra_links = self.load_op_links # pylint: disable=protected-access if isinstance(self.data, dict): dag = SerializedDAG.from_dict(self.data) # type: Any else: dag = SerializedDAG.from_json(self.data) # noqa return dag @classmethod @provide_session def remove_dag(cls, dag_id: str, session: Session = None): """Deletes a DAG with given dag_id. :param dag_id: dag_id to be deleted :param session: ORM Session """ # pylint: disable=no-member session.execute(cls.__table__.delete().where(cls.dag_id == dag_id)) @classmethod @provide_session def remove_deleted_dags(cls, alive_dag_filelocs: List[str], session=None): """Deletes DAGs not included in alive_dag_filelocs. :param alive_dag_filelocs: file paths of alive DAGs :param session: ORM Session """ alive_fileloc_hashes = [DagCode.dag_fileloc_hash(fileloc) for fileloc in alive_dag_filelocs] log.debug( "Deleting Serialized DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__ ) # pylint: disable=no-member session.execute( cls.__table__.delete().where( and_(cls.fileloc_hash.notin_(alive_fileloc_hashes), cls.fileloc.notin_(alive_dag_filelocs)) ) ) @classmethod @provide_session def has_dag(cls, dag_id: str, session: Session = None) -> bool: """Checks a DAG exist in serialized_dag table. :param dag_id: the DAG to check :param session: ORM Session """ return session.query(literal(True)).filter(cls.dag_id == dag_id).first() is not None @classmethod @provide_session def get(cls, dag_id: str, session: Session = None) -> Optional['SerializedDagModel']: """ Get the SerializedDAG for the given dag ID. It will cope with being passed the ID of a subdag by looking up the root dag_id from the DAG table. :param dag_id: the DAG to fetch :param session: ORM Session """ row = session.query(cls).filter(cls.dag_id == dag_id).one_or_none() if row: return row # If we didn't find a matching DAG id then ask the DAG table to find # out the root dag root_dag_id = session.query(DagModel.root_dag_id).filter(DagModel.dag_id == dag_id).scalar() return session.query(cls).filter(cls.dag_id == root_dag_id).one_or_none() @staticmethod @provide_session def bulk_sync_to_db(dags: List[DAG], session: Session = None): """ Saves DAGs as Serialized DAG objects in the database. Each DAG is saved in a separate database query. 
:param dags: the DAG objects to save to the DB :type dags: List[airflow.models.dag.DAG] :param session: ORM Session :type session: Session :return: None """ for dag in dags: if not dag.is_subdag: SerializedDagModel.write_dag( dag, min_update_interval=MIN_SERIALIZED_DAG_UPDATE_INTERVAL, session=session ) @classmethod @provide_session def get_last_updated_datetime(cls, dag_id: str, session: Session = None) -> datetime: """ Get the date when the Serialized DAG associated to DAG was last updated in serialized_dag table :param dag_id: DAG ID :type dag_id: str :param session: ORM Session :type session: Session """ return session.query(cls.last_updated).filter(cls.dag_id == dag_id).scalar() @classmethod @provide_session def get_max_last_updated_datetime(cls, session: Session = None) -> datetime: """ Get the maximum date when any DAG was last updated in serialized_dag table :param session: ORM Session :type session: Session """ return session.query(func.max(cls.last_updated)).scalar() @classmethod @provide_session def get_latest_version_hash(cls, dag_id: str, session: Session = None) -> str: """ Get the latest DAG version for a given DAG ID. :param dag_id: DAG ID :type dag_id: str :param session: ORM Session :type session: Session :return: DAG Hash :rtype: str """ return session.query(cls.dag_hash).filter(cls.dag_id == dag_id).scalar() @classmethod @provide_session def get_dag_dependencies(cls, session: Session = None) -> Dict[str, List['DagDependency']]: """ Get the dependencies between DAGs :param session: ORM Session :type session: Session """ dependencies = {} if session.bind.dialect.name in ["sqlite", "mysql"]: for row in session.query(cls.dag_id, func.json_extract(cls.data, "$.dag.dag_dependencies")).all(): dependencies[row[0]] = [DagDependency(**d) for d in json.loads(row[1])] elif session.bind.dialect.name == "mssql": for row in session.query(cls.dag_id, func.json_query(cls.data, "$.dag.dag_dependencies")).all(): dependencies[row[0]] = [DagDependency(**d) for d in json.loads(row[1])] else: for row in session.query( cls.dag_id, func.json_extract_path(cls.data, "dag", "dag_dependencies") ).all(): dependencies[row[0]] = [DagDependency(**d) for d in row[1]] return dependencies
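# Hedged sketch (standalone helper, not an Airflow API): write_dag's change
# detection reduces to comparing an md5 over the canonically serialised DAG
# dict, exactly as dag_hash is computed in __init__ (json.dumps with
# sort_keys=True). Unchanged DAGs therefore skip the session.merge().
def _example_dag_hash(serialized_dag_dict: Dict[str, Any]) -> str:
    return hashlib.md5(
        json.dumps(serialized_dag_dict, sort_keys=True).encode("utf-8")
    ).hexdigest()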
# coding: utf-8 # # Copyright 2021 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for jobs.transforms.base_validation.""" from __future__ import annotations import re from core import feconf from core.domain import change_domain from core.domain import exp_fetchers from core.domain import state_domain from core.jobs import job_test_utils from core.jobs.transforms.validation import base_validation from core.jobs.types import base_validation_errors from core.platform import models import apache_beam as beam (base_models, exp_models) = models.Registry.import_models( [models.NAMES.base_model, models.NAMES.exploration]) class MockDomainObject(base_models.BaseModel): def validate(self, strict=True): """Mock validate function.""" pass class ValidateDeletedTests(job_test_utils.PipelinedTestBase): def test_process_reports_error_for_old_deleted_model(self): expired_model = base_models.BaseModel( id='123', deleted=True, created_on=self.YEAR_AGO, last_updated=self.YEAR_AGO) output = ( self.pipeline | beam.Create([expired_model]) | beam.ParDo(base_validation.ValidateDeletedModel()) ) self.assert_pcoll_equal(output, [ base_validation_errors.ModelExpiredError(expired_model), ]) class ValidateModelTimeFieldTests(job_test_utils.PipelinedTestBase): def test_process_reports_model_timestamp_relationship_error(self): invalid_timestamp = base_models.BaseModel( id='123', created_on=self.NOW, last_updated=self.YEAR_AGO) output = ( self.pipeline | beam.Create([invalid_timestamp]) | beam.ParDo(base_validation.ValidateModelTimestamps()) ) self.assert_pcoll_equal(output, [ base_validation_errors.InconsistentTimestampsError( invalid_timestamp), ]) def test_process_reports_model_mutated_during_job_error(self): invalid_timestamp = base_models.BaseModel( id='124', created_on=self.NOW, last_updated=self.YEAR_LATER) output = ( self.pipeline | beam.Create([invalid_timestamp]) | beam.ParDo(base_validation.ValidateModelTimestamps()) ) self.assert_pcoll_equal(output, [ base_validation_errors.ModelMutatedDuringJobError( invalid_timestamp), ]) class ValidateModelIdTests(job_test_utils.PipelinedTestBase): def test_validate_model_id(self): invalid_id_model = base_models.BaseModel( id='123@?!*', created_on=self.YEAR_AGO, last_updated=self.NOW) output = ( self.pipeline | beam.Create([invalid_id_model]) | beam.ParDo(base_validation.ValidateBaseModelId()) ) self.assert_pcoll_equal(output, [ base_validation_errors.ModelIdRegexError( invalid_id_model, base_validation.BASE_MODEL_ID_PATTERN), ]) class ValidatePostCommitIsInvalidTests(job_test_utils.PipelinedTestBase): def test_validate_post_commit_is_invalid(self): invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='invalid-type', user_id='', post_commit_status='invalid', post_commit_is_private=False, commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_status]) | beam.ParDo(base_validation.ValidatePostCommitStatus()) ) self.assert_pcoll_equal(output, [ 
base_validation_errors.InvalidCommitStatusError( invalid_commit_status), ]) class ValidatePostCommitIsPrivateTests(job_test_utils.PipelinedTestBase): def test_validate_post_commit_is_private_when_status_is_public(self): invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='invalid-type', user_id='', post_commit_status='public', post_commit_is_private=True, commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_status]) | beam.ParDo(base_validation.ValidatePostCommitIsPrivate()) ) self.assert_pcoll_equal(output, [ base_validation_errors.InvalidPrivateCommitStatusError( invalid_commit_status), ]) def test_validate_post_commit_is_private_when_status_is_private(self): invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='invalid-type', user_id='', post_commit_status='private', post_commit_is_private=False, commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_status]) | beam.ParDo(base_validation.ValidatePostCommitIsPrivate()) ) self.assert_pcoll_equal(output, [ base_validation_errors.InvalidPrivateCommitStatusError( invalid_commit_status), ]) class ValidatePostCommitIsPublicTests(job_test_utils.PipelinedTestBase): def test_validate_post_commit_is_public_when_status_is_public(self): invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='create', user_id='', post_commit_status='public', post_commit_community_owned=True, commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_status]) | beam.ParDo(base_validation.ValidatePostCommitIsPublic()) ) self.assert_pcoll_empty(output) def test_validate_post_commit_is_public_when_status_is_private(self): invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='create', user_id='', post_commit_status='private', post_commit_community_owned=True, commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_status]) | beam.ParDo(base_validation.ValidatePostCommitIsPublic()) ) self.assert_pcoll_equal(output, [ base_validation_errors.InvalidPublicCommitStatusError( invalid_commit_status), ]) def test_validate_post_commit_is_public_raise_exception(self): invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='create', user_id='', post_commit_status='public', post_commit_community_owned=False, commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_status]) | beam.ParDo(base_validation.ValidatePostCommitIsPublic()) ) self.assert_pcoll_equal(output, [ base_validation_errors.InvalidPublicCommitStatusError( invalid_commit_status), ]) class MockValidateModelDomainObjectInstancesWithNeutral( base_validation.ValidateModelDomainObjectInstances): def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument return MockDomainObject() def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument return base_validation.VALIDATION_MODES.neutral class MockValidateModelDomainObjectInstancesWithStrict( base_validation.ValidateModelDomainObjectInstances): def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument return MockDomainObject() def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument return 
base_validation.VALIDATION_MODES.strict class MockValidateModelDomainObjectInstancesWithNonStrict( base_validation.ValidateModelDomainObjectInstances): def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument return MockDomainObject() def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument return base_validation.VALIDATION_MODES.non_strict class MockValidateModelDomainObjectInstancesWithInvalid( base_validation.ValidateModelDomainObjectInstances): def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument return MockDomainObject() def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument return 'invalid' class MockValidateExplorationModelDomainObjectInstances( base_validation.ValidateModelDomainObjectInstances): def _get_model_domain_object_instance(self, item): return exp_fetchers.get_exploration_from_model(item) class ValidateModelDomainObjectInstancesTests(job_test_utils.PipelinedTestBase): def test_validation_type_for_domain_object( self): model = base_models.BaseModel( id='mock-123', deleted=False, created_on=self.YEAR_AGO, last_updated=self.NOW) output = ( self.pipeline | beam.Create([model]) | beam.ParDo( base_validation.ValidateModelDomainObjectInstances()) ) self.assert_pcoll_equal(output, []) def test_validation_type_for_domain_object_with_neutral_type( self): model = base_models.BaseModel( id='mock-123', deleted=False, created_on=self.YEAR_AGO, last_updated=self.NOW) output = ( self.pipeline | beam.Create([model]) | beam.ParDo( MockValidateModelDomainObjectInstancesWithNeutral()) ) self.assert_pcoll_equal(output, []) def test_validation_type_for_domain_object_with_strict_type( self): model = base_models.BaseModel( id='mock-123', deleted=False, created_on=self.YEAR_AGO, last_updated=self.NOW) output = ( self.pipeline | beam.Create([model]) | beam.ParDo( MockValidateModelDomainObjectInstancesWithStrict()) ) self.assert_pcoll_equal(output, []) def test_validation_type_for_domain_object_with_non_strict_type( self): model = base_models.BaseModel( id='mock-123', deleted=False, created_on=self.YEAR_AGO, last_updated=self.NOW) output = ( self.pipeline | beam.Create([model]) | beam.ParDo( MockValidateModelDomainObjectInstancesWithNonStrict()) ) self.assert_pcoll_equal(output, []) def test_error_is_raised_with_invalid_validation_type_for_domain_object( self): model = base_models.BaseModel( id='mock-123', deleted=False, created_on=self.YEAR_AGO, last_updated=self.NOW) output = ( self.pipeline | beam.Create([model]) | beam.ParDo(MockValidateModelDomainObjectInstancesWithInvalid()) ) self.assert_pcoll_equal(output, [ base_validation_errors.ModelDomainObjectValidateError( model, 'Invalid validation type for domain object: invalid') ]) def test_validation_type_for_exploration_domain_object(self): model_instance1 = exp_models.ExplorationModel( id='mock-123', title='title', category='category', language_code='en', init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states={ feconf.DEFAULT_INIT_STATE_NAME: ( state_domain.State.create_default_state( feconf.DEFAULT_INIT_STATE_NAME, is_initial_state=True ).to_dict()), }, states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, created_on=self.YEAR_AGO, last_updated=self.NOW ) model_instance2 = exp_models.ExplorationModel( id='mock-123', title='title', category='category', language_code='en', init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states={ feconf.DEFAULT_INIT_STATE_NAME: ( state_domain.State.create_default_state( 'end', 
is_initial_state=True ).to_dict()), }, states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, created_on=self.YEAR_AGO, last_updated=self.NOW ) output = ( self.pipeline | beam.Create([model_instance1, model_instance2]) | beam.ParDo(MockValidateExplorationModelDomainObjectInstances()) ) self.assert_pcoll_equal(output, [ base_validation_errors.ModelDomainObjectValidateError( model_instance2, 'The destination end is not a valid state.') ]) class ValidateCommitTypeTests(job_test_utils.PipelinedTestBase): def test_validate_commit_type(self): invalid_commit_type_model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='invalid-type', user_id='', post_commit_status='', commit_cmds=[]) output = ( self.pipeline | beam.Create([invalid_commit_type_model]) | beam.ParDo(base_validation.ValidateCommitType()) ) self.assert_pcoll_equal(output, [ base_validation_errors.InvalidCommitTypeError( invalid_commit_type_model), ]) class MockValidateCommitCmdsSchema( base_validation.BaseValidateCommitCmdsSchema): def process(self, input_model): self._get_change_domain_class(input_model) class MockValidateCommitCmdsSchemaChangeDomain( base_validation.BaseValidateCommitCmdsSchema): def _get_change_domain_class(self, item): pass class MockValidateWrongSchema(base_validation.BaseValidateCommitCmdsSchema): def _get_change_domain_class(self, item): # pylint: disable=unused-argument return change_domain.BaseChange class ValidateCommitCmdsSchemaTests(job_test_utils.PipelinedTestBase): def test_validate_none_commit(self): invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='test', user_id='', post_commit_status='', commit_cmds=[{}]) output = ( self.pipeline | beam.Create([invalid_commit_cmd_model]) | beam.ParDo(MockValidateCommitCmdsSchemaChangeDomain()) ) self.assert_pcoll_equal(output, [ base_validation_errors.CommitCmdsNoneError(invalid_commit_cmd_model) ]) def test_validate_wrong_commit_cmd_missing(self): invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='test', user_id='', post_commit_status='', commit_cmds=[{'cmd-invalid': 'invalid_test_command'}, {}]) output = ( self.pipeline | beam.Create([invalid_commit_cmd_model]) | beam.ParDo(MockValidateWrongSchema()) ) self.assert_pcoll_equal(output, [ base_validation_errors.CommitCmdsValidateError( invalid_commit_cmd_model, {'cmd-invalid': 'invalid_test_command'}, 'Missing cmd key in change dict') ]) def test_validate_wrong_commit_cmd(self): invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='test', user_id='', post_commit_status='', commit_cmds=[{'cmd': 'invalid_test_command'}]) output = ( self.pipeline | beam.Create([invalid_commit_cmd_model]) | beam.ParDo(MockValidateWrongSchema()) ) self.assert_pcoll_equal(output, [ base_validation_errors.CommitCmdsValidateError( invalid_commit_cmd_model, {'cmd': 'invalid_test_command'}, 'Command invalid_test_command is not allowed') ]) def test_validate_raise_not_implemented(self): invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='test', user_id='', post_commit_status='', commit_cmds=[{}]) with self.assertRaisesRegexp( NotImplementedError, re.escape( 'The _get_change_domain_class() method is missing from the ' 'derived class. 
It should be implemented in the derived class.' ) ): MockValidateCommitCmdsSchema().process(invalid_commit_cmd_model) def test_validate_commit_cmds(self): invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, last_updated=self.NOW, commit_type='test', user_id='', post_commit_status='', commit_cmds=[{'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}]) output = ( self.pipeline | beam.Create([invalid_commit_cmd_model]) | beam.ParDo(MockValidateWrongSchema()) ) self.assert_pcoll_equal(output, [])
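# A self-contained sketch of the Beam validation pattern exercised by the tests
# above, written against plain apache_beam rather than Oppia's job_test_utils.
# The _MockModel class, the _ValidateDeleted DoFn, and the 8-week cutoff are
# illustrative stand-ins, not Oppia's actual ValidateDeletedModel implementation.
import datetime

import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that, equal_to


class _MockModel:
    """Hypothetical stand-in for a storage model with deletion metadata."""

    def __init__(self, model_id, deleted, last_updated):
        self.id = model_id
        self.deleted = deleted
        self.last_updated = last_updated


class _ValidateDeleted(beam.DoFn):
    """Emits an error string for models that were deleted long ago."""

    def process(self, model):
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(weeks=8)
        if model.deleted and model.last_updated < cutoff:
            yield 'ModelExpiredError: %s' % model.id


def _demo_validate_deleted():
    year_ago = datetime.datetime.utcnow() - datetime.timedelta(days=365)
    expired = _MockModel('123', deleted=True, last_updated=year_ago)
    with TestPipeline() as p:
        errors = p | beam.Create([expired]) | beam.ParDo(_ValidateDeleted())
        assert_that(errors, equal_to(['ModelExpiredError: 123']))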
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A tool to generate api_docs for TensorFlow2. ``` python generate2.py --output_dir=/tmp/out ``` Requires a local installation of `tensorflow_docs`: ``` pip install git+https://github.com/tensorflow/docs ``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from os import path import textwrap from absl import app from absl import flags from distutils.version import LooseVersion import tensorflow as tf from tensorflow_docs.api_generator import doc_controls from tensorflow_docs.api_generator import doc_generator_visitor from tensorflow_docs.api_generator import generate_lib from tensorflow_docs.api_generator import parser import tensorboard import tensorflow_estimator from tensorflow.python.util import tf_export from tensorflow.python.util import tf_inspect # Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`. parser.tf_inspect = tf_inspect # `tf` has an `__all__` that doesn't list important things like `keras`. # The doc generator recognizes `__all__` as the list of public symbols. # So patch `tf.__all__` to list everything. tf.__all__ = [item_name for item_name, value in tf_inspect.getmembers(tf)] FLAGS = flags.FLAGS flags.DEFINE_string( "code_url_prefix", "/code/stable/tensorflow", "A url to prepend to code paths when creating links to defining code") flags.DEFINE_string( "output_dir", "/tmp/out", "A directory, where the docs will be output to.") flags.DEFINE_bool("search_hints", True, "Include meta-data search hints at the top of each file.") flags.DEFINE_string("site_path", "", "The prefix ({site-path}/api_docs/python/...) used in the " "`_toc.yaml` and `_redirects.yaml` files") if tf.__version__.startswith('1'): PRIVATE_MAP = { 'tf.contrib.autograph': ['utils', 'operators'], 'tf.test': ['mock'], 'tf.contrib.estimator': ['python'], 'tf': ['python', 'core', 'compiler', 'examples', 'tools'], # There's some aliasing between the compats and v1/2s, so it's easier to # block by name and location than by deleting, or hiding objects. 
'tf.compat.v1.compat': ['v1', 'v2'], 'tf.compat.v2.compat': ['v1', 'v2'] } DO_NOT_DESCEND_MAP = { 'tf': ['cli', 'lib', 'wrappers'], 'tf.contrib': [ 'compiler', 'grid_rnn', # Block contrib.keras to de-clutter the docs 'keras', 'labeled_tensor', 'quantization', 'session_bundle', 'slim', 'solvers', 'specs', 'tensor_forest', 'tensorboard', 'testing', 'tfprof', ], 'tf.contrib.bayesflow': [ 'special_math', 'stochastic_gradient_estimators', 'stochastic_variables' ], 'tf.contrib.ffmpeg': ['ffmpeg_ops'], 'tf.contrib.graph_editor': [ 'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util' ], 'tf.contrib.keras': ['api', 'python'], 'tf.contrib.layers': ['feature_column', 'summaries'], 'tf.contrib.learn': [ 'datasets', 'head', 'graph_actions', 'io', 'models', 'monitors', 'ops', 'preprocessing', 'utils', ], 'tf.contrib.util': ['loader'], } else: PRIVATE_MAP = { 'tf': ['python', 'core', 'compiler', 'examples', 'tools'], # There's some aliasing between the compats and v1/2s, so it's easier to # block by name and location than by deleting, or hiding objects. 'tf.compat.v1.compat': ['v1', 'v2'], 'tf.compat.v2.compat': ['v1', 'v2'] } DO_NOT_DESCEND_MAP = {} tf.__doc__ = """ ## TensorFlow ``` pip install tensorflow ``` """ _raw_ops_doc = textwrap.dedent("""\n Note: `tf.raw_ops` provides direct/low level access to all TensorFlow ops. See \ [the RFC](https://github.com/tensorflow/community/blob/master/rfcs/20181225-tf-raw-ops.md) for details. Unless you are library writer, you likely do not need to use these ops directly.""") if LooseVersion(tf.__version__) < LooseVersion('2'): tf.raw_ops.__doc__ = _raw_ops_doc tf.contrib.__doc__ = """ Contrib module containing volatile or experimental code. Warning: The `tf.contrib` module will not be included in TensorFlow 2.0. Many of its submodules have been integrated into TensorFlow core, or spun-off into other projects like [`tensorflow_io`](https://github.com/tensorflow/io), or [`tensorflow_addons`](https://github.com/tensorflow/addons). For instructions on how to upgrade see the [Migration guide](https://www.tensorflow.org/guide/migrate). """ else: tf.raw_ops.__doc__ += _raw_ops_doc # The doc generator isn't aware of tf_export. # So prefix the score tuples with -1 when this is the canonical name, +1 # otherwise. The generator chooses the name with the lowest score. class TfExportAwareDocGeneratorVisitor( doc_generator_visitor.DocGeneratorVisitor): """A `tf_export` aware doc_visitor.""" def _score_name(self, name): canonical = tf_export.get_canonical_name_for_symbol(self._index[name]) canonical_score = 1 if canonical is not None and name == "tf." + canonical: canonical_score = -1 scores = super(TfExportAwareDocGeneratorVisitor, self)._score_name(name) return (canonical_score,) + scores def _hide_layer_and_module_methods(): """Hide methods and properties defined in the base classes of keras layers.""" # __dict__ only sees attributes defined in *this* class, not on parent classes module_contents = list(tf.Module.__dict__.items()) layer_contents = list(tf.keras.layers.Layer.__dict__.items()) for name, obj in module_contents + layer_contents: if name == "__init__": continue if isinstance(obj, property): obj = obj.fget if isinstance(obj, (staticmethod, classmethod)): obj = obj.__func__ try: doc_controls.do_not_doc_in_subclasses(obj) except AttributeError: pass def build_docs(output_dir, code_url_prefix, search_hints=True): """Build api docs for tensorflow v2. Args: output_dir: A string path, where to put the files. 
code_url_prefix: prefix for "Defined in" links. search_hints: Bool. Include meta-data search hints at the top of each file. """ _hide_layer_and_module_methods() try: doc_controls.do_not_generate_docs(tf.tools) except AttributeError: pass try: doc_controls.do_not_generate_docs(tf.compat.v1.pywrap_tensorflow) except AttributeError: pass try: doc_controls.do_not_generate_docs(tf.pywrap_tensorflow) except AttributeError: pass try: doc_controls.do_not_generate_docs(tf.flags) except AttributeError: pass base_dir = path.normpath(path.join(tf.__file__, "../..")) base_dirs = ( path.join(base_dir, "tensorflow_core"), # External packages base directories path.dirname(tensorboard.__file__), path.dirname(tensorflow_estimator.__file__), ) code_url_prefixes = ( code_url_prefix, # External packages source repositories, "https://github.com/tensorflow/tensorboard/tree/master/tensorboard", "https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator", ) if LooseVersion(tf.__version__) < LooseVersion('2'): root_title = 'TensorFlow' elif LooseVersion(tf.__version__) >= LooseVersion('2'): root_title = 'TensorFlow 2.0' doc_generator = generate_lib.DocGenerator( root_title=root_title, py_modules=[("tf", tf)], base_dir=base_dirs, search_hints=search_hints, code_url_prefix=code_url_prefixes, site_path=FLAGS.site_path, visitor_cls=TfExportAwareDocGeneratorVisitor, private_map=PRIVATE_MAP, do_not_descend_map=DO_NOT_DESCEND_MAP) doc_generator.build(output_dir) def main(argv): del argv build_docs(output_dir=FLAGS.output_dir, code_url_prefix=FLAGS.code_url_prefix, search_hints=FLAGS.search_hints) if __name__ == "__main__": app.run(main)
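# A minimal programmatic sketch of driving the generator above without going
# through app.run(); the output directory and URL prefix are placeholders.
# build_docs() reads FLAGS.site_path internally, so the flags object has to be
# marked as parsed first (FLAGS.mark_as_parsed() is an absl.flags call).
def _demo_build_docs():
    FLAGS.mark_as_parsed()  # permit FLAGS.site_path access outside app.run()
    build_docs(
        output_dir="/tmp/tf_api_docs",
        code_url_prefix="/code/stable/tensorflow",
        search_hints=True,
    )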
""" Tests for Downsampled Filters/Factors/Classifiers """ from functools import partial import pandas as pd from pandas.util.testing import assert_frame_equal from zipline.errors import NoFurtherDataError from zipline.pipeline import ( Pipeline, CustomFactor, CustomFilter, CustomClassifier, SimplePipelineEngine, ) from zipline.pipeline.data.testing import TestingDataSet from zipline.pipeline.domain import ( CA_EQUITIES, EquitySessionDomain, GB_EQUITIES, US_EQUITIES, ) from zipline.pipeline.factors import SimpleMovingAverage from zipline.pipeline.filters.smoothing import All from zipline.testing import ZiplineTestCase, parameter_space, ExplodingObject from zipline.testing.fixtures import ( WithTradingSessions, WithSeededRandomPipelineEngine, WithAssetFinder, ) from zipline.utils.classproperty import classproperty from zipline.utils.input_validation import _qualified_name from zipline.utils.numpy_utils import int64_dtype class NDaysAgoFactor(CustomFactor): inputs = [TestingDataSet.float_col] def compute(self, today, assets, out, floats): out[:] = floats[0] class NDaysAgoFilter(CustomFilter): inputs = [TestingDataSet.bool_col] def compute(self, today, assets, out, bools): out[:] = bools[0] class NDaysAgoClassifier(CustomClassifier): inputs = [TestingDataSet.categorical_col] dtype = TestingDataSet.categorical_col.dtype def compute(self, today, assets, out, cats): out[:] = cats[0] class ComputeExtraRowsTestCase(WithTradingSessions, ZiplineTestCase): DATA_MIN_DAY = pd.Timestamp('2012-06', tz='UTC') DATA_MAX_DAY = pd.Timestamp('2015', tz='UTC') TRADING_CALENDAR_STRS = ('NYSE', 'LSE', 'TSX') # Test with different window_lengths to ensure that window length is not # used when calculating exra rows for the top-level term. factor1 = TestingDataSet.float_col.latest factor11 = NDaysAgoFactor(window_length=11) factor91 = NDaysAgoFactor(window_length=91) filter1 = TestingDataSet.bool_col.latest filter11 = NDaysAgoFilter(window_length=11) filter91 = NDaysAgoFilter(window_length=91) classifier1 = TestingDataSet.categorical_col.latest classifier11 = NDaysAgoClassifier(window_length=11) classifier91 = NDaysAgoClassifier(window_length=91) all_terms = [ factor1, factor11, factor91, filter1, filter11, filter91, classifier1, classifier11, classifier91, ] @parameter_space( calendar_name=TRADING_CALENDAR_STRS, base_terms=[ (factor1, factor11, factor91), (filter1, filter11, filter91), (classifier1, classifier11, classifier91), ], __fail_fast=True ) def test_yearly(self, base_terms, calendar_name): downsampled_terms = tuple( t.downsample('year_start') for t in base_terms ) all_terms = base_terms + downsampled_terms all_sessions = self.trading_sessions[calendar_name] end_session = all_sessions[-1] years = all_sessions.year sessions_in_2012 = all_sessions[years == 2012] sessions_in_2013 = all_sessions[years == 2013] sessions_in_2014 = all_sessions[years == 2014] # Simulate requesting computation where the unaltered lookback would # land exactly on the first date in 2014. We shouldn't request any # additional rows for the regular terms or the downsampled terms. for i in range(0, 30, 5): start_session = sessions_in_2014[i] self.check_extra_row_calculations( all_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land on the second date in 2014. We should request one more extra # row in the downsampled terms to push us back to the first date in # 2014. 
for i in range(0, 30, 5): start_session = sessions_in_2014[i + 1] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i + 1, ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land on the last date of 2013. The downsampled terms should request # enough extra rows to push us back to the start of 2013. for i in range(0, 30, 5): start_session = sessions_in_2014[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(sessions_in_2013), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) # Simulate requesting computation where the unaltered lookback would # land on the last date of 2012. The downsampled terms should request # enough extra rows to push us back to the first known date, which is # in the middle of 2012. for i in range(0, 30, 5): start_session = sessions_in_2013[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(sessions_in_2012), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) # Simulate requesting computation where the unaltered lookback would # land prior to the first date of 2012. The downsampled terms will fail # to request enough extra rows. for i in range(0, 30, 5): with self.assertRaisesRegex( NoFurtherDataError, r'\s*Insufficient data to compute Pipeline' ): self.check_extra_row_calculations( downsampled_terms, all_sessions, all_sessions[i], end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) self.check_extra_row_calculations( base_terms, all_sessions, all_sessions[i], end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) @parameter_space( calendar_name=TRADING_CALENDAR_STRS, base_terms=[ (factor1, factor11, factor91), (filter1, filter11, filter91), (classifier1, classifier11, classifier91), ], __fail_fast=True ) def test_quarterly(self, calendar_name, base_terms): downsampled_terms = tuple( t.downsample('quarter_start') for t in base_terms ) all_terms = base_terms + downsampled_terms # This region intersects with Q4 2013, Q1 2014, and Q2 2014. tmp = self.trading_sessions[calendar_name] all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-04-30')] end_session = all_sessions[-1] months = all_sessions.month Q4_2013 = all_sessions[months == 12] Q1_2014 = all_sessions[(months == 1) | (months == 2) | (months == 3)] Q2_2014 = all_sessions[months == 4] # Simulate requesting computation where the unaltered lookback would # land exactly on the first date in Q2 2014. We shouldn't request any # additional rows for the regular terms or the downsampled terms. for i in range(0, 15, 5): start_session = Q2_2014[i] self.check_extra_row_calculations( all_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land exactly on the second date in Q2 2014. # The downsampled terms should request one more extra row. 
for i in range(0, 15, 5): start_session = Q2_2014[i + 1] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i + 1, ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land exactly on the last date in Q1 2014. The downsampled terms # should request enough extra rows to push us back to the first date of # Q1 2014. for i in range(0, 15, 5): start_session = Q2_2014[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(Q1_2014), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) # Simulate requesting computation where the unaltered lookback would # land exactly on the last date in Q4 2013. The downsampled terms # should request enough extra rows to push us back to the first known # date, which is in the middle of december 2013. for i in range(0, 15, 5): start_session = Q1_2014[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(Q4_2013), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) @parameter_space( calendar_name=TRADING_CALENDAR_STRS, base_terms=[ (factor1, factor11, factor91), (filter1, filter11, filter91), (classifier1, classifier11, classifier91), ], __fail_fast=True ) def test_monthly(self, calendar_name, base_terms): downsampled_terms = tuple( t.downsample('month_start') for t in base_terms ) all_terms = base_terms + downsampled_terms # This region intersects with Dec 2013, Jan 2014, and Feb 2014. tmp = self.trading_sessions[calendar_name] all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-02-28')] end_session = all_sessions[-1] months = all_sessions.month dec2013 = all_sessions[months == 12] jan2014 = all_sessions[months == 1] feb2014 = all_sessions[months == 2] # Simulate requesting computation where the unaltered lookback would # land exactly on the first date in feb 2014. We shouldn't request any # additional rows for the regular terms or the downsampled terms. for i in range(0, 10, 2): start_session = feb2014[i] self.check_extra_row_calculations( all_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land on the second date in feb 2014. We should request one more # extra row in the downsampled terms to push us back to the first date # in 2014. for i in range(0, 10, 2): start_session = feb2014[i + 1] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i + 1, ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land on the last date of jan 2014. The downsampled terms should # request enough extra rows to push us back to the start of jan 2014. 
for i in range(0, 10, 2): start_session = feb2014[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(jan2014), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) # Simulate requesting computation where the unaltered lookback would # land on the last date of dec 2013. The downsampled terms should # request enough extra rows to push us back to the first known date, # which is in the middle of december 2013. for i in range(0, 10, 2): start_session = jan2014[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(dec2013), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) @parameter_space( calendar_name=TRADING_CALENDAR_STRS, base_terms=[ (factor1, factor11, factor91), (filter1, filter11, filter91), (classifier1, classifier11, classifier91), ], __fail_fast=True ) def test_weekly(self, calendar_name, base_terms): downsampled_terms = tuple( t.downsample('week_start') for t in base_terms ) all_terms = base_terms + downsampled_terms # December 2013 # Mo Tu We Th Fr Sa Su # 1 # 2 3 4 5 6 7 8 # 9 10 11 12 13 14 15 # 16 17 18 19 20 21 22 # 23 24 25 26 27 28 29 # 30 31 # January 2014 # Mo Tu We Th Fr Sa Su # 1 2 3 4 5 # 6 7 8 9 10 11 12 # 13 14 15 16 17 18 19 # 20 21 22 23 24 25 26 # 27 28 29 30 31 # This region intersects with the last full week of 2013, the week # shared by 2013 and 2014, and the first full week of 2014. tmp = self.trading_sessions[calendar_name] all_sessions = tmp[tmp.slice_indexer('2013-12-27', '2014-01-12')] end_session = all_sessions[-1] week0 = all_sessions[ all_sessions.slice_indexer('2013-12-27', '2013-12-29') ] week1 = all_sessions[ all_sessions.slice_indexer('2013-12-30', '2014-01-05') ] week2 = all_sessions[ all_sessions.slice_indexer('2014-01-06', '2014-01-12') ] # Simulate requesting computation where the unaltered lookback would # land exactly on the first date in week 2. We shouldn't request any # additional rows for the regular terms or the downsampled terms. for i in range(3): start_session = week2[i] self.check_extra_row_calculations( all_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land exactly on the second date in week 2. The downsampled terms # should request one more extra row. for i in range(3): start_session = week2[i + 1] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i + 1, ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i, expected_extra_rows=i, ) # Simulate requesting computation where the unaltered lookback would # land exactly on the last date in week 1. The downsampled terms # should request enough extra rows to push us back to the first date of # week 1. 
for i in range(3): start_session = week2[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(week1), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) # Simulate requesting computation where the unaltered lookback would # land exactly on the last date in week0. The downsampled terms # should request enough extra rows to push us back to the first known # date, which is in the middle of december 2013. for i in range(3): start_session = week1[i] self.check_extra_row_calculations( downsampled_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + len(week0), ) self.check_extra_row_calculations( base_terms, all_sessions, start_session, end_session, min_extra_rows=i + 1, expected_extra_rows=i + 1, ) def check_extra_row_calculations(self, terms, all_sessions, start_session, end_session, min_extra_rows, expected_extra_rows): """ Check that each term in ``terms`` computes an expected number of extra rows for the given parameters. """ for term in terms: result = term.compute_extra_rows( all_sessions, start_session, end_session, min_extra_rows, ) self.assertEqual( result, expected_extra_rows, "Expected {} extra_rows from {}, but got {}.".format( expected_extra_rows, term, result, ) ) class DownsampledPipelineTestCase(WithSeededRandomPipelineEngine, ZiplineTestCase): # Extend into the last few days of 2013 to test year/quarter boundaries. START_DATE = pd.Timestamp('2013-12-15', tz='UTC') # Extend into the first few days of 2015 to test year/quarter boundaries. END_DATE = pd.Timestamp('2015-01-06', tz='UTC') ASSET_FINDER_EQUITY_SIDS = tuple(range(10)) DOMAIN = US_EQUITIES @classproperty def ASSET_FINDER_COUNTRY_CODE(cls): return cls.DOMAIN.country_code @classproperty def SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN(cls): return cls.DOMAIN @classproperty def all_sessions(cls): return cls.DOMAIN.all_sessions() def check_downsampled_term(self, term): # June 2014 # Mo Tu We Th Fr Sa Su # 1 # 2 3 4 5 6 7 8 # 9 10 11 12 13 14 15 # 16 17 18 19 20 21 22 # 23 24 25 26 27 28 29 # 30 all_sessions = self.all_sessions compute_dates = all_sessions[ all_sessions.slice_indexer('2014-06-05', '2015-01-06') ] start_date, end_date = compute_dates[[0, -1]] pipe = Pipeline({ 'year': term.downsample(frequency='year_start'), 'quarter': term.downsample(frequency='quarter_start'), 'month': term.downsample(frequency='month_start'), 'week': term.downsample(frequency='week_start'), }) # Raw values for term, computed each day from 2014 to the end of the # target period. 
raw_term_results = self.run_pipeline( Pipeline({'term': term}), start_date=pd.Timestamp('2014-01-02', tz='UTC'), end_date=pd.Timestamp('2015-01-06', tz='UTC'), )['term'].unstack() expected_results = { 'year': (raw_term_results .groupby(pd.TimeGrouper('AS')) .first() .reindex(compute_dates, method='ffill')), 'quarter': (raw_term_results .groupby(pd.TimeGrouper('QS')) .first() .reindex(compute_dates, method='ffill')), 'month': (raw_term_results .groupby(pd.TimeGrouper('MS')) .first() .reindex(compute_dates, method='ffill')), 'week': (raw_term_results .groupby(pd.TimeGrouper('W', label='left')) .first() .reindex(compute_dates, method='ffill')), } results = self.run_pipeline(pipe, start_date, end_date) for frequency in expected_results: result = results[frequency].unstack() expected = expected_results[frequency] assert_frame_equal(result, expected) def test_downsample_windowed_factor(self): self.check_downsampled_term( SimpleMovingAverage( inputs=[TestingDataSet.float_col], window_length=5, ) ) def test_downsample_non_windowed_factor(self): sma = SimpleMovingAverage( inputs=[TestingDataSet.float_col], window_length=5, ) self.check_downsampled_term(((sma + sma) / 2).rank()) def test_downsample_windowed_filter(self): sma = SimpleMovingAverage( inputs=[TestingDataSet.float_col], window_length=5, ) self.check_downsampled_term(All(inputs=[sma.top(4)], window_length=5)) def test_downsample_nonwindowed_filter(self): sma = SimpleMovingAverage( inputs=[TestingDataSet.float_col], window_length=5, ) self.check_downsampled_term(sma > 5) def test_downsample_windowed_classifier(self): class IntSumClassifier(CustomClassifier): inputs = [TestingDataSet.float_col] window_length = 8 dtype = int64_dtype missing_value = -1 def compute(self, today, assets, out, floats): out[:] = floats.sum(axis=0).astype(int) % 4 self.check_downsampled_term(IntSumClassifier()) def test_downsample_nonwindowed_classifier(self): sma = SimpleMovingAverage( inputs=[TestingDataSet.float_col], window_length=5, ) self.check_downsampled_term(sma.quantiles(5)) def test_errors_on_bad_downsample_frequency(self): f = NDaysAgoFactor(window_length=3) with self.assertRaises(ValueError) as e: f.downsample('bad') expected = ( "{}() expected a value in " "('month_start', 'quarter_start', 'week_start', 'year_start') " "for argument 'frequency', but got 'bad' instead." ).format(_qualified_name(f.downsample)) self.assertEqual(str(e.exception), expected) class DownsampledGBPipelineTestCase(DownsampledPipelineTestCase): DOMAIN = GB_EQUITIES class DownsampledCAPipelineTestCase(DownsampledPipelineTestCase): DOMAIN = CA_EQUITIES class TestDownsampledRowwiseOperation(WithAssetFinder, ZiplineTestCase): T = partial(pd.Timestamp, tz='utc') START_DATE = T('2014-01-01') END_DATE = T('2014-02-01') HALF_WAY_POINT = T('2014-01-15') dates = pd.date_range(START_DATE, END_DATE) ASSET_FINDER_COUNTRY_CODE = '??' 
class SidFactor(CustomFactor): inputs = () window_length = 1 def compute(self, today, assets, out): out[:] = assets factor = SidFactor() @classmethod def init_class_fixtures(cls): super(TestDownsampledRowwiseOperation, cls).init_class_fixtures() cls.pipeline_engine = SimplePipelineEngine( get_loader=lambda c: ExplodingObject(), asset_finder=cls.asset_finder, default_domain=EquitySessionDomain( cls.dates, country_code=cls.ASSET_FINDER_COUNTRY_CODE, ), ) @classmethod def make_equity_info(cls): start = cls.START_DATE - pd.Timedelta(days=1) end = cls.END_DATE early_end = cls.HALF_WAY_POINT return pd.DataFrame( [['A', 'Ayy Inc.', start, end, 'E'], ['B', 'early end', start, early_end, 'E'], ['C', 'C Inc.', start, end, 'E']], index=[ord('A'), ord('B'), ord('C')], columns=( 'symbol', 'asset_name', 'start_date', 'end_date', 'exchange', ), ) def test_downsampled_rank(self): downsampled_rank = self.factor.rank().downsample('month_start') pipeline = Pipeline({'rank': downsampled_rank}) results_month_start = self.pipeline_engine.run_pipeline( pipeline, self.START_DATE, self.END_DATE, ) half_way_start = self.HALF_WAY_POINT + pd.Timedelta(days=1) results_halfway_start = self.pipeline_engine.run_pipeline( pipeline, half_way_start, self.END_DATE, ) results_month_start_aligned = results_month_start.loc[half_way_start:] assert_frame_equal(results_month_start_aligned, results_halfway_start)
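# A small illustrative sketch (separate from the test cases above) of attaching a
# downsampled term to a Pipeline; the window length and column names are arbitrary.
from zipline.pipeline import Pipeline
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.factors import SimpleMovingAverage


def make_downsampled_pipeline():
    """Recompute a 20-day SMA only on the first trading day of each month."""
    sma = SimpleMovingAverage(inputs=[TestingDataSet.float_col], window_length=20)
    return Pipeline({
        'sma_daily': sma,
        'sma_monthly': sma.downsample(frequency='month_start'),
    })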
from cattle import ClientApiError from common import * # NOQA from copy import deepcopy from gdapi import ApiError _USER_LIST = [ "Owner", "Member", "Stranger", "OutThereUser" ] PROJECTS = set([]) class NotFound(Exception): pass @pytest.fixture(autouse=True, scope="module") def clean_up_projects(super_client, request): # This randomly times out, don't know why, disabling it # on = super_client.create_setting(name='api.projects.use.rancher_id', # value='true') # wait_setting_active(super_client, on) def fin(): for project in PROJECTS: try: super_client.delete(super_client.by_id('project', project)) except ApiError as e: assert e.error.status == 404 assert len(get_ids(super_client.list_project()) & PROJECTS) == 0 # super_client.delete(on) request.addfinalizer(fin) pass @pytest.fixture() def project(user_clients, admin_user_client, request): project = _create_project(admin_user_client, user_clients, 'Owner') def fin(): try: admin_user_client.delete(admin_user_client.by_id('project', project)) except ApiError as e: assert e.error.status == 404 request.addfinalizer(fin) return project @pytest.fixture(scope='session') def user_clients(admin_user_client): clients = {} for user in _USER_LIST: clients[user] = create_context(admin_user_client, kind='user').user_client clients['admin'] = admin_user_client return clients @pytest.fixture() def members(user_clients): members = ['Owner', 'Member'] return _create_members(user_clients, members) def get_ids(items): ids = [] for item in items: ids.append(item.id) return set(ids) def diff_members(members, got_members): assert len(members) == len(got_members) members_a = set([]) members_b = set([]) for member in members: members_a.add( member['externalId'] + ' ' + member['externalIdType'] + ' ' + member['role']) for member in got_members: members_b.add( member['externalId'] + ' ' + member['externalIdType'] + ' ' + member['role']) assert members_a == members_b def all_owners(members): for member in members: member['role'] = 'owner' return members def test_update_project(user_clients, project): user_clients['Owner'].update( project, name='Project Name', description='Some description') assert user_clients['Owner'].by_id( 'project', project.id).name == 'Project Name' assert user_clients['Owner'].by_id( 'project', project.id).description == 'Some description' with pytest.raises(ApiError) as e: user_clients['Member'].update( project, name='Project Name from Member', description='Loop hole?') assert e.value.error.status == 404 with pytest.raises(ApiError) as e: user_clients['Stranger'].update( project, name='Project Name from Stranger', description='Changed') assert e.value.error.status == 404 def test_update_project_kubernetes_swarm(user_clients, project): project = user_clients['Owner'].update( project, name='Project Name', description='Some description', kubernetes=True, swarm=True) assert project.kubernetes is True assert project.swarm is True client = user_clients['Owner'] members = _create_members(user_clients, ['Owner']) project = client.create_project(members=members, kubernetes=True, swarm=True) project = client.wait_success(project) assert project.kubernetes is True assert project.swarm is True def test_project_no_filters_and_sort(user_clients): projects = user_clients['Owner'].list_project() assert len(projects.sortLinks) == 0 assert len(projects.filters) == 0 def test_set_members(admin_user_client, user_clients, project): members = get_plain_members(project.projectMembers()) members.append({ 'role': 'member', 'externalId': acc_id(user_clients['Member']), 
'externalIdType': 'rancher_id' }) _set_members(admin_user_client, user_clients['Owner'], project.id, None, 422) _set_members(admin_user_client, user_clients['Owner'], project.id, [], 422) _set_members(admin_user_client, user_clients['Owner'], project.id, members, None) _set_members(admin_user_client, user_clients['Member'], project.id, None, 'Attribute') _set_members(admin_user_client, user_clients['Member'], project.id, [], 'Attribute') _set_members(admin_user_client, user_clients['Member'], project.id, members, 'Attribute') with pytest.raises(NotFound): _set_members(admin_user_client, user_clients['Stranger'], project.id, None, 422) with pytest.raises(NotFound): _set_members(admin_user_client, user_clients['Stranger'], project.id, [], 422) with pytest.raises(NotFound): _set_members(admin_user_client, user_clients['Stranger'], project.id, members, 403) def test_get_members(admin_user_client, user_clients, members): project = _create_project_with_members(admin_user_client, user_clients['Owner'], members) members = project.projectMembers() _get_members(user_clients['Owner'], project.id, members) _get_members(user_clients['Member'], project.id, members) _get_members(user_clients['admin'], project.id, members) with pytest.raises(NotFound): _get_members(user_clients['Stranger'], project.id, members) def test_list_all_projects(admin_user_client): projects = admin_user_client.list_project() projectAccounts = admin_user_client.list_account(kind='project', limit=4000) ids = [] ids_2 = [] for project in projects: ids.append(project.id) for project in projectAccounts: ids_2.append(project.id) assert len(list(set(ids) - set(ids_2))) == 0 def check_state(client, project_id, states, excludes): for type in client.schema.types: if type not in excludes: try: for resource in client.list(type, accountId=project_id): assert resource.state in states assert resource.removed is not None except AttributeError: pass def client_for_project(project, admin_user_client): project_key = admin_user_client.create_api_key(accountId=project.id) admin_user_client.wait_success(project_key) return api_client(project_key.publicValue, project_key.secretValue) def test_delete_project(admin_user_client, new_context, super_client): project = new_context.user_client.reload(new_context.project) proj_id = new_context.project.id _create_resources(new_context.client) assert len(new_context.client.list_projectMember()) == 1 project = super_client.wait_success(project.deactivate()) project = super_client.wait_success(project.remove()) check_state(new_context.user_client, proj_id, ['removed'], ['account', 'project', 'host', 'subscribe', 'machineDriver']) try: super_client.wait_success(project.purge()) except: pass wait_for_condition(super_client, project, lambda x: x.state == 'purged') project = new_context.user_client.by_id('project', id=proj_id) assert project is None or project.state == 'purged' check_state(new_context.user_client, proj_id, ['purged', 'removed'], ['account', 'project', 'subscribe', 'machineDriver']) project_members = admin_user_client\ .list('projectMember') for member in project_members: assert member.projectId != proj_id def test_delete_members(admin_user_client, user_clients, members): project = _create_project_with_members(admin_user_client, user_clients['Owner'], members) members = [members[0]] assert len(user_clients['Member'] .by_id('project', project.id).projectMembers()) == 2 project.setmembers(members=members) project = user_clients['Owner'].by_id('project', project.id) assert len(project.projectMembers()) == 
1 assert user_clients['Member'].by_id('project', project.id) is None def test_change_roles(admin_user_client, user_clients, members): project = _create_project_with_members(admin_user_client, user_clients['Owner'], members) assert len(project.projectMembers()) == 2 new_members = all_owners(get_plain_members(project.projectMembers())) project_from_member = user_clients['Member'].by_id('project', project.id) with pytest.raises(AttributeError) as e: project_from_member.setmembers(members=new_members) assert 'setmembers' in e.value.message project.setmembers(members=new_members) project_from_member = user_clients['Member'].reload(project_from_member) project_from_member.setmembers(members=new_members) project_members_after = get_plain_members(project.projectMembers()) project_from_member_members_after = get_plain_members( project_from_member.projectMembers()) for member in project_members_after: assert member['role'] == 'owner' for member in project_from_member_members_after: assert member['role'] == 'owner' def test_delete_other_owners(admin_user_client, user_clients, members): project = _create_project_with_members(admin_user_client, user_clients['Owner'], all_owners(members)) project.setmembers(members=members) project = user_clients['Member'].by_id('project', project.id) new_members = get_plain_members(project.projectMembers()) for member in new_members: if((member['role'] == 'owner') & (member['externalId'] != acc_id( user_clients['Member']))): new_members.remove(member) project.setmembers(members=new_members) assert len(project.projectMembers()) == 1 assert user_clients['Owner'].by_id('project', project.id) is None project = client_for_project(project, admin_user_client).list_project()[0] got_members = project.projectMembers() assert len(got_members) == 1 def test_multiple_owners_add_members(admin_user_client, user_clients, members): project = _create_project_with_members(admin_user_client, user_clients['Owner'], all_owners(members)) current_members = get_plain_members(project.projectMembers()) current_members.append({ 'role': 'member', 'externalId': acc_id(user_clients['Stranger']), 'externalIdType': 'rancher_id' }) _set_members(admin_user_client, user_clients['Owner'], project.id, current_members, None) _set_members(admin_user_client, user_clients['Stranger'], project.id, current_members, 'Attribute') project = user_clients['Stranger'].by_id('project', project.id) assert len(project.projectMembers()) == 3 _set_members(admin_user_client, user_clients['Member'], project.id, members, None) with pytest.raises(ApiError) as e: project.projectMembers() assert e.value.error.status == 404 _set_members(admin_user_client, user_clients['Member'], project.id, current_members, None) assert len(project.projectMembers()) == len(current_members) def test_members_cant_do_things(admin_user_client, user_clients, members): # Tests that members can't alter members of a project, delete a project, # deactivate a project. 
project = _create_project_with_members(admin_user_client, user_clients['Owner'], members) project = user_clients['Member'].by_id("project", project.id) got_members = get_plain_members(project.projectMembers()) id = acc_id(user_clients['Member']) for member in got_members: if member['externalId'] == id: assert member['role'] == 'member' with pytest.raises(ApiError) as e: user_clients['Member'].delete(project) assert e.value.error.status == 403 _set_members(admin_user_client, user_clients['Member'], project.id, [{ 'externalId': acc_id(user_clients['Member']), 'externalIdType': 'rancher_id', 'role': 'owner' }], 'Attribute') with pytest.raises(AttributeError) as e: project.deactivate() assert 'deactivate' in e.value.message def test_project_cant_create_project(user_clients, members, project, admin_user_client): uuid = project.uuid client = client_for_project(project, admin_user_client) assert 'POST' not in client.schema.types['project'].collectionMethods got_project = client.list_project()[0] assert got_project.uuid == uuid assert len(project.projectMembers()) == len(got_project.projectMembers()) pass def test_list_projects_flag(admin_user_client, user_clients): projects = admin_user_client.list('project') ids = set([]) for project in projects: ids.add(project.id) projects_with_flag = admin_user_client.list('project', all='true') admin_id = acc_id(admin_user_client) assert len(projects) != len(projects_with_flag) for project in projects_with_flag: include = False for member in get_plain_members(project.projectMembers()): if (member['externalIdType'] == 'rancher_id'): if (member['externalId'] == admin_id): include = True if (include): assert project.id in ids else: assert project.id not in ids def test_get_project_not_mine(user_clients, project): assert user_clients['Member'].by_id('project', project.id) is None def test_project_deactivate(user_clients, project, members): project.setmembers(members=members) diff_members(members, get_plain_members(project.projectMembers())) project = user_clients['Member'].reload(project) with pytest.raises(AttributeError) as e: project.deactivate() assert 'deactivate' in e.value.message project = user_clients['Owner'].reload(project) project.deactivate() project = user_clients['Owner'].wait_success(project) assert project.state == 'inactive' project.activate() project = user_clients['Owner'].wait_success(project) project.deactivate() project = user_clients['Owner'].wait_success(project) assert project.state == 'inactive' def test_project_deactivate_members_cant_access(user_clients, project, members): project.setmembers(members=members) diff_members(members, get_plain_members(project.projectMembers())) project = user_clients['Owner'].reload(project) project.deactivate() project = user_clients['Owner'].wait_success(project) assert project.state == 'inactive' assert user_clients['Member'].by_id('project', project.id) is None project.activate() project = user_clients['Owner'].wait_success(project) prj_id = project.id assert project.state == 'active' project = user_clients['Member'].reload(project) assert project.id == prj_id def test_project_member_invalid(project, admin_user_client, members): client = client_for_project(project, admin_user_client) project.setmembers(members=members) diff_members(members, get_plain_members(project.projectMembers())) members_got = get_plain_members(project.projectMembers()) new_members = [] old_members = [] for member in members_got: if (member['role'] == 'owner'): new_members.append(member) else: old_members.append(member) 
project.setmembers(members=new_members) diff_members(new_members, get_plain_members(project.projectMembers())) assert client.by_id('projectMember', 'garbageId') is None assert client.by_id('projectMember', old_members[0]['externalId']) is None def test_make_project_with_identity(admin_user_client): client = create_context(admin_user_client).user_client identity = client.list_identity() assert len(identity) == 1 identity = identity[0] identity['role'] = 'owner' members = [identity] _create_project_with_members(admin_user_client, client, members) def test_restricted_members(admin_user_client): context_1 = create_context(admin_user_client, create_project=True, add_host=True) context_2 = create_context(admin_user_client) user2_client = context_2.user_client members = get_plain_members(context_1.project.projectMembers()) members.append({ 'role': 'restricted', 'externalId': acc_id(user2_client), 'externalIdType': 'rancher_id' }) project = context_1.user_client.reload(context_1.project) project.setmembers(members=members) user2_client = context_2.user_client new_headers = deepcopy(user2_client._headers) new_headers['X-API-Project-Id'] = project.id user2_client._headers = new_headers user2_client.reload_schema() hosts = user2_client.list_host() assert len(hosts) == 1 assert hosts[0].actions == {} with pytest.raises(ApiError) as e: user2_client.delete(hosts[0]) assert e.value.error.status == 405 with pytest.raises(ClientApiError) as e: user2_client.list_registration_token() assert 'registrationToken' in e.value.message with pytest.raises(AttributeError) as e: user2_client.create_registration_token() assert 'create_registration_token' in e.value.message def _create_resources(client): for x in range(0, 4): uuid = "sim:{}".format(random_num()) client.wait_success(client.create_container(imageUuid=uuid)) registry = client.create_registry(serverAddress='quay.io', name='Quay') registry = client.wait_success(registry) reg_cred = client.create_registry_credential( registryId=registry.id, email='[email protected]', publicValue='rancher', secretValue='rancher') client.wait_success(reg_cred) def _set_members(admin_user_client, client, id, members, status): project = client.by_id('project', id) if project is None: raise NotFound() if status is None: project.setmembers(members=members) got_members = get_plain_members(project.projectMembers()) assert len(got_members) == len(members) diff_members(members, got_members) elif (status == 'Attribute'): with pytest.raises(AttributeError) as e: project.setmembers(members=members) assert 'setmembers' in e.value.message else: with pytest.raises(ApiError) as e: project.setmembers(members=members) assert e.value.error.status == status def _get_plain_members(client, project): members = client.list_project_member(projectId=project.id) return get_plain_members(members) def _create_project(admin_user_client, user_clients, user): client = user_clients[user] members = _create_members(user_clients, [user]) project = client.create_project(members=members) project = admin_user_client.wait_success(project) project = client.by_id('project', project.id) got_members = get_plain_members(project.projectMembers()) diff_members(members, got_members) PROJECTS.add(project.id) assert project.id == project.id return project def _get_members(client, id, actual_members): project = client.by_id('project', id) if project is None: raise NotFound() assert len(project.projectMembers()) == len(actual_members) def _create_project_with_members(admin_user_client, client, members): project = 
client.create_project(members=members) project = admin_user_client.wait_success(project) project = client.by_id('project', project.id) PROJECTS.add(project.id) got_members = get_plain_members(project.projectMembers()) assert len(members) == len(got_members) diff_members(members, got_members) return project def _create_members(user_clients, members): newMembers = [] for member in members: newMembers.append({ 'role': 'owner' if member == 'Owner' else 'member', 'externalId': acc_id(user_clients[member]), 'externalIdType': 'rancher_id' }) return newMembers def test_update_project_publicdns(user_clients, project): project = user_clients['Owner'].update( project, name='Project Name', description='Some description', publicDns=True) assert project.publicDns is True client = user_clients['Owner'] members = _create_members(user_clients, ['Owner']) project = client.create_project(members=members, publicDns=True) project = client.wait_success(project) project = client.update(project, publicDns=False) project = client.wait_success(project) assert project.publicDns is False
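# Illustrative sketch (not part of the suite above): the membership payload that
# helpers such as _create_members build and that project.setmembers(...) consumes
# is a plain list of dicts. The account ids below are invented for the example.
def example_member_payload(owner_id='1a1', member_id='1a2'):
    return [
        {'role': 'owner', 'externalId': owner_id, 'externalIdType': 'rancher_id'},
        {'role': 'member', 'externalId': member_id, 'externalIdType': 'rancher_id'},
    ]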
''' This module holds the constants used for specifying the states of the debugger. ''' DEBUG_TRACE_LEVEL = -1 DEBUG_TRACE_BREAKPOINTS = -1 STATE_RUN = 1 STATE_SUSPEND = 2 try: __setFalse = False except: import __builtin__ setattr(__builtin__, 'True', 1) setattr(__builtin__, 'False', 0) class DebugInfoHolder: #we have to put it here because it can be set through the command line (so, the #already imported references would not have it). DEBUG_RECORD_SOCKET_READS = False #Optimize with psyco? This gave a 50% speedup in the debugger in tests USE_PSYCO_OPTIMIZATION = True #Hold a reference to the original _getframe (because psyco will change that as soon as it's imported) import sys #Note: the sys import must be here anyways (others depend on it) try: GetFrame = sys._getframe except AttributeError: def GetFrame(): raise AssertionError('sys._getframe not available (possible causes: enable -X:Frames on IronPython?)') #Used to determine the maximum size of each variable passed to eclipse -- having a big value here may make #the communication slower -- as the variables are being gathered lazily in the latest version of eclipse, #this value was raised from 200 to 1000. MAXIMUM_VARIABLE_REPRESENTATION_SIZE = 1000 import threading import os _nextThreadIdLock = threading.Lock() #======================================================================================================================= # Python 3? #======================================================================================================================= IS_PY3K = False IS_PY27 = False try: if sys.version_info[0] >= 3: IS_PY3K = True elif sys.version_info[0] == 2 and sys.version_info[1] == 7: IS_PY27 = True except AttributeError: pass #Not all versions have sys.version_info try: IS_64_BITS = sys.maxsize > 2 ** 32 except AttributeError: try: import struct IS_64_BITS = struct.calcsize("P") * 8 > 32 except: IS_64_BITS = False #======================================================================================================================= # Jython? 
#======================================================================================================================= try: import org.python.core.PyDictionary #@UnresolvedImport @UnusedImport -- just to check if it could be valid def DictContains(d, key): return d.has_key(key) except: try: #Py3k does not have has_key anymore, and older versions don't have __contains__ DictContains = dict.__contains__ except: try: DictContains = dict.has_key except NameError: def DictContains(d, key): return d.has_key(key) try: xrange except: #Python 3k does not have it xrange = range try: object except NameError: class object: pass try: enumerate except: def enumerate(lst): ret = [] i=0 for element in lst: ret.append((i, element)) i+=1 return ret #======================================================================================================================= # StringIO #======================================================================================================================= try: from StringIO import StringIO except: from io import StringIO #======================================================================================================================= # NextId #======================================================================================================================= class NextId: def __init__(self): self._id = 0 def __call__(self): #No need to synchronize here self._id += 1 return self._id _nextThreadId = NextId() #======================================================================================================================= # GetThreadId #======================================================================================================================= def GetThreadId(thread): try: return thread.__pydevd_id__ except AttributeError: _nextThreadIdLock.acquire() try: #We do a new check with the lock in place just to be sure that nothing changed if not hasattr(thread, '__pydevd_id__'): try: pid = os.getpid() except AttributeError: try: #Jython does not have it! import java.lang.management.ManagementFactory #@UnresolvedImport -- just for jython pid = java.lang.management.ManagementFactory.getRuntimeMXBean().getName() pid = pid.replace('@', '_') except: #ok, no pid available (will be unable to debug multiple processes) pid = '000001' thread.__pydevd_id__ = 'pid%s_seq%s' % (pid, _nextThreadId()) finally: _nextThreadIdLock.release() return thread.__pydevd_id__ #=============================================================================== # Null #=============================================================================== class Null: """ Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205 """ def __init__(self, *args, **kwargs): return None def __call__(self, *args, **kwargs): return self def __getattr__(self, mname): return self def __setattr__(self, name, value): return self def __delattr__(self, name): return self def __repr__(self): return "<Null>" def __str__(self): return "Null" def __len__(self): return 0 def __getitem__(self): return self def __setitem__(self, *args, **kwargs): pass def write(self, *args, **kwargs): pass def __nonzero__(self): return 0 if __name__ == '__main__': if Null(): sys.stdout.write('here\n')
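# Illustrative sketch of how a null object like the Null class above is commonly
# used: as a stand-in for an optional collaborator so that attribute access and
# calls become silent no-ops instead of raising. The names here are invented.
def _null_object_example():
    maybe_logger = Null()                    # e.g. instead of keeping None around
    maybe_logger.debug("silently ignored")   # returns the Null instance
    maybe_logger.child.warn("also ignored")  # chained access is safe too
    return bool(maybe_logger)                # False, via __nonzero__/__len__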
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- # pylint: disable=invalid-overridden-method import asyncio import random import logging from typing import Any, TYPE_CHECKING from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy from azure.core.exceptions import AzureError from .authentication import StorageHttpChallenge from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE from .policies import is_retry, StorageRetryPolicy if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential from azure.core.pipeline import PipelineRequest, PipelineResponse _LOGGER = logging.getLogger(__name__) async def retry_hook(settings, **kwargs): if settings['hook']: if asyncio.iscoroutine(settings['hook']): await settings['hook']( retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) else: settings['hook']( retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) class AsyncStorageResponseHook(AsyncHTTPPolicy): def __init__(self, **kwargs): # pylint: disable=unused-argument self._response_callback = kwargs.get('raw_response_hook') super(AsyncStorageResponseHook, self).__init__() async def send(self, request): # type: (PipelineRequest) -> PipelineResponse # Values could be 0 data_stream_total = request.context.get('data_stream_total') if data_stream_total is None: data_stream_total = request.context.options.pop('data_stream_total', None) download_stream_current = request.context.get('download_stream_current') if download_stream_current is None: download_stream_current = request.context.options.pop('download_stream_current', None) upload_stream_current = request.context.get('upload_stream_current') if upload_stream_current is None: upload_stream_current = request.context.options.pop('upload_stream_current', None) response_callback = request.context.get('response_callback') or \ request.context.options.pop('raw_response_hook', self._response_callback) response = await self.next.send(request) await response.http_response.load_body() will_retry = is_retry(response, request.context.options.get('mode')) # Auth error could come from Bearer challenge, in which case this request will be made again is_auth_error = response.http_response.status_code == 401 should_update_counts = not (will_retry or is_auth_error) if should_update_counts and download_stream_current is not None: download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) if data_stream_total is None: content_range = response.http_response.headers.get('Content-Range') if content_range: data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) else: data_stream_total = download_stream_current elif should_update_counts and upload_stream_current is not None: upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) for pipeline_obj in [request, response]: pipeline_obj.context['data_stream_total'] = data_stream_total pipeline_obj.context['download_stream_current'] = download_stream_current pipeline_obj.context['upload_stream_current'] = upload_stream_current if response_callback: if asyncio.iscoroutine(response_callback): await response_callback(response) else: response_callback(response) request.context['response_callback'] = 
response_callback return response class AsyncStorageRetryPolicy(StorageRetryPolicy): """ The base class for Exponential and Linear retries containing shared code. """ async def sleep(self, settings, transport): backoff = self.get_backoff_time(settings) if not backoff or backoff < 0: return await transport.sleep(backoff) async def send(self, request): retries_remaining = True response = None retry_settings = self.configure_retries(request) while retries_remaining: try: response = await self.next.send(request) if is_retry(response, retry_settings['mode']): retries_remaining = self.increment( retry_settings, request=request.http_request, response=response.http_response) if retries_remaining: await retry_hook( retry_settings, request=request.http_request, response=response.http_response, error=None) await self.sleep(retry_settings, request.context.transport) continue break except AzureError as err: retries_remaining = self.increment( retry_settings, request=request.http_request, error=err) if retries_remaining: await retry_hook( retry_settings, request=request.http_request, response=None, error=err) await self.sleep(retry_settings, request.context.transport) continue raise err if retry_settings['history']: response.context['history'] = retry_settings['history'] response.http_response.location_mode = retry_settings['mode'] return response class ExponentialRetry(AsyncStorageRetryPolicy): """Exponential retry.""" def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): ''' Constructs an Exponential retry object. The initial_backoff is used for the first retry. Subsequent retries are retried after initial_backoff + increment_power^retry_count seconds. For example, by default the first retry occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the third after (15+3^2) = 24 seconds. :param int initial_backoff: The initial backoff interval, in seconds, for the first retry. :param int increment_base: The base, in seconds, to increment the initial_backoff by after the first retry. :param int max_attempts: The maximum number of retry attempts. :param bool retry_to_secondary: Whether the request should be retried to secondary, if able. This should only be enabled of RA-GRS accounts are used and potentially stale data can be handled. :param int random_jitter_range: A number in seconds which indicates a range to jitter/randomize for the back-off interval. For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. ''' self.initial_backoff = initial_backoff self.increment_base = increment_base self.random_jitter_range = random_jitter_range super(ExponentialRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. :return: An integer indicating how long to wait before retrying the request, or None to indicate no retry should be performed. 
:rtype: int or None """ random_generator = random.Random() backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 random_range_end = backoff + self.random_jitter_range return random_generator.uniform(random_range_start, random_range_end) class LinearRetry(AsyncStorageRetryPolicy): """Linear retry.""" def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): """ Constructs a Linear retry object. :param int backoff: The backoff interval, in seconds, between retries. :param int max_attempts: The maximum number of retry attempts. :param bool retry_to_secondary: Whether the request should be retried to secondary, if able. This should only be enabled of RA-GRS accounts are used and potentially stale data can be handled. :param int random_jitter_range: A number in seconds which indicates a range to jitter/randomize for the back-off interval. For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. """ self.backoff = backoff self.random_jitter_range = random_jitter_range super(LinearRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. :return: An integer indicating how long to wait before retrying the request, or None to indicate no retry should be performed. :rtype: int or None """ random_generator = random.Random() # the backoff interval normally does not change, however there is the possibility # that it was modified by accessing the property directly after initializing the object random_range_start = self.backoff - self.random_jitter_range \ if self.backoff > self.random_jitter_range else 0 random_range_end = self.backoff + self.random_jitter_range return random_generator.uniform(random_range_start, random_range_end) class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy): """ Custom Bearer token credential policy for following Storage Bearer challenges """ def __init__(self, credential, **kwargs): # type: (AsyncTokenCredential, **Any) -> None super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs) async def on_challenge(self, request, response): # type: (PipelineRequest, PipelineResponse) -> bool try: auth_header = response.http_response.headers.get("WWW-Authenticate") challenge = StorageHttpChallenge(auth_header) except ValueError: return False scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE await self.authorize_request(request, scope, tenant_id=challenge.tenant_id) return True
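# Illustrative sketch of the back-off arithmetic described in the ExponentialRetry
# docstring above (initial_backoff on the first retry, then initial_backoff +
# increment_base ** retry_count, jittered by +/- random_jitter_range). Standalone
# math only; it does not touch the pipeline classes.
import random


def example_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
    backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
    low = backoff - jitter if backoff > jitter else 0
    return random.Random().uniform(low, backoff + jitter)


# Without jitter the sequence is 15, 18 (15 + 3**1), 24 (15 + 3**2), ...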
import time import numpy as np import ConfigParser import os.path import Adafruit_CharLCD as LCD import Adafruit_GPIO.MCP230xx as MCP import Adafruit_MAX31855.MAX31855 as MAX31855 import Adafruit_MCP3008 import RPi.GPIO as GPIO import Utilities import LedOutputs import DigitalInputs from CharLcdOutput import CharLcdOutput from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker class MainControl: def __init__(self): self.program_mode = 'Startup' self.mode_state = 'Startup' self.previous_program_mode = 'Startup' self.lcd = CharLcdOutput.setup_char_lcd() #setting up SPI connected devices (temperature sensors and analog sensors) clk = 11 cs_sensor_1 = 7 cs_sensor_2 = 7 cs_analog = 8 data_out = 9 data_in = 10 # set I2C reset pin high so we can see those devices i2c_reset = 21 GPIO.setmode(GPIO.BCM) GPIO.setup(i2c_reset, GPIO.OUT) GPIO.output(i2c_reset, True) # set spi selector high so that we can engage the A2D chip analog_spi_select = 17 GPIO.setup(analog_spi_select, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(22, GPIO.OUT) GPIO.output(analog_spi_select, True) try: sensor = MAX31855.MAX31855(clk, cs_sensor_1, data_out) sensor2 = MAX31855.MAX31855(clk, cs_sensor_2, data_out) mcp = Adafruit_MCP3008.MCP3008(clk=clk, cs=cs_analog, miso=data_out, mosi=data_in) self.temperature_sensors = [sensor, sensor2] self.analog_sensor = mcp except: print 'Could not reach temperature sensors or analog sensor.' # set up I2C GPIO for LEDs try: self.led_gpio = MCP.MCP23017(0x20, busnum=1) for i in range(0,16): self.led_gpio.setup(i, GPIO.OUT) self.led_gpio.output(i, GPIO.HIGH) #True is HIGH is OFF, False is LOW is ON #time.sleep(0.2) time.sleep(2) for i in range(0,16): self.led_gpio.output(i, GPIO.LOW) #True is HIGH is OFF, False is LOW is ON #time.sleep(0.1) except: print 'Could not reach LED control chip' # set up I2C GPIO for inputs try: self.digital_gpio = MCP.MCP23017(0x21, busnum=1) for i in range(0,4): self.digital_gpio.output(i,GPIO.HIGH) for i in range(4,16): self.digital_gpio.setup(i, GPIO.IN) self.digital_gpio.pullup(i, True) except: print 'Could not reach digital input control chip' def check_program_mode(self): self.mode_state self.program_mode pass #print 'Checking Program State' def set_relays(self): pass #print "setting relays" #TODO set relays based on input or program state def main_control_loop(self): temp, temp1, analog_input_values, digital_input_values = self.get_all_inputs() DigitalInputs.update_input_values(digital_input_values) if self.program_mode != self.previous_program_mode: pass # handle state changes first if self.program_mode == 'Manual': #self.lcd.clear() # GPIO.output(22, True) # i = 0 # sensor = self.temperature_sensors[i] # temp = sensor.readTempC() # internal = sensor.readInternalC() # print('Thermocouple {2} Temperature: {0:0.3F}*C / {1:0.3F}*F'.format(temp, Utilities.c_to_f(temp), i)) # print(' Internal {2} Temperature: {0:0.3F}*C / {1:0.3F}*F'.format(internal, Utilities.c_to_f(internal), i)) # self.lcd.message('T{2}:{0:0.2F}*C/{1:0.2F}*F\n'.format(internal, Utilities.c_to_f(internal), i)) # GPIO.output(22, False) # GPIO.output(27, True) # i = 1 # sensor = self.temperature_sensors[i] # temp = sensor.readTempC() # internal = sensor.readInternalC() # print('Thermocouple {2} Temperature: {0:0.3F}*C / {1:0.3F}*F'.format(temp, Utilities.c_to_f(temp), i)) # print(' Internal {2} Temperature: {0:0.3F}*C / {1:0.3F}*F'.format(internal, Utilities.c_to_f(internal), i)) # 
self.lcd.message('T{2}:{0:0.2F}*C/{1:0.2F}*F\n'.format(internal, Utilities.c_to_f(internal), i)) # GPIO.output(27, False) # # analog_spi_select = 17 # GPIO.output(analog_spi_select, True) # values = [0] * 16 # for sensor in range(8): # # The read_ad function will get the value of the specified channel (0-7). # values[sensor] = self.analog_sensor.read_adc(sensor) # print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*values)) # GPIO.output(analog_spi_select, False) # for i in range(16): # values[i] = self.digital_gpio.input(i) # print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*values)) # print('| {8:>4} | {9:>4} | {10:>4} | {11:>4} | {12:>4} | {13:>4} | {14:>4} | {15:>4} |'.format(*values)) time.sleep(1.0) self.set_relays() elif self.program_mode == 'Configure': self.lcd.clear() elif self.program_mode == 'Automatic': self.lcd.clear() def get_target_slope(initial_temp, target_temp, rampup): return (target_temp - initial_temp) / rampup duration = 40*60 #TODO set this rampup_time = 20*60 #TODO set this max_temperature = 82 #TODO set this myTimes = [] myTemps = [] storedTimes = [] storedTemps = [] counter = 0. dutyCycle = 0.5 bufferLength = 10 initialTemp = self.temperature_sensors[0].readTempC() isOn = False while counter < duration: #TODO need to refactor such that this doesn't block main control loop temp = self.temperature_sensors[0].readTempC() internal = self.temperature_sensors[0].readInternalC() if temp != float("NAN"): myTimes.append(counter) myTemps.append(temp) else: print "error reading temperature. duty cycle may be wrong" if (counter % bufferLength) == 0 and dutyCycle > 0 and isOn == False: # TODO turn On Relay isOn = True if (counter % bufferLength) / float(bufferLength) >= dutyCycle and isOn == True: # TODO turn Off Relay isOn = False counter += 1. 
#TODO Logging/persist to DB #with open("temps{0}.txt".format("-s1" if args['sensor'] == '1' else "-s2"), "a") as myFile: # myFile.write('{0:0.3F}\t{1}\n'.format(temp, "1" if isOn else "0")) if myTemps.__len__() == bufferLength: timeArray = np.array([myTimes, np.ones(bufferLength)]) fitSlope, fitIntercept = np.linalg.lstsq(timeArray.T, myTemps)[0] targetSlope = 0 if counter < rampup_time: targetSlope = get_target_slope(initialTemp, max_temperature, rampup_time) targetIntercept = initialTemp else: targetIntercept = max_temperature futureTime = counter + 15 yError = (targetSlope * futureTime + targetIntercept) - (fitSlope * futureTime + fitIntercept) currentYError = (targetSlope * counter + targetIntercept) - (fitSlope * counter + fitIntercept) if yError > 0.25: # increase dutyCycle if we're below target temp dutyCycle = min(dutyCycle + 0.1, 1.0) elif yError < -0.25: dutyCycle = max(dutyCycle - 0.1, 0.0) if currentYError < -1.0: # if we are currently too hot, take immediate action dutyCycle = 0.0 print('---------------------------------------------------') print('Least Squares Values: m={0:0.3F}\tb={1:0.3F}\tyError={2:0.3F}\tNewDutyCycle={3:0.2F}'.format( fitSlope, fitIntercept, yError, dutyCycle)) print('---------------------------------------------------') # with open("fit{0}.txt".format("-s1" if args['sensor'] == '1' else "-s2"), "a") as myFile: # myFile.write('{0:0.3F}\t{1:0.3F}\t{2:0.3F}\t{3:0.2F}\n'.format(fitSlope, fitIntercept, yError, dutyCycle)) storedTimes += myTimes storedTemps += myTemps myTemps = [] myTimes = [] print('Thermocouple Temperature: {0:0.3F}*C / {1:0.3F}*F'.format(temp, Utilities.c_to_f(temp))) # print(' Internal Temperature: {0:0.3F}*C / {1:0.3F}*F'.format(internal2, c_to_f(internal2))) time.sleep(1) # relayControl.turnOff() print 'finished run, relay off' self.check_program_mode() def get_all_inputs(self): GPIO.output(22, True) i = 0 sensor = self.temperature_sensors[i] temp = sensor.readTempC() # internal = sensor.readInternalC() GPIO.output(22, False) GPIO.output(27, True) i = 1 sensor = self.temperature_sensors[i] temp1 = sensor.readTempC() # internal1 = sensor.readInternalC() GPIO.output(27, False) analog_spi_select = 17 GPIO.output(analog_spi_select, True) analog_input_values = [0] * 8 for sensor in range(8): # The read_ad function will get the value of the specified channel (0-7). analog_input_values[sensor] = self.analog_sensor.read_adc(sensor) GPIO.output(analog_spi_select, False) digital_input_values = [0] * 16 for i in range(16): digital_input_values[i] = self.digital_gpio.input(i) return temp, temp1, analog_input_values, digital_input_values class SensorState: duty_cycle = 0.5 initial_temp = 0. current_temp = 0. rampup_time = 20. hold_time = 20. max_temp = 180. def __init__(self): self.duty_cycle= 0.5 config_path = './pywork/.press_config' print os.getcwd() if os.path.isfile(config_path): Config = ConfigParser.ConfigParser() Config.read(config_path) try: conn_string = Config.get('ConnectionStrings','ConnectionString') except: print 'Error reading configuration file. 
Expected Section "ConnectionStrings" and Expected Key: "ConnectionString"' exit() else: print 'Configuration file not found, expected at ' + config_path exit() control = MainControl() while True: DigitalInputs.read_inputs() #AnalogInputs.read_inputs() #TemperatureSensors.read_inputs() control.main_control_loop() LedOutputs.set_outputs() # engine = create_engine(conn_string, isolation_level="READ UNCOMMITTED") # Base = declarative_base(engine) # # ######################################################################## # class Configuration(Base): # """""" # __tablename__ = 'Configuration' # __table_args__ = {'autoload': True} # class FitLog(Base): # """""" # __tablename__ = 'FitLog' # __table_args__ = {'autoload': True} # class RawLog(Base): # """""" # __tablename__ = 'RawLog' # __table_args__ = {'autoload': True} # class RunInformation(Base): # """""" # __tablename__ = 'RunInformation' # __table_args__ = {'autoload': True} # # ---------------------------------------------------------------------- # def loadSession(): # """""" # metadata = Base.metadata # Session = sessionmaker(bind=engine) # session = Session() # return session # # if __name__ == "__main__": # session = loadSession() # res = session.query(Configuration).all() # # config = Configuration(TotalDuration=2200,TopRampUpTime=1000,BottomRampUpTime=1000, # # TopTemperature=80,BottomTemperature=80) # # session.add(config) # # session.commit() # print res[0].Id
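# Illustrative, hardware-free sketch of the duty-cycle logic in the 'Automatic'
# branch above: fit a line to the recent temperature samples, project it forward,
# compare against the target ramp and nudge the duty cycle. The thresholds follow
# the loop above; the function and parameter names are invented. Note that a check
# like `temp != float("NAN")` never filters NaN readings (NaN != NaN is True);
# np.isnan is the reliable test.
import numpy as np


def adjust_duty_cycle(times, temps, duty_cycle, target_slope, target_intercept, lookahead=15):
    # keep only valid (time, temperature) pairs
    pairs = [(x, y) for x, y in zip(times, temps) if not np.isnan(y)]
    if len(pairs) < 2:
        return duty_cycle
    xs = [p[0] for p in pairs]
    ys = [p[1] for p in pairs]
    fit_slope, fit_intercept = np.linalg.lstsq(np.array([xs, np.ones(len(xs))]).T, ys, rcond=None)[0]
    future = xs[-1] + lookahead
    y_error = (target_slope * future + target_intercept) - (fit_slope * future + fit_intercept)
    if y_error > 0.25:       # below the target ramp: heat more
        duty_cycle = min(duty_cycle + 0.1, 1.0)
    elif y_error < -0.25:    # above the target ramp: heat less
        duty_cycle = max(duty_cycle - 0.1, 0.0)
    return duty_cycle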
import logging import os import re from abc import abstractmethod from itertools import chain from xml.dom import minidom from gensim.corpora import Dictionary from gensim.corpora.textcorpus import lower_to_unicode, strip_multiple_whitespaces, remove_short, remove_stopwords from gensim.interfaces import CorpusABC from gensim.parsing import strip_punctuation, PorterStemmer from gensim.utils import deaccent, simple_tokenize class NewsCorpusABC(CorpusABC): """An abstract class representing a news corpus""" def __init__(self, input=None, dictionary=None, metadata=False, character_filters=None, tokenizer=None, token_filters=None, language=None, stem=True): """ Args: input (str): path to top-level directory to traverse for corpus documents. dictionary (Dictionary): if a dictionary is provided, it will not be updated with the given corpus on initialization. If none is provided, a new dictionary will be built for the given corpus. If no corpus is given, the dictionary will remain uninitialized. metadata (bool): True to yield metadata with each document, else False (default). character_filters (iterable of callable): each will be applied to the text of each document in order, and should return a single string with the modified text. For Python 2, the original text will not be unicode, so it may be useful to convert to unicode as the first character filter. The default character filters lowercase, convert to unicode (strict utf8), perform ASCII-folding, then collapse multiple whitespaces. tokenizer (callable): takes as input the document text, preprocessed by all filters in `character_filters`; should return an iterable of tokens (strings). token_filters (iterable of callable): each will be applied to the iterable of tokens in order, and should return another iterable of tokens. These filters can add, remove, or replace tokens, or do nothing at all. The default token filters remove tokens less than 3 characters long and remove stopwords using the list in `gensim.parsing.preprocessing.STOPWORDS`. """ self.input = input self.metadata = metadata self.language = language self.documents = self.get_news_in_folder(input, language) if input is not None else [] self.character_filters = character_filters if self.character_filters is None: self.character_filters = [lower_to_unicode, deaccent, strip_punctuation, strip_multiple_whitespaces] self.tokenizer = tokenizer if self.tokenizer is None: self.tokenizer = simple_tokenize self.token_filters = token_filters if self.token_filters is None: self.token_filters = [remove_short, remove_stopwords] self.stem = stem self.length = None self.dictionary = None self.init_dictionary(dictionary) def init_dictionary(self, dictionary): """If `dictionary` is None, initialize to an empty Dictionary, and then if there is an `input` for the corpus, add all documents from that `input`. If the `dictionary` is already initialized, simply set it as the corpus's `dictionary`. 
""" self.dictionary = dictionary if dictionary is not None else Dictionary() if self.input is not None: if dictionary is None: logging.info("Initializing dictionary") metadata_setting = self.metadata self.metadata = False self.dictionary.add_documents(self.get_texts()) self.metadata = metadata_setting else: logging.info("Input stream provided but dictionary already initialized") else: logging.warning("No input document stream provided; assuming dictionary will be initialized some other way.") @staticmethod def get_news_in_folder(root, language=None): """Find all news articles in a folder.""" if language is None: language = "[a-z]{2}" filename_pattern = re.compile("[0-9]{10}.[0-9]-%s-[0-9A-Fa-f]{32}-[0-9A-Fa-f]{32}.q.job.xml" % language) all_files = [] walk_iter = iter(os.walk(root)) if isinstance(root, str) else chain.from_iterable( os.walk(path) for path in root) for dirpath, subdirs, files in walk_iter: for filename in files: # Check if we already did process file result = filename_pattern.match(filename) if result is None: continue # Construct file path file_path = os.path.join(dirpath, filename) # Store filename and file path all_files.append((filename, file_path)) # Sort by filename all_files.sort(key=lambda x: x[0]) logging.info("Found %d files." % len(all_files)) return all_files def get_texts(self): """Yield preprocessed documents and its metadata if desired""" data = self.getstream() for line, metadata in data: if self.metadata: yield self.preprocess_text(line), metadata else: yield self.preprocess_text(line) def getstream(self): for filename, file_path in self.documents: # Parse document xmldoc = minidom.parse(file_path) # Parse title title_raw = xmldoc.getElementsByTagName('title') if len(title_raw) != 2: logging.warning("Title not found in '%s', file skipped." % file_path) continue title = title_raw[1].firstChild.nodeValue # Parse data text_raw = xmldoc.getElementsByTagName('emm:text') if len(text_raw) == 0: logging.warning("Text not found in '%s', file skipped." % file_path) continue text = text_raw[0].firstChild.nodeValue # Get metadata from filename metadata = filename.split("-") metadata = (metadata[2], float(metadata[0]), title) # (docId, timestamp, title) yield "%s %s" % (title, text), metadata def preprocess_text(self, text): """Apply preprocessing to a single text document. This should perform tokenization in addition to any other desired preprocessing steps. Args: text (str): document text read from plain-text file. Returns: iterable of str: tokens produced from `text` as a result of preprocessing. """ for character_filter in self.character_filters: text = character_filter(text) tokens = self.tokenizer(text) for token_filter in self.token_filters: tokens = token_filter(tokens) if self.stem: p = PorterStemmer() tokens = [p.stem(token) for token in tokens] return tokens def step_through_preprocess(self, text): """Yield tuples of functions and their output for each stage of preprocessing. This is useful for debugging issues with the corpus preprocessing pipeline. """ for character_filter in self.character_filters: text = character_filter(text) yield (character_filter, text) tokens = self.tokenizer(text) yield (self.tokenizer, tokens) for token_filter in self.token_filters: yield (token_filter, token_filter(tokens)) @abstractmethod def _encode(self, text): pass def __iter__(self): """The function that defines a corpus. Iterating over the corpus must yield sparse vectors, one for each document. 
""" if self.metadata: for text, metadata in self.get_texts(): yield self._encode(text), metadata else: for text in self.get_texts(): yield self._encode(text) def __len__(self): if self.length is None: # cache the corpus length self.length = sum(1 for _ in self.getstream()) return self.length
#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import pwd import shutil import sys # TODO(rhallisey): add docstring. logging.basicConfig() LOG = logging.getLogger(__name__) LOG.setLevel(logging.INFO) def validate_config(config): required_keys = {'source', 'dest', 'owner', 'perm'} if 'command' not in config: LOG.error('Config is missing required "command" key') sys.exit(1) # Validate config sections for data in config.get('config_files', list()): # Verify required keys exist. if not data.viewkeys() >= required_keys: LOG.error("Config is missing required keys: %s", data) sys.exit(1) def validate_source(data): source = data.get('source') exists = os.path.exists(source) if not exists: if data.get('optional'): LOG.info("%s does not exist, but is not required", source) return False else: LOG.error("The source to copy does not exist: %s", source) sys.exit(1) return True def copy_files(data): dest = data.get('dest') source = data.get('source') if os.path.exists(dest): LOG.info("Removing existing destination: %s", dest) if os.path.isdir(dest): shutil.rmtree(dest) else: os.remove(dest) if os.path.isdir(source): source_path = source dest_path = dest else: source_path = os.path.dirname(source) dest_path = os.path.dirname(dest) if not os.path.exists(dest_path): LOG.info("Creating dest parent directory: %s", dest_path) os.makedirs(dest_path) if source != source_path: # Source is file LOG.info("Copying %s to %s", source, dest) shutil.copy(source, dest) else: # Source is a directory for src in os.listdir(source_path): LOG.info("Copying %s to %s", os.path.join(source_path, src), dest_path) if os.path.isdir(os.path.join(source_path, src)): shutil.copytree(os.path.join(source_path, src), dest_path) else: shutil.copy(os.path.join(source_path, src), dest_path) def set_permissions(data): def set_perms(file_, uid, guid, perm): LOG.info("Setting permissions for %s", file_) # Give config file proper perms. try: os.chown(file_, uid, gid) os.chmod(file_, perm) except OSError as e: LOG.error("Error while setting permissions for %s: %r", file_, repr(e)) sys.exit(1) dest = data.get('dest') owner = data.get('owner') perm = int(data.get('perm'), 0) # Check for user and group id in the environment. 
try: user = pwd.getpwnam(owner) except KeyError: LOG.error("The specified user does not exist: %s", owner) sys.exit(1) uid = user.pw_uid gid = user.pw_gid # Set permissions on the top level dir or file set_perms(dest, uid, gid, perm) if os.path.isdir(dest): # Recursively set permissions for root, dirs, files in os.walk(dest): for dir_ in dirs: set_perms(os.path.join(root, dir_), uid, gid, perm) for file_ in files: set_perms(os.path.join(root, file_), uid, gid, perm) def load_config(): def load_from_env(): config_raw = os.environ.get("KOLLA_CONFIG") if config_raw is None: return None # Attempt to read config try: return json.loads(config_raw) except ValueError: LOG.error('Invalid json for Kolla config') sys.exit(1) def load_from_file(): config_file = '/var/lib/kolla/config_files/config.json' LOG.info("Loading config file at %s", config_file) # Attempt to read config file with open(config_file) as f: try: return json.load(f) except ValueError: LOG.error("Invalid json file found at %s", config_file) sys.exit(1) except IOError as e: LOG.error("Could not read file %s: %r", config_file, e) sys.exit(1) config = load_from_env() if config is None: config = load_from_file() LOG.info('Validating config file') validate_config(config) return config def copy_config(config): if 'config_files' in config: LOG.info('Copying service configuration files') for data in config['config_files']: if validate_source(data): copy_files(data) set_permissions(data) else: LOG.debug('No files to copy found in config') LOG.info('Writing out command to execute') LOG.debug("Command is: %s", config['command']) # The value from the 'command' key will be written to '/run_command' with open('/run_command', 'w+') as f: f.write(config['command']) def execute_config_strategy(): config_strategy = os.environ.get("KOLLA_CONFIG_STRATEGY") LOG.info("Kolla config strategy set to: %s", config_strategy) config = load_config() if config_strategy == "COPY_ALWAYS": copy_config(config) elif config_strategy == "COPY_ONCE": if os.path.exists('/configured'): LOG.info("The config strategy prevents copying new configs") sys.exit(0) else: copy_config(config) os.mknod('/configured') else: LOG.error('KOLLA_CONFIG_STRATEGY is not set properly') sys.exit(1) def execute_config_check(): config = load_config() for config_file in config.get('config_files', {}): source = config_file.get('source') dest = config_file.get('dest') perm = config_file.get('perm') owner = config_file.get('owner') optional = config_file.get('optional', False) if not os.path.exists(dest): if optional: LOG.info('Dest file does not exist, but is optional: %s', dest) return else: LOG.error('Dest file does not exist and is: %s', dest) sys.exit(1) # check content with open(source) as fp1, open(dest) as fp2: if fp1.read() != fp2.read(): LOG.error('The content of source file(%s) and' ' dest file(%s) are not equal.', source, dest) sys.exit(1) # check perm file_stat = os.stat(dest) actual_perm = oct(file_stat.st_mode)[-4:] if perm != actual_perm: LOG.error('Dest file does not have expected perm: %s, actual: %s', perm, actual_perm) sys.exit(1) # check owner actual_user = pwd.getpwuid(file_stat.st_uid) if actual_user.pw_name != owner: LOG.error('Dest file does not have expected user: %s, actual: %s ', owner, actual_user.pw_name) sys.exit(1) LOG.info('The config files are in the expected state') def main(): parser = argparse.ArgumentParser() parser.add_argument('--check', action='store_true', required=False, help='Check whether the configs changed') conf = parser.parse_args() if conf.check: 
execute_config_check() else: execute_config_strategy() return 0 if __name__ == "__main__": sys.exit(main())
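# Illustrative sketch of the JSON shape this script consumes via KOLLA_CONFIG or
# /var/lib/kolla/config_files/config.json: a 'command' string plus 'config_files'
# entries carrying the source/dest/owner/perm keys that validate_config() checks
# (and the 'optional' flag read by validate_source()). The service, paths and
# permissions below are invented for the example.
EXAMPLE_KOLLA_CONFIG = {
    "command": "/usr/sbin/httpd -DFOREGROUND",
    "config_files": [
        {
            "source": "/var/lib/kolla/config_files/httpd.conf",
            "dest": "/etc/httpd/conf/httpd.conf",
            "owner": "root",
            "perm": "0600",
            "optional": False,
        }
    ],
}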
import typing from functools import partial import json import sys from textwrap import dedent import time import pika class AbstractQueueConnection(object): """A task queue. Implement this interface to provide a task queue for the workers. """ CONNECTION_TIMEOUT = 10 # second def connect(self): """Establish the connection. This method should be able to retry the connection until CONNECTION_TIMEOUT or sleep and try at the end of the CONNECTION_TIMEOUT period. Lack of network connection is NOT an error until the CONNECTION_TIMEOUT period expires. """ pass def send(self, message): """Send an asynchronous message.""" pass def receive(self): """Return multiple responses. It should be a generator that produces each response. The user is supposed to send `True` back to the generator when all the responses are returned. """ pass def consume_forever(self, callback): """Consume and invoke `callback`. `callback` has the signature:: f(message, responder) where `responder` is a function with signature:: g(message) that can be used to answer to the producer. This method should be able to retry the connection. """ pass def delete(self): """Delete this queue.""" pass class QueueConnection(AbstractQueueConnection): def __init__(self, queue_host, queue_port, queue_name): self.queue_host = queue_host self.queue_port = queue_port self.queue_name = queue_name self.connect() def delete(self): self.channel.queue_delete(self.queue_name) def connect(self): """Establish a connection with the task queue.""" start_t = time.time() while True: try: self.connection = pika.BlockingConnection( pika.ConnectionParameters(host=self.queue_host, port=self.queue_port)) self.channel = self.connection.channel() self.channel.queue_declare(queue=self.queue_name, durable=True) return except pika.exceptions.AMQPConnectionError: if time.time() - start_t > self.CONNECTION_TIMEOUT: raise time.sleep(0.5) def send(self, message): """Send a message to the queue. Return immediately. Use `receive` to get the result. """ self.response = None self.result = self.channel.queue_declare(exclusive=True) self.result_queue = self.result.method.queue self.channel.basic_publish(exchange='', routing_key=self.queue_name, body=message, properties=pika.BasicProperties( # make message persistent delivery_mode=2, reply_to=self.result_queue)) def receive(self): """Receive results from the queue. A generator returning objects from the queue. It will block if there are no objects yet. The end of the stream is marked by sending `True` back to the generator. """ while True: (ok, props, message) = self.channel.basic_get( self.result_queue, no_ack=True) if ok is not None: is_done = yield message if is_done: return def consume_forever(self, callback, **kwargs): while True: try: self.channel.basic_qos(prefetch_count=1) self.channel.basic_consume(partial(self.on_consume, callback), queue=self.queue_name, no_ack=True, **kwargs) self.channel.start_consuming() except pika.exceptions.ChannelClosed: if kwargs.get('exclusive', False): # if the channel closes and the connection is # 'exclusive', just return. This is so temporary # connections can be clean up automatically. 
return except Exception as exc: # on exceptions, try to reconnect to the queue # it will give up after CONNECTION_TIMEOUT pass self.connect() def on_consume(self, callback, ch, method, props, body): def responder(result): ch.basic_publish(exchange='', routing_key=props.reply_to, body=result) callback(body, responder) class Producer(QueueConnection): """Send messages to the queue exchange and receive answers. The `receive` method behaves as a generator, returning a stream of messages. """ def send(self, message): """Send a dictionary as message.""" super(Producer, self).send(json.dumps(message)) def receive(self): """Receive messages until getting `END`.""" g = super(Producer, self).receive() for message in g: if message == 'END': # save the object after 'END' as metadata, so the # client can use it self.metadata = json.loads(next(g)) g.send(True) return yield json.loads(message) def check_queue(display=False): """Check that we can establish a connection to the queue.""" from adama.config import Config host = Config.get('queue', 'host') port = Config.getint('queue', 'port') try: q = QueueConnection(queue_host=host, queue_port=port, queue_name='test') q.delete() return True except Exception: if display: print(dedent( """ Cannot connect to queue exchange at {0}:{1} with dummy queue "test". Please, check ~/.adama.conf """.format(host, port)), file=sys.stderr) return False
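# Illustrative sketch of a worker-side callback for consume_forever(): it replies
# through the responder and finishes with 'END' followed by a JSON metadata
# object, which is the convention Producer.receive() above relies on. The request
# payload ('items') is an invented example.
import json


def example_worker(message, responder):
    request = json.loads(message)
    for item in request.get('items', []):
        responder(json.dumps({'echo': item}))
    responder('END')
    responder(json.dumps({'count': len(request.get('items', []))}))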
# Copyright (c) 2017 NTT Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from datetime import datetime from unittest import mock from blazarclient import exception from blazarclient import shell from blazarclient import tests from blazarclient.v1.shell_commands import leases mock_time = mock.Mock(return_value=datetime(2020, 6, 8)) FIRST_LEASE = 'd1e43d6d-8f6f-4c2e-b0a9-2982b39dc698' SECOND_LEASE = '424d21c3-45a2-448a-81ad-32eddc888375' @mock.patch('blazarclient.v1.shell_commands.leases._utc_now', mock_time) class CreateLeaseTestCase(tests.TestCase): def setUp(self): super(CreateLeaseTestCase, self).setUp() self.cl = leases.CreateLease(shell.BlazarShell(), mock.Mock()) def test_args2body_correct_phys_res_params(self): args = argparse.Namespace( start='2020-07-24 20:00', end='2020-08-09 22:30', before_end='2020-08-09 21:30', events=[], name='lease-test', reservations=[], physical_reservations=[ 'min=1,' 'max=2,' 'hypervisor_properties=' '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]],' 'resource_properties=' '["==", "$extra_key", "extra_value"],' 'before_end=default' ] ) expected = { 'start': '2020-07-24 20:00', 'end': '2020-08-09 22:30', 'before_end': '2020-08-09 21:30', 'events': [], 'name': 'lease-test', 'reservations': [ { 'min': 1, 'max': 2, 'hypervisor_properties': '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]]', 'resource_properties': '["==", "$extra_key", "extra_value"]', 'resource_type': 'physical:host', 'before_end': 'default' } ] } self.assertDictEqual(self.cl.args2body(args), expected) def test_args2body_incorrect_phys_res_params(self): args = argparse.Namespace( start='2020-07-24 20:00', end='2020-08-09 22:30', before_end='2020-08-09 21:30', events=[], name='lease-test', reservations=[], physical_reservations=[ 'incorrect_param=1,' 'min=1,' 'max=2,' 'hypervisor_properties=' '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]],' 'resource_properties=' '["==", "$extra_key", "extra_value"]' ] ) self.assertRaises(exception.IncorrectLease, self.cl.args2body, args) def test_args2body_duplicated_phys_res_params(self): args = argparse.Namespace( start='2020-07-24 20:00', end='2020-08-09 22:30', before_end='2020-08-09 21:30', events=[], name='lease-test', reservations=[], physical_reservations=[ 'min=1,' 'min=1,' 'max=2,' 'hypervisor_properties=' '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]],' 'resource_properties=' '["==", "$extra_key", "extra_value"]' ] ) self.assertRaises(exception.DuplicatedLeaseParameters, self.cl.args2body, args) def test_args2body_correct_instance_res_params(self): args = argparse.Namespace( start='2020-07-24 20:00', end='2020-08-09 22:30', before_end='2020-08-09 21:30', events=[], name='lease-test', reservations=[ 'vcpus=4,' 'memory_mb=1024,' 'disk_gb=10,' 'amount=2,' 'affinity=True,' 'resource_properties=' '["==", "$extra_key", "extra_value"],' 'resource_type=virtual:instance' ], physical_reservations=[ 'min=1,' 'max=2,' 'hypervisor_properties=' '["and", [">=", "$vcpus", "2"], ' '[">=", 
"$memory_mb", "2048"]],' 'resource_properties=' '["==", "$extra_key", "extra_value"],' 'before_end=default' ] ) expected = { 'start': '2020-07-24 20:00', 'end': '2020-08-09 22:30', 'before_end': '2020-08-09 21:30', 'events': [], 'name': 'lease-test', 'reservations': [ { 'min': 1, 'max': 2, 'hypervisor_properties': '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]]', 'resource_properties': '["==", "$extra_key", "extra_value"]', 'resource_type': 'physical:host', 'before_end': 'default' }, { 'vcpus': 4, 'memory_mb': 1024, 'disk_gb': 10, 'amount': 2, 'affinity': 'True', 'resource_properties': '["==", "$extra_key", "extra_value"]', 'resource_type': 'virtual:instance' } ] } self.assertDictEqual(self.cl.args2body(args), expected) def test_args2body_start_now(self): args = argparse.Namespace( start='now', end='2030-08-09 22:30', before_end='2030-08-09 21:30', events=[], name='lease-test', reservations=[], physical_reservations=[ 'min=1,' 'max=2,' 'hypervisor_properties=' '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]],' 'resource_properties=' '["==", "$extra_key", "extra_value"],' 'before_end=default' ] ) expected = { 'start': 'now', 'end': '2030-08-09 22:30', 'before_end': '2030-08-09 21:30', 'events': [], 'name': 'lease-test', 'reservations': [ { 'min': 1, 'max': 2, 'hypervisor_properties': '["and", [">=", "$vcpus", "2"], ' '[">=", "$memory_mb", "2048"]]', 'resource_properties': '["==", "$extra_key", "extra_value"]', 'resource_type': 'physical:host', 'before_end': 'default' } ] } self.assertDictEqual(self.cl.args2body(args), expected) class UpdateLeaseTestCase(tests.TestCase): def setUp(self): super(UpdateLeaseTestCase, self).setUp() self.cl = leases.UpdateLease(shell.BlazarShell(), mock.Mock()) def test_args2body_time_params(self): args = argparse.Namespace( name=None, prolong_for='1h', reduce_by=None, end_date=None, defer_by=None, advance_by=None, start_date=None, reservation=None ) expected = { 'prolong_for': '1h', } self.assertDictEqual(self.cl.args2body(args), expected) def test_args2body_host_reservation_params(self): args = argparse.Namespace( name=None, prolong_for=None, reduce_by=None, end_date=None, defer_by=None, advance_by=None, start_date=None, reservation=[ 'id=798379a6-194c-45dc-ba34-1b5171d5552f,' 'max=3,' 'hypervisor_properties=' '["and", [">=", "$vcpus", "4"], ' '[">=", "$memory_mb", "8192"]],' 'resource_properties=' '["==", "$extra_key", "extra_value"]' ] ) expected = { 'reservations': [ { 'id': '798379a6-194c-45dc-ba34-1b5171d5552f', 'max': 3, 'hypervisor_properties': '["and", [">=", "$vcpus", "4"], ' '[">=", "$memory_mb", "8192"]]', 'resource_properties': '["==", "$extra_key", "extra_value"]' } ] } self.assertDictEqual(self.cl.args2body(args), expected) def test_args2body_instance_reservation_params(self): args = argparse.Namespace( name=None, prolong_for=None, reduce_by=None, end_date=None, defer_by=None, advance_by=None, start_date=None, reservation=[ 'id=798379a6-194c-45dc-ba34-1b5171d5552f,' 'vcpus=3,memory_mb=1024,disk_gb=20,' 'amount=4,affinity=False' ] ) expected = { 'reservations': [ { 'id': '798379a6-194c-45dc-ba34-1b5171d5552f', 'vcpus': 3, 'memory_mb': 1024, 'disk_gb': 20, 'amount': 4, 'affinity': 'False' } ] } self.assertDictEqual(self.cl.args2body(args), expected) class ShowLeaseTestCase(tests.TestCase): def create_show_command(self): mock_lease_manager = mock.Mock() mock_client = mock.Mock() mock_client.lease = mock_lease_manager blazar_shell = shell.BlazarShell() blazar_shell.client = mock_client return 
(leases.ShowLease(blazar_shell, mock.Mock()), mock_lease_manager) def test_show_lease(self): show_lease, lease_manager = self.create_show_command() lease_manager.get.return_value = {'id': FIRST_LEASE} mock.seal(lease_manager) args = argparse.Namespace(id=FIRST_LEASE) expected = [('id',), (FIRST_LEASE,)] self.assertEqual(show_lease.get_data(args), expected) lease_manager.get.assert_called_once_with(FIRST_LEASE) def test_show_lease_by_name(self): show_lease, lease_manager = self.create_show_command() lease_manager.list.return_value = [ {'id': FIRST_LEASE, 'name': 'first-lease'}, {'id': SECOND_LEASE, 'name': 'second-lease'}, ] lease_manager.get.return_value = {'id': SECOND_LEASE} mock.seal(lease_manager) args = argparse.Namespace(id='second-lease') expected = [('id',), (SECOND_LEASE,)] self.assertEqual(show_lease.get_data(args), expected) lease_manager.list.assert_called_once_with() lease_manager.get.assert_called_once_with(SECOND_LEASE) class DeleteLeaseTestCase(tests.TestCase): def create_delete_command(self): mock_lease_manager = mock.Mock() mock_client = mock.Mock() mock_client.lease = mock_lease_manager blazar_shell = shell.BlazarShell() blazar_shell.client = mock_client return (leases.DeleteLease(blazar_shell, mock.Mock()), mock_lease_manager) def test_delete_lease(self): delete_lease, lease_manager = self.create_delete_command() lease_manager.delete.return_value = None mock.seal(lease_manager) args = argparse.Namespace(id=FIRST_LEASE) delete_lease.run(args) lease_manager.delete.assert_called_once_with(FIRST_LEASE) def test_delete_lease_by_name(self): delete_lease, lease_manager = self.create_delete_command() lease_manager.list.return_value = [ {'id': FIRST_LEASE, 'name': 'first-lease'}, {'id': SECOND_LEASE, 'name': 'second-lease'}, ] lease_manager.delete.return_value = None mock.seal(lease_manager) args = argparse.Namespace(id='second-lease') delete_lease.run(args) lease_manager.list.assert_called_once_with() lease_manager.delete.assert_called_once_with(SECOND_LEASE)
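# Illustrative sketch of the mapping these tests exercise: a CLI-style
# 'key=value,key=value' reservation string becomes a reservation dict in the
# request body. This naive parser is NOT the blazarclient implementation and only
# handles values without embedded commas (the real client also copes with JSON
# lists such as hypervisor_properties).
def naive_parse_reservation(spec):
    reservation = {}
    for part in spec.split(','):
        key, _, value = part.partition('=')
        reservation[key] = int(value) if value.isdigit() else value
    return reservation


# naive_parse_reservation('min=1,max=2,resource_type=physical:host')
# -> {'min': 1, 'max': 2, 'resource_type': 'physical:host'}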
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' module for handling USB boxes connected via psyscopex install, which circumcents the usual HID driver. we provide a HID "emulation" layer, so the rest of the code interacts with this in the same way as for a HID device ''' import logging import struct from ctypes import * from hid import HIDDevice from hid.cparser import parse, define try: from hid.osx import IOObjectRelease, find_usb_devices, COMObjectRef, IOCreatePlugInInterfaceForService, \ CFUUIDGetConstantUUIDWithBytes, IOCFPlugInInterfaceStruct, SInt32, UInt32, UInt8, kIOCFPlugInInterfaceID, \ IUNKNOWN_C_GUTS, CFUUIDGetUUIDBytes, io_iterator_t, IOIteratorNext, kIOReturnSuccess on_osx=True except: on_osx=False if on_osx: # only define all these things if we have imported the osx hid library ok def err_system(x): return (((x)&0x3f)<<26) def err_sub(x): return (((x)&0xfff)<<14) def iokit_common_err(ret): sys_iokit=err_system(0x38) sub_iokit_common=err_sub(0) return sys_iokit|sub_iokit_common|ret kIOUSBDeviceClassName="IOUSBDevice" kUSBVendorID="idVendor" kUSBProductID="idProduct" kIOReturnExclusiveAccess=iokit_common_err(0x2c5) kIOUSBFindInterfaceDontCare = 0xFFFF kIOUSBDeviceUserClientTypeID=CFUUIDGetConstantUUIDWithBytes(None, 0x9d, 0xc7, 0xb7, 0x80, 0x9e, 0xc0, 0x11, 0xD4, 0xa5, 0x4f, 0x00, 0x0a, 0x27, 0x05, 0x28, 0x61) kIOUSBInterfaceUserClientTypeID=CFUUIDGetConstantUUIDWithBytes(None, 0x2d, 0x97, 0x86, 0xc6, 0x9e, 0xf3, 0x11, 0xD4, 0xad, 0x51, 0x00, 0x0a, 0x27, 0x05, 0x28, 0x61) kIOUSBInterfaceInterfaceID=CFUUIDGetConstantUUIDWithBytes(None, 0x73, 0xc9, 0x7a, 0xe8, 0x9e, 0xf3, 0x11, 0xD4, 0xb1, 0xd0, 0x00, 0x0a, 0x27, 0x05, 0x28, 0x61) kIOUSBDeviceInterfaceID=CFUUIDGetConstantUUIDWithBytes(None, 0x5c, 0x81, 0x87, 0xd0, 0x9e, 0xf3, 0x11, 0xD4, 0x8b, 0x45, 0x00, 0x0a, 0x27, 0x05, 0x28, 0x61) USBDeviceAddress=define('USBDeviceAddress','UInt16') class IOUSBDevRequest(Structure): _fields_=[ parse('UInt8 bmRequestType').cstruct, parse('UInt8 bRequest').cstruct, parse('UInt16 wValue').cstruct, parse('UInt16 wIndex').cstruct, parse('UInt16 wLength').cstruct, parse('void *pData').cstruct, parse('UInt32 wLenDone').cstruct, ] define('IOUSBDevRequest',IOUSBDevRequest) IOAsyncCallback1=parse('void ( *IOAsyncCallback1)(void *refcon,IOReturn result,void *arg0)').ctype define('IOAsyncCallback1',IOAsyncCallback1) class IOUSBIsocFrame(Structure): _fields_=[ parse('IOReturn frStatus').cstruct, parse('UInt16 frReqCount').cstruct, parse('UInt16 frActCount').cstruct, ] define('IOUSBIsocFrame',IOUSBIsocFrame) class IOUSBDevRequestTO(Structure): _fields_=[ parse('UInt8 bmRequestType').cstruct, parse('UInt8 bRequest').cstruct, parse('UInt16 wValue').cstruct, parse('UInt16 wIndex').cstruct, parse('UInt16 wLength').cstruct, parse('void *pData').cstruct, parse('UInt32 wLenDone').cstruct, parse('UInt32 noDataTimeout').cstruct, parse('UInt32 completionTimeout').cstruct, ] define('IOUSBDevRequestTO', IOUSBDevRequestTO) class IOUSBConfigurationDescriptor(Structure): _fields_=[ parse('UInt8 bLength').cstruct, parse('UInt8 bDescriptorType').cstruct, parse('UInt16 wTotalLength').cstruct, parse('UInt8 bNumInterfaces').cstruct, parse('UInt8 bConfigurationValue').cstruct, parse('UInt8 iConfiguration').cstruct, parse('UInt8 bmAttributes').cstruct, parse('UInt8 MaxPower').cstruct, ] define('IOUSBConfigurationDescriptor',IOUSBConfigurationDescriptor) IOUSBConfigurationDescriptorPtr=define('IOUSBConfigurationDescriptorPtr','IOUSBConfigurationDescriptor*') 
class IOUSBFindInterfaceRequest(Structure): _fields_=[ parse('UInt16 bInterfaceClass').cstruct, parse('UInt16 bInterfaceSubClass').cstruct, parse('UInt16 bInterfaceProtocol').cstruct, parse('UInt16 bAlternateSetting').cstruct, ] define('IOUSBFindInterfaceRequest',IOUSBFindInterfaceRequest) class IOUSBDeviceInterface(Structure): _fields_= IUNKNOWN_C_GUTS + \ [ parse('IOReturn (*CreateDeviceAsyncEventSource)(void *self, CFRunLoopSourceRef *source)').cstruct, parse('CFRunLoopSourceRef (*GetDeviceAsyncEventSource)(void *self)').cstruct, parse('IOReturn (*CreateDeviceAsyncPort)(void *self, mach_port_t *port)').cstruct, parse('mach_port_t (*GetDeviceAsyncPort)(void *self)').cstruct, parse('IOReturn (*USBDeviceOpen)(void *self)').cstruct, parse('IOReturn (*USBDeviceClose)(void *self)').cstruct, parse('IOReturn (*GetDeviceClass)(void *self, UInt8 *devClass)').cstruct, parse('IOReturn (*GetDeviceSubClass)(void *self, UInt8 *devSubClass)').cstruct, parse('IOReturn (*GetDeviceProtocol)(void *self, UInt8 *devProtocol)').cstruct, parse('IOReturn (*GetDeviceVendor)(void *self, UInt16 *devVendor)').cstruct, parse('IOReturn (*GetDeviceProduct)(void *self, UInt16 *devProduct)').cstruct, parse('IOReturn (*GetDeviceReleaseNumber)(void *self, UInt16 *devRelNum)').cstruct, parse('IOReturn (*GetDeviceAddress)(void *self, USBDeviceAddress *addr)').cstruct, parse('IOReturn (*GetDeviceBusPowerAvailable)(void *self, UInt32 *powerAvailable)').cstruct, parse('IOReturn (*GetDeviceSpeed)(void *self, UInt8 *devSpeed)').cstruct, parse('IOReturn (*GetNumberOfConfigurations)(void *self, UInt8 *numConfig)').cstruct, parse('IOReturn (*GetLocationID)(void *self, UInt32 *locationID)').cstruct, parse('IOReturn (*GetConfigurationDescriptorPtr)(void *self, UInt8 configIndex, IOUSBConfigurationDescriptorPtr *desc)').cstruct, parse('IOReturn (*GetConfiguration)(void *self, UInt8 *configNum)').cstruct, parse('IOReturn (*SetConfiguration)(void *self, UInt8 configNum)').cstruct, parse('IOReturn (*GetBusFrameNumber)(void *self, UInt64 *frame, AbsoluteTime *atTime)').cstruct, parse('IOReturn (*ResetDevice)(void *self)').cstruct, parse('IOReturn (*DeviceRequest)(void *self, IOUSBDevRequest *req)').cstruct, parse('IOReturn (*DeviceRequestAsync)(void *self, IOUSBDevRequest *req, IOAsyncCallback1 callback, void *refCon)').cstruct, parse('IOReturn (*CreateInterfaceIterator)(void *self, IOUSBFindInterfaceRequest *req, io_iterator_t *iter)').cstruct, ] define('IOUSBDeviceInterface',IOUSBDeviceInterface) class IOUSBInterfaceInterface182(Structure): _fields_ = IUNKNOWN_C_GUTS + \ [ parse('IOReturn (*CreateInterfaceAsyncEventSource)(void *self, CFRunLoopSourceRef *source)').cstruct, parse('CFRunLoopSourceRef (*GetInterfaceAsyncEventSource)(void *self)').cstruct, parse('IOReturn (*CreateInterfaceAsyncPort)(void *self, mach_port_t *port)').cstruct, parse('mach_port_t (*GetInterfaceAsyncPort)(void *self)').cstruct, parse('IOReturn (*USBInterfaceOpen)(void *self)').cstruct, parse('IOReturn (*USBInterfaceClose)(void *self)').cstruct, parse('IOReturn (*GetInterfaceClass)(void *self, UInt8 *intfClass)').cstruct, parse('IOReturn (*GetInterfaceSubClass)(void *self, UInt8 *intfSubClass)').cstruct, parse('IOReturn (*GetInterfaceProtocol)(void *self, UInt8 *intfProtocol)').cstruct, parse('IOReturn (*GetDeviceVendor)(void *self, UInt16 *devVendor)').cstruct, parse('IOReturn (*GetDeviceProduct)(void *self, UInt16 *devProduct)').cstruct, parse('IOReturn (*GetDeviceReleaseNumber)(void *self, UInt16 *devRelNum)').cstruct, parse('IOReturn 
(*GetConfigurationValue)(void *self, UInt8 *configVal)').cstruct, parse('IOReturn (*GetInterfaceNumber)(void *self, UInt8 *intfNumber)').cstruct, parse('IOReturn (*GetAlternateSetting)(void *self, UInt8 *intfAltSetting)').cstruct, parse('IOReturn (*GetNumEndpoints)(void *self, UInt8 *intfNumEndpoints)').cstruct, parse('IOReturn (*GetLocationID)(void *self, UInt32 *locationID)').cstruct, parse('IOReturn (*GetDevice)(void *self, io_service_t *device)').cstruct, parse('IOReturn (*SetAlternateInterface)(void *self, UInt8 alternateSetting)').cstruct, parse('IOReturn (*GetBusFrameNumber)(void *self, UInt64 *frame, AbsoluteTime *atTime)').cstruct, parse('IOReturn (*ControlRequest)(void *self, UInt8 pipeRef, IOUSBDevRequest *req)').cstruct, parse('IOReturn (*ControlRequestAsync)(void *self, UInt8 pipeRef, IOUSBDevRequest *req, IOAsyncCallback1 callback, void *refCon)').cstruct, parse('IOReturn (*GetPipeProperties)(void *self, UInt8 pipeRef, UInt8 *direction, UInt8 *number, UInt8 *transferType, UInt16 *maxPacketSize, UInt8 *interval)').cstruct, parse('IOReturn (*GetPipeStatus)(void *self, UInt8 pipeRef)').cstruct, parse('IOReturn (*AbortPipe)(void *self, UInt8 pipeRef)').cstruct, parse('IOReturn (*ResetPipe)(void *self, UInt8 pipeRef)').cstruct, parse('IOReturn (*ClearPipeStall)(void *self, UInt8 pipeRef)').cstruct, parse('IOReturn (*ReadPipe)(void *self, UInt8 pipeRef, void *buf, UInt32 *size)').cstruct, parse('IOReturn (*WritePipe)(void *self, UInt8 pipeRef, void *buf, UInt32 size)').cstruct, parse('IOReturn (*ReadPipeAsync)(void *self, UInt8 pipeRef, void *buf, UInt32 size, IOAsyncCallback1 callback, void *refcon)').cstruct, parse('IOReturn (*WritePipeAsync)(void *self, UInt8 pipeRef, void *buf, UInt32 size, IOAsyncCallback1 callback, void *refcon)').cstruct, parse('IOReturn (*ReadIsochPipeAsync)(void *self, UInt8 pipeRef, void *buf, UInt64 frameStart, UInt32 numFrames, IOUSBIsocFrame *frameList,' 'IOAsyncCallback1 callback, void *refcon)').cstruct, parse('IOReturn (*WriteIsochPipeAsync)(void *self, UInt8 pipeRef, void *buf, UInt64 frameStart, UInt32 numFrames, IOUSBIsocFrame *frameList,' 'IOAsyncCallback1 callback, void *refcon)').cstruct, parse('IOReturn (*ControlRequestTO)(void *self, UInt8 pipeRef, IOUSBDevRequestTO *req)').cstruct, parse('IOReturn (*ControlRequestAsyncTO)(void *self, UInt8 pipeRef, IOUSBDevRequestTO *req, IOAsyncCallback1 callback, void *refCon)').cstruct, parse('IOReturn (*ReadPipeTO)(void *self, UInt8 pipeRef, void *buf, UInt32 *size, UInt32 noDataTimeout, UInt32 completionTimeout)').cstruct, parse('IOReturn (*WritePipeTO)(void *self, UInt8 pipeRef, void *buf, UInt32 size, UInt32 noDataTimeout, UInt32 completionTimeout)').cstruct, parse('IOReturn (*ReadPipeAsyncTO)(void *self, UInt8 pipeRef, void *buf, UInt32 size, UInt32 noDataTimeout, UInt32 completionTimeout, IOAsyncCallback1 callback, void *refcon)').cstruct, parse('IOReturn (*WritePipeAsyncTO)(void *self, UInt8 pipeRef, void *buf, UInt32 size, UInt32 noDataTimeout, UInt32 completionTimeout, IOAsyncCallback1 callback, void *refcon)').cstruct, parse('IOReturn (*USBInterfaceGetStringIndex)(void *self, UInt8 *si)').cstruct, ] def find_devices(): # attempt to directly find usb devices via # this function from hid lib (fail gracefully if # we're not running on os x) if on_osx: return find_usb_devices(kIOUSBDeviceClassName,kUSBVendorID,kUSBProductID,PsyScopeXUSBDevice) return [] class PsyScopeXUSBDevice(HIDDevice): def __init__(self, dev, vendor, product): super(PsyScopeXUSBDevice,self).__init__(vendor,product) 
self._usbDevice=dev self._devInterface = None # cache these so they are guaranteed to be available # when calling __del__ self.IOObjectRelease=IOObjectRelease self.logging_info=logging.info self._parent__del__=super(PsyScopeXUSBDevice,self).__del__ def __del__(self): self._parent__del__() if self._usbDevice: self.logging_info("releasing USB device: %s"%self) self.IOObjectRelease(self._usbDevice) def close(self): if self._devInterface: self._devInterface.USBInterfaceClose() super(PsyScopeXUSBDevice,self).close() def is_open(self): return self._devInterface is not None def open(self): logging.info("opening hid device via usb") # plugInInterface initialised to NULL plugInInterface=COMObjectRef(POINTER(POINTER(IOCFPlugInInterfaceStruct))()) score=SInt32() err=IOCreatePlugInInterfaceForService(self._usbDevice, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, byref(plugInInterface.ref), byref(score)); # query to get the USB interface usbDevInterface=POINTER(POINTER(IOUSBDeviceInterface))() err=plugInInterface.QueryInterface(CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),parse('LPVOID*').cast(byref(usbDevInterface))) if err: raise RuntimeError("Failed to QueryInterface for USB Device Interface") usbDevInterface=COMObjectRef(usbDevInterface) # open USB device err=usbDevInterface.USBDeviceOpen() if err: if err == kIOReturnExclusiveAccess: raise RuntimeError("Device already open") raise RuntimeError("Could not open device") numConf=UInt8() err=usbDevInterface.GetNumberOfConfigurations(byref(numConf)) if err: raise RuntimeError("Error calling GetNumberOfConfigurations") logging.info("Found %d Interface(s)" % numConf.value) configDesc=IOUSBConfigurationDescriptorPtr() # get first (and only) config err = usbDevInterface.GetConfigurationDescriptorPtr(0, byref(configDesc)) if err: raise RuntimeError("Error calling GetConfigurationDescriptorPtr") err = usbDevInterface.SetConfiguration(configDesc.contents.bConfigurationValue) if err: raise RuntimeError("Error calling SetConfiguration") interfaceRequest=IOUSBFindInterfaceRequest() interfaceRequest.bInterfaceClass = kIOUSBFindInterfaceDontCare interfaceRequest.bInterfaceSubClass = kIOUSBFindInterfaceDontCare interfaceRequest.bInterfaceProtocol = kIOUSBFindInterfaceDontCare interfaceRequest.bAlternateSetting = kIOUSBFindInterfaceDontCare interfaceIterator=io_iterator_t() err = usbDevInterface.CreateInterfaceIterator(byref(interfaceRequest), byref(interfaceIterator)) if err: raise RuntimeError("Error calling CreateInterfaceIterator") err = usbDevInterface.USBDeviceClose() if err: raise RuntimeError("Error calling USBDeviceClose") while True: interface = IOIteratorNext(interfaceIterator) if not interface: break self._open_interface(interface) IOObjectRelease(interfaceIterator) def _open_interface(self, interface): plugInInterface=COMObjectRef(POINTER(POINTER(IOCFPlugInInterfaceStruct))()) score=SInt32() err = IOCreatePlugInInterfaceForService(interface, kIOUSBInterfaceUserClientTypeID, kIOCFPlugInInterfaceID, byref(plugInInterface.ref), byref(score)); devInterface=POINTER(POINTER(IOUSBInterfaceInterface182))() err = plugInInterface.QueryInterface(CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID),parse('LPVOID*').cast(byref(devInterface))); if err: raise RuntimeError("Failed to QueryInterface for USB Interface Interface") self._devInterface=COMObjectRef(devInterface) err = self._devInterface.USBInterfaceOpen() if err: raise RuntimeError("Error opening USB interface") numPipes=UInt8() err = self._devInterface.GetNumEndpoints(byref(numPipes)) if err: raise 
RuntimeError("Error calling GetNumEndpoints") def set_report(self,report_data,report_id=0): ''' "set" a report - send the data to the device (which must have been opened previously) ''' HIDDevice.set_report(self,report_data,report_id) # copy data into a ctypes buffer report_buffer=(c_ubyte*len(report_data))() for i,c in enumerate(report_data): report_buffer[i]=struct.unpack('B',c)[0] ioret=self._devInterface.WritePipe(2, report_buffer, len(report_data)) if ioret != kIOReturnSuccess: logging.info("error writing to device: 0x%x" % long(ioret)) def _run_interrupt_callback_loop(self,report_buffer_size): if not self.is_open(): raise RuntimeError("device not open") logging.info("starting _run_interrupt_callback_loop") # create the report buffer report_buffer=(c_ubyte*report_buffer_size)() while self._running and self.is_open(): # just repeatedly read from the pipe 1 on the interface size=UInt32(report_buffer_size) ioret=self._devInterface.ReadPipe(1, byref(report_buffer), byref(size)) if ioret == kIOReturnSuccess: report_data="".join([struct.pack('B',b) for b in report_buffer]) # zero buffer after to ensure we don't get weird-ness # if it's not fully written to later for i in range(len(report_buffer)): report_buffer[i]=0 logging.info('interrupt_report_callback(%r)',report_data) self._callback(self,report_data) else: logging.info("error reading from device: 0x%x" % long(ioret))
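# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only; assumes OS X with the psyscopex
# install present, and that find_usb_devices() returns PsyScopeXUSBDevice
# instances, as its call above suggests). The 8-byte payload is a made-up
# example; real report contents depend on the attached button box.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    for device in find_devices():       # empty list when not running on OS X
        device.open()                   # claim the USB interface (pipe 1 in, pipe 2 out)
        try:
            device.set_report('\x00' * 8)
        finally:
            device.close()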
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """for_loop and pfor ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops.parallel_for.pfor import PFor from tensorflow.python.util import nest def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None): """Runs `loop_fn` `iters` times and stacks the outputs. Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and stacks corresponding outputs of the different runs. Args: loop_fn: A function that takes an int32 scalar tf.Tensor object representing the iteration number, and returns a possibly nested structure of tensor objects. The shape of these outputs should not depend on the input. loop_fn_dtypes: dtypes for the outputs of loop_fn. iters: Number of iterations for which to run loop_fn. parallel_iterations: The number of iterations that can be dispatched in parallel. This knob can be used to control the total memory usage. Returns: Returns a nested structure of stacked output tensor objects with the same nested structure as the output of `loop_fn`. """ flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes) is_none_list = [] def while_body(i, *ta_list): """Body of while loop.""" fn_output = nest.flatten(loop_fn(i)) if len(fn_output) != len(flat_loop_fn_dtypes): raise ValueError( "Number of expected outputs, %d, does not match the number of " "actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes), len(fn_output))) outputs = [] del is_none_list[:] is_none_list.extend([x is None for x in fn_output]) for out, ta in zip(fn_output, ta_list): # TODO(agarwal): support returning Operation objects from loop_fn. 
if out is not None: ta = ta.write(i, array_ops.expand_dims(out, 0)) outputs.append(ta) return tuple([i + 1] + outputs) if parallel_iterations is not None: extra_args = {"parallel_iterations": parallel_iterations} else: extra_args = {} ta_list = control_flow_ops.while_loop( lambda i, *ta: i < iters, while_body, [0] + [tensor_array_ops.TensorArray(dtype, iters) for dtype in flat_loop_fn_dtypes], **extra_args)[1:] # TODO(rachelim): enable this for sparse tensors output = [None if is_none else ta.concat() for ta, is_none in zip(ta_list, is_none_list)] return nest.pack_sequence_as(loop_fn_dtypes, output) def _flatten_first_two_dims(x): """Flattens the first two dimensions of x into a single dimension.""" old_shape = array_ops.shape(x) new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0) return array_ops.reshape(x, new_shape) def pfor(loop_fn, iters, parallel_iterations=None): """Equivalent to running `loop_fn` `iters` times and stacking the outputs. `pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters` times, with input from 0 to `iters - 1`, and stacking corresponding output of each iteration. However the implementation does not use a tf.while_loop. Instead it adds new operations to the graph that collectively compute the same value as what running `loop_fn` in a loop would compute. This is an experimental feature and currently has a lot of limitations: - There should be no data depenendency between the different iterations. For example, a future iteration should not depend on a value or side-effect of a previous iteration. - Stateful kernels may mostly not be supported since these often imply a data dependency or ordering of the iterations. We do support a limited set of such stateful kernels though (like RandomFoo, Variable operations like reads, etc). - Conversion works only on a limited set of kernels for which a converter has been registered. - loop_fn has limited support for control flow operations. tf.cond in particular is not supported. - `loop_fn` should return nested structure of Tensors or Operations. However if an Operation is returned, it should have zero outputs. - The shape and dtype of `loop_fn` outputs should not depend on the input to loop_fn. Args: loop_fn: A function that takes an int32 scalar tf.Tensor object representing the iteration number, and returns a possibly nested structure of Tensor or Operation objects. Note that if setting `parallel_iterations` argument to something other than None, `loop_fn` may be called more than once during graph construction. So it may need to avoid mutating global state. iters: Number of iterations for which to run loop_fn. parallel_iterations: A knob to control how many iterations are vectorized and dispatched in parallel. The default value of None corresponds to vectorizing all the iterations. If `parallel_iterations` is smaller than `iters`, then chunks of at most that many iterations are dispatched in sequence. This knob can be used to control the total memory usage. Returns: Returns a nested structure of stacked tensor objects with the same nested structure as the output of `loop_fn`. Raises: ValueError: If parallel_iterations is not None and not an integer > 1. 
""" existing_ops = set(ops.get_default_graph().get_operations()) with ops.name_scope("loop_body"): loop_var = array_ops.placeholder(dtypes.int32, shape=[]) loop_fn_outputs = loop_fn(loop_var) new_ops = set(ops.get_default_graph().get_operations()) - existing_ops iters = ops.convert_to_tensor(iters) if parallel_iterations is not None: if parallel_iterations < 1: raise ValueError("parallel_iterations must be None or a positive integer") if parallel_iterations == 1: raise ValueError("Found parallel_iterations == 1. Use for_loop instead.") iters_value = tensor_util.constant_value(iters) if iters_value is not None and iters_value < parallel_iterations: parallel_iterations = None if parallel_iterations is None: with ops.name_scope("pfor"): converter = PFor(loop_var, iters, new_ops) outputs = [] for loop_fn_output in nest.flatten(loop_fn_outputs): outputs.append(converter.convert(loop_fn_output)) return nest.pack_sequence_as(loop_fn_outputs, outputs) else: num_tiled_iterations = iters // parallel_iterations num_remaining_iterations = iters % parallel_iterations # TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside # a tf.function and extract the graph from there to vectorize it. with ops.name_scope("pfor_untiled"): converter = PFor(loop_var, num_remaining_iterations, new_ops) remaining_outputs = [] flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs) for loop_fn_output in flattened_loop_fn_outputs: remaining_outputs.append(converter.convert(loop_fn_output)) with ops.name_scope("pfor_tiled"): loop_fn_dtypes = [ops.convert_to_tensor(x).dtype for x in flattened_loop_fn_outputs] def tiled_loop_body(j): offset = j * parallel_iterations + num_remaining_iterations def tiled_loop_fn(i): return nest.flatten(loop_fn(i + offset)) return pfor(tiled_loop_fn, parallel_iterations) tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes, num_tiled_iterations, parallel_iterations=1) tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs] with ops.name_scope("pfor"): iters_value = tensor_util.constant_value(iters) if iters_value is None or iters_value % parallel_iterations: outputs = control_flow_ops.cond( math_ops.equal(num_remaining_iterations, 0), lambda: tiled_outputs, lambda: [array_ops.concat([x, y], axis=0) for x, y in zip(remaining_outputs, tiled_outputs)]) else: outputs = tiled_outputs return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
import os, sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import halyard_utils as utils


def list_nodes(driver):
    nodes = driver.list_nodes()
    node_list = []
    for node in nodes:
        node_list.append({"name": node.name,
                          "creationTimestamp": node.extra['creationTimestamp'],
                          "image": node.extra['image'],
                          "public_ips": node.public_ips})
    halyard_nodes = list(filter(lambda x: x['name'].startswith('halyard'), node_list))
    return halyard_nodes


def get_node(driver, instance_name, zone):
    node = utils.find_instance(driver, instance_name, zone)
    if node:
        return {"name": node.name,
                "creationTimestamp": node.extra['creationTimestamp'],
                "image": node.extra['image'],
                "public_ips": node.public_ips}
    else:
        return {}


def delete_node(driver, instance_name, zone):
    node = utils.find_instance(driver, instance_name, zone)
    if node:
        driver.destroy_node(node)
        return {"stopped_instance": instance_name}
    else:
        return {"error": f"instance {instance_name} not found"}


def get_base_image_from_labels(user_disk):
    """Gets the original base image from GCP disk labels."""
    labels = ['cf_version', 'branch', 'target', 'build_id']
    disk_labels = user_disk.extra['labels']
    if all(label in disk_labels for label in labels):
        cf_version = disk_labels['cf_version']
        branch = disk_labels['branch']
        target = disk_labels['target']
        build_id = disk_labels['build_id']
        base_image = f'halyard-{cf_version}-{branch}-{target}-{build_id}'
        return base_image
    else:
        utils.fatal_error(f'Labels for {user_disk.name} are not complete.\n \
            Must have all labels in: {labels}')


def set_base_image_labels(driver, user_disk, img_name, branch, target):
    """Sets disk labels with the base image version data."""
    dashes = [i for i, c in enumerate(img_name) if c == '-']
    cf_version = img_name[dashes[0]+1:dashes[3]]
    build_id = img_name[dashes[-1]+1:]
    driver.ex_set_volume_labels(user_disk, {'cf_version': cf_version,
                                            'branch': branch,
                                            'target': target,
                                            'build_id': build_id})


def create_or_restore_instance(driver, user_id, sig_server_addr, sig_server_port,
                               zone='us-central1-b', tags=[],
                               branch='aosp-master',
                               target='aosp_cf_x86_phone-userdebug'):
    """Restores an instance with the existing user disk and original base image.

    Creates a new instance with the latest image if the user disk doesn't exist.
    Stores runtime data in an external GCP disk.
    Launches Cuttlefish if creation is successful."""

    # SETUP
    target = target.replace('_', '-')
    instance_name = f'halyard-{user_id}'
    disk_name = f'halyard-user-{user_id}'
    image_family = f'halyard-{branch}-{target}'

    # Stops execution if instance already exists
    instance = utils.find_instance(driver, instance_name, zone)
    if instance:
        utils.fatal_error(f'Instance {instance_name} already exists.')

    # Looks for existing user disk
    user_disk = utils.find_disk(driver, disk_name, zone)
    if user_disk:
        base_image = get_base_image_from_labels(user_disk)
    else:
        user_disk = driver.create_volume(
            30, disk_name, location=zone, image='blank-halyard')
        base_image = None

    # CREATE INSTANCE
    # If existing user, use original base image
    if base_image:
        try:
            driver.ex_get_image(base_image)
        except Exception:
            utils.fatal_error(f'Image {base_image} does not exist.')
        new_instance = driver.create_node(
            instance_name, 'n1-standard-4', base_image, location=zone,
            ex_service_accounts=[{'scopes': ['storage-ro']}],
            ex_disk_size=30, ex_tags=tags)
    # If new user, use image family
    else:
        try:
            img = driver.ex_get_image_from_family(image_family)
        except Exception:
            utils.fatal_error(f'Image in family {image_family} does not exist.')
        set_base_image_labels(driver, user_disk, img.name, branch, target)
        new_instance = driver.create_node(
            instance_name, 'n1-standard-4', None, location=zone,
            ex_image_family=image_family,
            ex_service_accounts=[{'scopes': ['storage-ro']}],
            ex_disk_size=30, ex_tags=tags)

    # ATTACH USER DISK AND LAUNCH
    utils.wait_for_instance(instance_name, zone)
    print('successfully created new instance', instance_name)

    driver.attach_volume(new_instance, user_disk)
    print(f'attached {disk_name} to {instance_name}')

    os.system(f'gcloud compute ssh --zone={zone} {instance_name} -- \
        sudo mkdir /mnt/user_data')
    os.system(f'gcloud compute ssh --zone={zone} {instance_name} -- \
        sudo mount /dev/sdb /mnt/user_data')
    os.system(f'gcloud compute ssh --zone={zone} {instance_name} -- \
        sudo chmod -R 777 /mnt/user_data')
    # FIXME: should assign specific user permissions

    launch_cvd(instance_name, zone, sig_server_addr, sig_server_port)

    return {"name": instance_name}


def create_instance(driver, user_id, sig_server_addr, sig_server_port,
                    zone='us-central1-b', tags=[],
                    branch='aosp-master',
                    target='aosp_cf_x86_phone-userdebug'):
    """Creates a new Cuttlefish instance and launches it.

    Does not store runtime data in an external GCP disk."""

    target = target.replace('_', '-')
    instance_name = f'halyard-{user_id}'
    image_family = f'halyard-{branch}-{target}'

    try:
        driver.ex_get_image_from_family(image_family)
    except Exception:
        utils.fatal_error(f'Image family {image_family} does not exist.\n \
            New base images can be created using the `create_base_image` endpoint.')

    # Stops execution if instance already exists
    instance = utils.find_instance(driver, instance_name, zone)
    if instance:
        utils.fatal_error(f'Instance {instance_name} already exists.')

    build_node = driver.create_node(
        instance_name, 'n1-standard-4', None, location=zone,
        ex_image_family=image_family,
        ex_service_accounts=[{'scopes': ['storage-ro']}],
        ex_disk_size=30, ex_tags=tags)

    utils.wait_for_instance(instance_name, zone)
    print('successfully created new instance', instance_name)

    launch_cvd(instance_name, zone, sig_server_addr, sig_server_port, False)

    return {"name": instance_name}


def launch_cvd(instance_name, zone, sig_server_addr, sig_server_port,
               use_user_disk=True):
    """Launches cvd on the given instance and connects it to the operator at the
    given address. If use_user_disk is True, the existing user data disk is used."""

    cuttlefish_dir = '/usr/local/share/cuttlefish'
    user_data_dir = '/mnt/user_data'

    launch_command = f'gcloud compute ssh --zone={zone} {instance_name} -- '

    if use_user_disk:
        launch_command += f'HOME={user_data_dir} \
            ANDROID_HOST_OUT={cuttlefish_dir} \
            ANDROID_PRODUCT_OUT={cuttlefish_dir} '
    else:
        launch_command += f'HOME={cuttlefish_dir} '

    launch_command += f'{cuttlefish_dir}/bin/launch_cvd \
        --start_webrtc --daemon \
        --webrtc_sig_server_addr={sig_server_addr} \
        --webrtc_sig_server_port={sig_server_port} \
        --start_webrtc_sig_server=false \
        --webrtc_device_id={instance_name} \
        --report_anonymous_usage_stats=y'

    os.system(launch_command)
    print(f'Launched cuttlefish on {instance_name} at {sig_server_addr}:{sig_server_port}')
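if __name__ == '__main__':
    # Minimal usage sketch (illustration only). Assumption: this module is
    # driven by apache-libcloud's GCE driver, which is what the ex_*-style
    # calls above imply; the service account, key file, and project below are
    # placeholders, and halyard_utils must be importable.
    from libcloud.compute.providers import get_driver
    from libcloud.compute.types import Provider

    gce = get_driver(Provider.GCE)
    driver = gce('sa@my-project.iam.gserviceaccount.com', 'key.json',
                 project='my-project', datacenter='us-central1-b')

    # List existing halyard instances, then create (or restore) one for a user
    # and launch Cuttlefish on it, pointing WebRTC at the signaling server.
    print(list_nodes(driver))
    create_or_restore_instance(driver, 'demo-user',
                               sig_server_addr='127.0.0.1', sig_server_port=8443)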
# Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 import math from math import pi import numpy as np import pytest import scipy.stats as sp import pyro.distributions as dist from pyro.distributions.testing.naive_dirichlet import NaiveBeta, NaiveDirichlet from pyro.distributions.testing.rejection_exponential import RejectionExponential from pyro.distributions.testing.rejection_gamma import ( ShapeAugmentedBeta, ShapeAugmentedDirichlet, ShapeAugmentedGamma, ) from tests.distributions.dist_fixture import Fixture class FoldedNormal(dist.FoldedDistribution): arg_constraints = dist.Normal.arg_constraints def __init__(self, loc, scale): super().__init__(dist.Normal(loc, scale)) @property def loc(self): return self.base_dist.loc @property def scale(self): return self.base_dist.scale class SparsePoisson(dist.Poisson): def __init__(self, rate, *, validate_args=None): super().__init__(rate, is_sparse=True, validate_args=validate_args) class SineSkewedUniform(dist.SineSkewed): def __init__(self, lower, upper, skewness, *args, **kwargs): base_dist = dist.Uniform(lower, upper).to_event(lower.ndim) super().__init__(base_dist, skewness, *args, **kwargs) class SineSkewedVonMises(dist.SineSkewed): def __init__(self, von_loc, von_conc, skewness): base_dist = dist.VonMises(von_loc, von_conc).to_event(von_loc.ndim) super().__init__(base_dist, skewness) continuous_dists = [ Fixture( pyro_dist=dist.Uniform, scipy_dist=sp.uniform, examples=[ {"low": [2.0], "high": [2.5], "test_data": [2.2]}, { "low": [2.0, 4.0], "high": [3.0, 5.0], "test_data": [[[2.5, 4.5]], [[2.5, 4.5]], [[2.5, 4.5]]], }, { "low": [[2.0], [-3.0], [0.0]], "high": [[2.5], [0.0], [1.0]], "test_data": [[2.2], [-2], [0.7]], }, ], scipy_arg_fn=lambda low, high: ( (), {"loc": np.array(low), "scale": np.array(high) - np.array(low)}, ), ), Fixture( pyro_dist=dist.Exponential, scipy_dist=sp.expon, examples=[ {"rate": [2.4], "test_data": [5.5]}, { "rate": [2.4, 5.5], "test_data": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]], }, { "rate": [[2.4, 5.5]], "test_data": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]], }, {"rate": [[2.4], [5.5]], "test_data": [[5.5], [3.2]]}, ], scipy_arg_fn=lambda rate: ((), {"scale": 1.0 / np.array(rate)}), ), Fixture( pyro_dist=RejectionExponential, scipy_dist=sp.expon, examples=[ {"rate": [2.4], "factor": [0.5], "test_data": [5.5]}, { "rate": [2.4, 5.5], "factor": [0.5], "test_data": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]], }, { "rate": [[2.4, 5.5]], "factor": [0.5], "test_data": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]], }, {"rate": [[2.4], [5.5]], "factor": [0.5], "test_data": [[5.5], [3.2]]}, ], scipy_arg_fn=lambda rate, factor: ((), {"scale": 1.0 / np.array(rate)}), ), Fixture( pyro_dist=dist.Gamma, scipy_dist=sp.gamma, examples=[ {"concentration": [2.4], "rate": [3.2], "test_data": [5.5]}, { "concentration": [[2.4, 2.4], [3.2, 3.2]], "rate": [[2.4, 2.4], [3.2, 3.2]], "test_data": [[[5.5, 4.4], [5.5, 4.4]]], }, { "concentration": [[2.4], [2.4]], "rate": [[3.2], [3.2]], "test_data": [[5.5], [4.4]], }, ], scipy_arg_fn=lambda concentration, rate: ( (np.array(concentration),), {"scale": 1.0 / np.array(rate)}, ), ), Fixture( pyro_dist=ShapeAugmentedGamma, scipy_dist=sp.gamma, examples=[ {"concentration": [2.4], "rate": [3.2], "test_data": [5.5]}, { "concentration": [[2.4, 2.4], [3.2, 3.2]], "rate": [[2.4, 2.4], [3.2, 3.2]], "test_data": [[[5.5, 4.4], [5.5, 4.4]]], }, { "concentration": [[2.4], [2.4]], "rate": [[3.2], [3.2]], "test_data": [[5.5], [4.4]], }, ], scipy_arg_fn=lambda 
concentration, rate: ( (np.array(concentration),), {"scale": 1.0 / np.array(rate)}, ), ), Fixture( pyro_dist=dist.Beta, scipy_dist=sp.beta, examples=[ {"concentration1": [2.4], "concentration0": [3.6], "test_data": [0.4]}, { "concentration1": [[2.4, 2.4], [3.6, 3.6]], "concentration0": [[2.5, 2.5], [2.5, 2.5]], "test_data": [[[0.5, 0.4], [0.5, 0.4]]], }, { "concentration1": [[2.4], [3.7]], "concentration0": [[3.6], [2.5]], "test_data": [[0.4], [0.6]], }, ], scipy_arg_fn=lambda concentration1, concentration0: ( (np.array(concentration1), np.array(concentration0)), {}, ), ), Fixture( pyro_dist=NaiveBeta, scipy_dist=sp.beta, examples=[ {"concentration1": [2.4], "concentration0": [3.6], "test_data": [0.4]}, { "concentration1": [[2.4, 2.4], [3.6, 3.6]], "concentration0": [[2.5, 2.5], [2.5, 2.5]], "test_data": [[[0.5, 0.4], [0.5, 0.4]]], }, { "concentration1": [[2.4], [3.7]], "concentration0": [[3.6], [2.5]], "test_data": [[0.4], [0.6]], }, ], scipy_arg_fn=lambda concentration1, concentration0: ( (np.array(concentration1), np.array(concentration0)), {}, ), ), Fixture( pyro_dist=ShapeAugmentedBeta, scipy_dist=sp.beta, examples=[ {"concentration1": [2.4], "concentration0": [3.6], "test_data": [0.4]}, { "concentration1": [[2.4, 2.4], [3.6, 3.6]], "concentration0": [[2.5, 2.5], [2.5, 2.5]], "test_data": [[[0.5, 0.4], [0.5, 0.4]]], }, { "concentration1": [[2.4], [3.7]], "concentration0": [[3.6], [2.5]], "test_data": [[0.4], [0.6]], }, ], scipy_arg_fn=lambda concentration1, concentration0: ( (np.array(concentration1), np.array(concentration0)), {}, ), ), Fixture( pyro_dist=dist.LogNormal, scipy_dist=sp.lognorm, examples=[ {"loc": [1.4], "scale": [0.4], "test_data": [5.5]}, {"loc": [1.4], "scale": [0.4], "test_data": [[5.5]]}, { "loc": [[1.4, 0.4], [1.4, 0.4]], "scale": [[2.6, 0.5], [2.6, 0.5]], "test_data": [[5.5, 6.4], [5.5, 6.4]], }, { "loc": [[1.4], [0.4]], "scale": [[2.6], [0.5]], "test_data": [[5.5], [6.4]], }, ], scipy_arg_fn=lambda loc, scale: ( (np.array(scale),), {"scale": np.exp(np.array(loc))}, ), ), Fixture( pyro_dist=dist.AffineBeta, scipy_dist=sp.beta, examples=[ { "concentration1": [2.4], "concentration0": [3.6], "loc": [-1.0], "scale": [2.0], "test_data": [-0.4], }, { "concentration1": [[2.4, 2.4], [3.6, 3.6]], "concentration0": [[2.5, 2.5], [2.5, 2.5]], "loc": [[-1.0, -1.0], [2.0, 2.0]], "scale": [[2.0, 2.0], [1.0, 1.0]], "test_data": [[[-0.4, 0.4], [2.5, 2.6]]], }, { "concentration1": [[2.4], [3.7]], "concentration0": [[3.6], [2.5]], "loc": [[-1.0], [2.0]], "scale": [[2.0], [2.0]], "test_data": [[0.0], [3.0]], }, ], scipy_arg_fn=lambda concentration1, concentration0, loc, scale: ( ( np.array(concentration1), np.array(concentration0), np.array(loc), np.array(scale), ), {}, ), ), Fixture( pyro_dist=dist.Normal, scipy_dist=sp.norm, examples=[ {"loc": [2.0], "scale": [4.0], "test_data": [2.0]}, {"loc": [[2.0]], "scale": [[4.0]], "test_data": [[2.0]]}, {"loc": [[[2.0]]], "scale": [[[4.0]]], "test_data": [[[2.0]]]}, { "loc": [2.0, 50.0], "scale": [4.0, 100.0], "test_data": [[2.0, 50.0], [2.0, 50.0]], }, ], scipy_arg_fn=lambda loc, scale: ( (), {"loc": np.array(loc), "scale": np.array(scale)}, ), prec=0.07, min_samples=50000, ), Fixture( pyro_dist=dist.MultivariateNormal, scipy_dist=sp.multivariate_normal, examples=[ { "loc": [2.0, 1.0], "covariance_matrix": [[1.0, 0.5], [0.5, 1.0]], "test_data": [[2.0, 1.0], [9.0, 3.4]], }, ], # This hack seems to be the best option right now, as 'scale' is not handled well by get_scipy_batch_logpdf scipy_arg_fn=lambda loc, covariance_matrix=None: ( (), 
{"mean": np.array(loc), "cov": np.array([[1.0, 0.5], [0.5, 1.0]])}, ), prec=0.01, min_samples=500000, ), Fixture( pyro_dist=dist.LowRankMultivariateNormal, scipy_dist=sp.multivariate_normal, examples=[ { "loc": [2.0, 1.0], "cov_diag": [0.5, 0.5], "cov_factor": [[1.0], [0.5]], "test_data": [[2.0, 1.0], [9.0, 3.4]], }, ], scipy_arg_fn=lambda loc, cov_diag=None, cov_factor=None: ( (), {"mean": np.array(loc), "cov": np.array([[1.5, 0.5], [0.5, 0.75]])}, ), prec=0.01, min_samples=500000, ), Fixture( pyro_dist=FoldedNormal, examples=[ {"loc": [2.0], "scale": [4.0], "test_data": [2.0]}, {"loc": [[2.0]], "scale": [[4.0]], "test_data": [[2.0]]}, {"loc": [[[2.0]]], "scale": [[[4.0]]], "test_data": [[[2.0]]]}, { "loc": [2.0, 50.0], "scale": [4.0, 100.0], "test_data": [[2.0, 50.0], [2.0, 50.0]], }, ], ), Fixture( pyro_dist=dist.Dirichlet, scipy_dist=sp.dirichlet, examples=[ {"concentration": [2.4, 3, 6], "test_data": [0.2, 0.45, 0.35]}, { "concentration": [2.4, 3, 6], "test_data": [[0.2, 0.45, 0.35], [0.2, 0.45, 0.35]], }, { "concentration": [[2.4, 3, 6], [3.2, 1.2, 0.4]], "test_data": [[0.2, 0.45, 0.35], [0.3, 0.4, 0.3]], }, ], scipy_arg_fn=lambda concentration: ((concentration,), {}), ), Fixture( pyro_dist=NaiveDirichlet, scipy_dist=sp.dirichlet, examples=[ {"concentration": [2.4, 3, 6], "test_data": [0.2, 0.45, 0.35]}, { "concentration": [2.4, 3, 6], "test_data": [[0.2, 0.45, 0.35], [0.2, 0.45, 0.35]], }, { "concentration": [[2.4, 3, 6], [3.2, 1.2, 0.4]], "test_data": [[0.2, 0.45, 0.35], [0.3, 0.4, 0.3]], }, ], scipy_arg_fn=lambda concentration: ((concentration,), {}), ), Fixture( pyro_dist=ShapeAugmentedDirichlet, scipy_dist=sp.dirichlet, examples=[ {"concentration": [2.4, 3, 6], "test_data": [0.2, 0.45, 0.35]}, { "concentration": [2.4, 3, 6], "test_data": [[0.2, 0.45, 0.35], [0.2, 0.45, 0.35]], }, { "concentration": [[2.4, 3, 6], [3.2, 1.2, 0.4]], "test_data": [[0.2, 0.45, 0.35], [0.3, 0.4, 0.3]], }, ], scipy_arg_fn=lambda concentration: ((concentration,), {}), ), Fixture( pyro_dist=dist.Cauchy, scipy_dist=sp.cauchy, examples=[ {"loc": [0.5], "scale": [1.2], "test_data": [1.0]}, { "loc": [0.5, 0.5], "scale": [1.2, 1.2], "test_data": [[1.0, 1.0], [1.0, 1.0]], }, { "loc": [[0.5], [0.3]], "scale": [[1.2], [1.0]], "test_data": [[0.4], [0.35]], }, ], scipy_arg_fn=lambda loc, scale: ( (), {"loc": np.array(loc), "scale": np.array(scale)}, ), ), Fixture( pyro_dist=dist.HalfCauchy, scipy_dist=sp.halfcauchy, examples=[ {"scale": [1.2], "test_data": [1.0]}, {"scale": [1.2, 1.2], "test_data": [[1.0, 2.0], [1.0, 2.0]]}, {"scale": [[1.2], [1.0]], "test_data": [[0.54], [0.35]]}, ], scipy_arg_fn=lambda scale: ((), {"scale": np.array(scale)}), ), Fixture( pyro_dist=dist.VonMises, scipy_dist=sp.vonmises, examples=[ {"loc": [0.5], "concentration": [1.2], "test_data": [1.0]}, { "loc": [0.5, 3.0], "concentration": [2.0, 0.5], "test_data": [[1.0, 2.0], [1.0, 2.0]], }, { "loc": [[0.5], [0.3]], "concentration": [[2.0], [0.5]], "test_data": [[1.0], [2.0]], }, ], scipy_arg_fn=lambda loc, concentration: ( (), {"loc": np.array(loc), "kappa": np.array(concentration)}, ), ), Fixture( pyro_dist=dist.LKJ, examples=[ { "dim": 3, "concentration": 1.0, "test_data": [ [ [1.0000, -0.8221, 0.7655], [-0.8221, 1.0000, -0.5293], [0.7655, -0.5293, 1.0000], ], [ [1.0000, -0.5345, -0.5459], [-0.5345, 1.0000, -0.0333], [-0.5459, -0.0333, 1.0000], ], [ [1.0000, -0.3758, -0.2409], [-0.3758, 1.0000, 0.4653], [-0.2409, 0.4653, 1.0000], ], [ [1.0000, -0.8800, -0.9493], [-0.8800, 1.0000, 0.9088], [-0.9493, 0.9088, 1.0000], ], [ [1.0000, 0.2284, 
-0.1283], [0.2284, 1.0000, 0.0146], [-0.1283, 0.0146, 1.0000], ], ], }, ], ), Fixture( pyro_dist=dist.LKJCholesky, examples=[ { "dim": 3, "concentration": 1.0, "test_data": [ [ [1.0, 0.0, 0.0], [-0.17332135, 0.98486533, 0.0], [0.43106407, -0.54767312, 0.71710384], ], [ [1.0, 0.0, 0.0], [-0.31391555, 0.94945091, 0.0], [-0.31391296, -0.29767500, 0.90158097], ], ], }, ], ), Fixture( pyro_dist=dist.Stable, examples=[ {"stability": [1.5], "skew": 0.1, "test_data": [-10.0]}, { "stability": [1.5], "skew": 0.1, "scale": 2.0, "loc": -2.0, "test_data": [10.0], }, ], ), Fixture( pyro_dist=dist.MultivariateStudentT, examples=[ { "df": 1.5, "loc": [0.2, 0.3], "scale_tril": [[0.8, 0.0], [1.3, 0.4]], "test_data": [-3.0, 2], }, ], ), Fixture( pyro_dist=dist.ProjectedNormal, examples=[ {"concentration": [0.0, 0.0], "test_data": [1.0, 0.0]}, {"concentration": [2.0, 3.0], "test_data": [0.0, 1.0]}, {"concentration": [0.0, 0.0, 0.0], "test_data": [1.0, 0.0, 0.0]}, {"concentration": [-1.0, 2.0, 3.0], "test_data": [0.0, 0.0, 1.0]}, ], ), Fixture( pyro_dist=dist.SineBivariateVonMises, examples=[ { "phi_loc": [0.0], "psi_loc": [0.0], "phi_concentration": [5.0], "psi_concentration": [6.0], "correlation": [2.0], "test_data": [[0.0, 0.0]], }, { "phi_loc": [3.003], "psi_loc": [-1.343], "phi_concentration": [5.0], "psi_concentration": [6.0], "correlation": [2.0], "test_data": [[0.0, 1.0]], }, { "phi_loc": [-math.pi / 3], "psi_loc": -1.0, "phi_concentration": 0.5, "psi_concentration": 10.0, "correlation": 0.9, "test_data": [[1.0, 0.555]], }, { "phi_loc": [math.pi - 0.2, 1.0], "psi_loc": [0.0, 1.0], "phi_concentration": [5.0, 5.0], "psi_concentration": [7.0, 0.5], "weighted_correlation": [0.5, 0.1], "test_data": [[[1.0, -3.0], [1.0, 59.0]]], }, ], ), Fixture( pyro_dist=dist.SoftLaplace, examples=[ {"loc": [2.0], "scale": [4.0], "test_data": [2.0]}, {"loc": [[2.0]], "scale": [[4.0]], "test_data": [[2.0]]}, {"loc": [[[2.0]]], "scale": [[[4.0]]], "test_data": [[[2.0]]]}, { "loc": [2.0, 50.0], "scale": [4.0, 100.0], "test_data": [[2.0, 50.0], [2.0, 50.0]], }, ], ), Fixture( pyro_dist=SineSkewedUniform, examples=[ { "lower": [-pi, -pi], "upper": [pi, pi], "skewness": [-pi / 4, 0.1], "test_data": [pi / 2, -2 * pi / 3], } ], ), Fixture( pyro_dist=SineSkewedVonMises, examples=[ { "von_loc": [0.0], "von_conc": [1.0], "skewness": [0.342355], "test_data": [0.1], }, { "von_loc": [0.0, -1.234], "von_conc": [1.0, 10.0], "skewness": [[0.342355, -0.0001], [0.91, 0.09]], "test_data": [[0.1, -3.2], [-2.0, 0.0]], }, ], ), Fixture( pyro_dist=dist.AsymmetricLaplace, examples=[ {"loc": [1.0], "scale": [1.0], "asymmetry": [2.0], "test_data": [2.0]}, { "loc": [2.0, -50.0], "scale": [2.0, 10.0], "asymmetry": [0.5, 2.5], "test_data": [[2.0, 10.0], [-1.0, -50.0]], }, ], ), Fixture( pyro_dist=dist.SoftAsymmetricLaplace, examples=[ {"loc": [1.0], "scale": [1.0], "asymmetry": [2.0], "test_data": [2.0]}, { "loc": [2.0, -50.0], "scale": [2.0, 10.0], "asymmetry": [0.5, 2.5], "softness": [0.7, 1.4], "test_data": [[2.0, 10.0], [-1.0, -50.0]], }, ], ), Fixture( pyro_dist=dist.SkewLogistic, examples=[ {"loc": [1.0], "scale": [1.0], "asymmetry": [2.0], "test_data": [2.0]}, { "loc": [2.0, -50.0], "scale": [2.0, 10.0], "asymmetry": [0.5, 2.5], "test_data": [[2.0, 10.0], [-1.0, -50.0]], }, ], ), Fixture( pyro_dist=dist.Logistic, examples=[ {"loc": [1.0], "scale": [1.0], "test_data": [2.0]}, { "loc": [2.0, -50.0], "scale": [2.0, 10.0], "test_data": [[2.0, 10.0], [-1.0, -50.0]], }, ], ), ] discrete_dists = [ Fixture( pyro_dist=dist.OrderedLogistic, examples=[ 
{"cutpoints": [0.0, 1.0, 2.0], "predictor": [1.0], "test_data": [1]}, { "cutpoints": [0.0, 1.0, 2.0], "predictor": [-0.5, 0.5, 1.5, 2.5], "test_data": [0, 1, 2, 3], }, { "cutpoints": [0.0, 1.0], "predictor": [[-0.5, 0.5, 1.5], [-0.5, 0.5, 1.5]], "test_data": [[0, 1, 2], [0, 1, 2]], }, ], prec=0.05, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.Multinomial, scipy_dist=sp.multinomial, examples=[ {"probs": [0.1, 0.6, 0.3], "test_data": [0.0, 1.0, 0.0]}, {"probs": [0.1, 0.6, 0.3], "total_count": 8, "test_data": [2.0, 4.0, 2.0]}, { "probs": [0.1, 0.6, 0.3], "total_count": 8, "test_data": [[2.0, 4.0, 2.0], [2.0, 4.0, 2.0]], }, { "probs": [[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]], "total_count": 8, "test_data": [[2.0, 4.0, 2.0], [1.0, 4.0, 3.0]], }, ], scipy_arg_fn=lambda probs, total_count=[1]: ( (total_count[0], np.array(probs)), {}, ), prec=0.05, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.Bernoulli, scipy_dist=sp.bernoulli, examples=[ {"probs": [0.25], "test_data": [1.0]}, { "probs": [0.25, 0.25], "test_data": [[[0.0, 1.0]], [[1.0, 0.0]], [[0.0, 0.0]]], }, { "logits": [math.log(p / (1 - p)) for p in (0.25, 0.25)], "test_data": [[[0.0, 1.0]], [[1.0, 0.0]], [[0.0, 0.0]]], }, # for now, avoid tests on infinite logits # {'logits': [-float('inf'), 0], # 'test_data': [[0, 1], [0, 1], [0, 1]]}, { "logits": [ [math.log(p / (1 - p)) for p in (0.25, 0.25)], [math.log(p / (1 - p)) for p in (0.3, 0.3)], ], "test_data": [[1.0, 1.0], [0.0, 0.0]], }, { "probs": [[0.25, 0.25], [0.3, 0.3]], "test_data": [[1.0, 1.0], [0.0, 0.0]], }, ], # for now, avoid tests on infinite logits # test_data_indices=[0, 1, 2, 3], batch_data_indices=[-1, -2], scipy_arg_fn=lambda **kwargs: ((), {"p": kwargs["probs"]}), prec=0.01, min_samples=10000, is_discrete=True, expected_support_non_vec=[[0.0], [1.0]], expected_support=[[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 1.0]]], ), Fixture( pyro_dist=dist.BetaBinomial, examples=[ { "concentration1": [2.0], "concentration0": [5.0], "total_count": 8, "test_data": [4.0], }, { "concentration1": [2.0], "concentration0": [5.0], "total_count": 8, "test_data": [[2.0], [4.0]], }, { "concentration1": [[2.0], [2.0]], "concentration0": [[5.0], [5.0]], "total_count": 8, "test_data": [[4.0], [3.0]], }, { "concentration1": [2.0, 2.0], "concentration0": [5.0, 5.0], "total_count": [0.0, 0.0], "test_data": [[0.0, 0.0], [0.0, 0.0]], }, { "concentration1": [2.0, 2.0], "concentration0": [5.0, 5.0], "total_count": [[8.0, 7.0], [5.0, 9.0]], "test_data": [[6.0, 3.0], [2.0, 8.0]], }, ], batch_data_indices=[-1, -2], prec=0.01, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.Binomial, scipy_dist=sp.binom, examples=[ {"probs": [0.6], "total_count": 8, "test_data": [4.0]}, {"probs": [0.3], "total_count": 8, "test_data": [[2.0], [4.0]]}, {"probs": [[0.2], [0.4]], "total_count": 8, "test_data": [[4.0], [3.0]]}, { "probs": [0.2, 0.4], "total_count": [0.0, 0.0], "test_data": [[0.0, 0.0], [0.0, 0.0]], }, { "probs": [0.2, 0.4], "total_count": [[8.0, 7.0], [5.0, 9.0]], "test_data": [[6.0, 3.0], [2.0, 8.0]], }, ], scipy_arg_fn=lambda probs, total_count: ((total_count, probs), {}), prec=0.05, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.ExtendedBetaBinomial, examples=[ { "concentration1": [2.0], "concentration0": [5.0], "total_count": 8, "test_data": [4.0], }, { "concentration1": [2.0], "concentration0": [5.0], "total_count": 8, "test_data": [[2.0], [4.0]], }, { "concentration1": [[2.0], [2.0]], "concentration0": [[5.0], [5.0]], "total_count": 8, 
"test_data": [[4.0], [3.0]], }, { "concentration1": [2.0, 2.0], "concentration0": [5.0, 5.0], "total_count": [0.0, 0.0], "test_data": [[0.0, 0.0], [0.0, 0.0]], }, { "concentration1": [2.0, 2.0], "concentration0": [5.0, 5.0], "total_count": [[8.0, 7.0], [5.0, 9.0]], "test_data": [[6.0, 3.0], [2.0, 8.0]], }, ], batch_data_indices=[-1, -2], prec=0.01, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.ExtendedBinomial, scipy_dist=sp.binom, examples=[ {"probs": [0.6], "total_count": 8, "test_data": [4.0]}, {"probs": [0.3], "total_count": 8, "test_data": [[2.0], [4.0]]}, {"probs": [[0.2], [0.4]], "total_count": 8, "test_data": [[4.0], [3.0]]}, { "probs": [0.2, 0.4], "total_count": [0.0, 0.0], "test_data": [[0.0, 0.0], [0.0, 0.0]], }, { "probs": [0.2, 0.4], "total_count": [[8.0, 7.0], [5.0, 9.0]], "test_data": [[6.0, 3.0], [2.0, 8.0]], }, ], scipy_arg_fn=lambda probs, total_count: ((total_count, probs), {}), prec=0.05, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.Categorical, scipy_dist=sp.multinomial, examples=[ {"probs": [0.1, 0.6, 0.3], "test_data": [2]}, {"logits": list(map(math.log, [0.1, 0.6, 0.3])), "test_data": [2]}, { "logits": [ list(map(math.log, [0.1, 0.6, 0.3])), list(map(math.log, [0.2, 0.4, 0.4])), ], "test_data": [2, 0], }, {"probs": [[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]], "test_data": [2, 0]}, ], test_data_indices=[0, 1, 2], batch_data_indices=[-1, -2], scipy_arg_fn=None, prec=0.05, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.DirichletMultinomial, examples=[ {"concentration": [0.1, 0.6, 0.3], "test_data": [0.0, 1.0, 0.0]}, { "concentration": [0.5, 1.0, 2.0], "total_count": 8, "test_data": [0.0, 2.0, 6.0], }, { "concentration": [[0.5, 1.0, 2.0], [3.0, 3.0, 0.1]], "total_count": 8, "test_data": [[0.0, 2.0, 6.0], [5.0, 2.0, 1.0]], }, ], prec=0.08, is_discrete=True, ), Fixture( pyro_dist=dist.GammaPoisson, examples=[ {"concentration": [1.0], "rate": [2.0], "test_data": [0.0]}, {"concentration": [1.0], "rate": [2.0], "test_data": [1.0]}, {"concentration": [1.0], "rate": [2.0], "test_data": [4.0]}, { "concentration": [1.0, 1.0, 1.0], "rate": [2.0, 2.0, 3.0], "test_data": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]], }, { "concentration": [[1.0], [1.0], [1.0]], "rate": [[2.0], [2.0], [3.0]], "test_data": [[0.0], [1.0], [4.0]], }, ], prec=0.08, is_discrete=True, ), Fixture( pyro_dist=dist.OneHotCategorical, scipy_dist=sp.multinomial, examples=[ {"probs": [0.1, 0.6, 0.3], "test_data": [0.0, 0.0, 1.0]}, { "logits": list(map(math.log, [0.1, 0.6, 0.3])), "test_data": [0.0, 0.0, 1.0], }, { "logits": [ list(map(math.log, [0.1, 0.6, 0.3])), list(map(math.log, [0.2, 0.4, 0.4])), ], "test_data": [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], }, { "probs": [[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]], "test_data": [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], }, ], test_data_indices=[0, 1, 2], batch_data_indices=[-1, -2], scipy_arg_fn=lambda probs: ((1, np.array(probs)), {}), prec=0.05, min_samples=10000, is_discrete=True, ), Fixture( pyro_dist=dist.Poisson, scipy_dist=sp.poisson, examples=[ {"rate": [2.0], "test_data": [0.0]}, {"rate": [3.0], "test_data": [1.0]}, {"rate": [6.0], "test_data": [4.0]}, {"rate": [2.0, 3.0, 6.0], "test_data": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]]}, {"rate": [[2.0], [3.0], [6.0]], "test_data": [[0.0], [1.0], [4.0]]}, ], scipy_arg_fn=lambda rate: ((np.array(rate),), {}), prec=0.08, is_discrete=True, ), Fixture( pyro_dist=SparsePoisson, scipy_dist=sp.poisson, examples=[ {"rate": [2.0], "test_data": [0.0]}, {"rate": [3.0], "test_data": [1.0]}, {"rate": [6.0], 
"test_data": [4.0]}, {"rate": [2.0, 3.0, 6.0], "test_data": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]]}, {"rate": [[2.0], [3.0], [6.0]], "test_data": [[0.0], [1.0], [4.0]]}, ], scipy_arg_fn=lambda rate: ((np.array(rate),), {}), prec=0.08, is_discrete=True, ), Fixture( pyro_dist=dist.Geometric, scipy_dist=sp.geom, examples=[ {"logits": [2.0], "test_data": [0.0]}, {"logits": [3.0], "test_data": [1.0]}, {"logits": [-6.0], "test_data": [4.0]}, { "logits": [2.0, 3.0, -6.0], "test_data": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]], }, {"logits": [[2.0], [3.0], [-6.0]], "test_data": [[0.0], [1.0], [4.0]]}, ], scipy_arg_fn=lambda probs: ((np.array(probs), -1), {}), prec=0.08, is_discrete=True, ), ] @pytest.fixture( name="dist", params=continuous_dists + discrete_dists, ids=lambda x: x.get_test_distribution_name(), ) def all_distributions(request): return request.param @pytest.fixture( name="continuous_dist", params=continuous_dists, ids=lambda x: x.get_test_distribution_name(), ) def continuous_distributions(request): return request.param @pytest.fixture( name="discrete_dist", params=discrete_dists, ids=lambda x: x.get_test_distribution_name(), ) def discrete_distributions(request): return request.param def pytest_collection_modifyitems(items): for item in items: if item.nodeid.startswith("tests/distributions"): if "stage" not in item.keywords: item.add_marker(pytest.mark.stage("unit")) if "init" not in item.keywords: item.add_marker(pytest.mark.init(rng_seed=123))
""" Scripts for the in-game Python system. """ from datetime import datetime, timedelta from Queue import Queue import re import sys import traceback from django.conf import settings from evennia import DefaultObject, DefaultScript, ChannelDB, ScriptDB from evennia import logger from evennia.utils.ansi import raw from evennia.utils.create import create_channel from evennia.utils.dbserialize import dbserialize from evennia.utils.utils import all_from_module, delay, pypath_to_realpath from evennia.contrib.ingame_python.callbackhandler import CallbackHandler from evennia.contrib.ingame_python.utils import get_next_wait, EVENTS, InterruptEvent # Constants RE_LINE_ERROR = re.compile(r'^ File "\<string\>", line (\d+)') class EventHandler(DefaultScript): """ The event handler that contains all events in a global script. This script shouldn't be created more than once. It contains event (in a non-persistent attribute) and callbacks (in a persistent attribute). The script method would help adding, editing and deleting these events and callbacks. """ def at_script_creation(self): """Hook called when the script is created.""" self.key = "event_handler" self.desc = "Global event handler" self.persistent = True # Permanent data to be stored self.db.callbacks = {} self.db.to_valid = [] self.db.locked = [] # Tasks self.db.tasks = {} def at_start(self): """Set up the event system when starting. Note that this hook is called every time the server restarts (including when it's reloaded). This hook performs the following tasks: - Create temporarily stored events. - Generate locals (individual events' namespace). - Load eventfuncs, including user-defined ones. - Re-schedule tasks that aren't set to fire anymore. - Effectively connect the handler to the main script. """ self.ndb.events = {} for typeclass, name, variables, help_text, custom_call, custom_add in EVENTS: self.add_event(typeclass, name, variables, help_text, custom_call, custom_add) # Generate locals self.ndb.current_locals = {} self.ndb.fresh_locals = {} addresses = ["evennia.contrib.ingame_python.eventfuncs"] addresses.extend(getattr(settings, "EVENTFUNCS_LOCATIONS", ["world.eventfuncs"])) for address in addresses: if pypath_to_realpath(address): self.ndb.fresh_locals.update(all_from_module(address)) # Restart the delayed tasks now = datetime.now() for task_id, definition in tuple(self.db.tasks.items()): future, obj, event_name, locals = definition seconds = (future - now).total_seconds() if seconds < 0: seconds = 0 delay(seconds, complete_task, task_id) # Place the script in the CallbackHandler from evennia.contrib.ingame_python import typeclasses CallbackHandler.script = self DefaultObject.callbacks = typeclasses.EventObject.callbacks # Create the channel if non-existent try: self.ndb.channel = ChannelDB.objects.get(db_key="everror") except ChannelDB.DoesNotExist: self.ndb.channel = create_channel("everror", desc="Event errors", locks="control:false();listen:perm(Builders);send:false()") def get_events(self, obj): """ Return a dictionary of events on this object. Args: obj (Object): the connected object. Returns: A dictionary of the object's events. Note: Events would define what the object can have as callbacks. Note, however, that chained callbacks will not appear in events and are handled separately. """ events = {} all_events = self.ndb.events classes = Queue() classes.put(type(obj)) invalid = [] while not classes.empty(): typeclass = classes.get() typeclass_name = typeclass.__module__ + "." 
+ typeclass.__name__ for key, etype in all_events.get(typeclass_name, {}).items(): if key in invalid: continue if etype[0] is None: # Invalidate invalid.append(key) continue if key not in events: events[key] = etype # Look for the parent classes for parent in typeclass.__bases__: classes.put(parent) return events def get_variable(self, variable_name): """ Return the variable defined in the locals. This can be very useful to check the value of a variable that can be modified in an event, and whose value will be used in code. This system allows additional customization. Args: variable_name (str): the name of the variable to return. Returns: The variable if found in the locals. None if not found in the locals. Note: This will return the variable from the current locals. Keep in mind that locals are shared between events. As every event is called one by one, this doesn't pose additional problems if you get the variable right after an event has been executed. If, however, you differ, there's no guarantee the variable will be here or will mean the same thing. """ return self.ndb.current_locals.get(variable_name) def get_callbacks(self, obj): """ Return a dictionary of the object's callbacks. Args: obj (Object): the connected objects. Returns: A dictionary of the object's callbacks. Note: This method can be useful to override in some contexts, when several objects would share callbacks. """ obj_callbacks = self.db.callbacks.get(obj, {}) callbacks = {} for callback_name, callback_list in obj_callbacks.items(): new_list = [] for i, callback in enumerate(callback_list): callback = dict(callback) callback["obj"] = obj callback["name"] = callback_name callback["number"] = i new_list.append(callback) if new_list: callbacks[callback_name] = new_list return callbacks def add_callback(self, obj, callback_name, code, author=None, valid=False, parameters=""): """ Add the specified callback. Args: obj (Object): the Evennia typeclassed object to be extended. callback_name (str): the name of the callback to add. code (str): the Python code associated with this callback. author (Character or Player, optional): the author of the callback. valid (bool, optional): should the callback be connected? parameters (str, optional): optional parameters. Note: This method doesn't check that the callback type exists. """ obj_callbacks = self.db.callbacks.get(obj, {}) if not obj_callbacks: self.db.callbacks[obj] = {} obj_callbacks = self.db.callbacks[obj] callbacks = obj_callbacks.get(callback_name, []) if not callbacks: obj_callbacks[callback_name] = [] callbacks = obj_callbacks[callback_name] # Add the callback in the list callbacks.append({ "created_on": datetime.now(), "author": author, "valid": valid, "code": code, "parameters": parameters, }) # If not valid, set it in 'to_valid' if not valid: self.db.to_valid.append((obj, callback_name, len(callbacks) - 1)) # Call the custom_add if needed custom_add = self.get_events(obj).get( callback_name, [None, None, None, None])[3] if custom_add: custom_add(obj, callback_name, len(callbacks) - 1, parameters) # Build the definition to return (a dictionary) definition = dict(callbacks[-1]) definition["obj"] = obj definition["name"] = callback_name definition["number"] = len(callbacks) - 1 return definition def edit_callback(self, obj, callback_name, number, code, author=None, valid=False): """ Edit the specified callback. Args: obj (Object): the Evennia typeclassed object to be edited. callback_name (str): the name of the callback to edit. 
number (int): the callback number to be changed. code (str): the Python code associated with this callback. author (Character or Player, optional): the author of the callback. valid (bool, optional): should the callback be connected? Raises: RuntimeError if the callback is locked. Note: This method doesn't check that the callback type exists. """ obj_callbacks = self.db.callbacks.get(obj, {}) if not obj_callbacks: self.db.callbacks[obj] = {} obj_callbacks = self.db.callbacks[obj] callbacks = obj_callbacks.get(callback_name, []) if not callbacks: obj_callbacks[callback_name] = [] callbacks = obj_callbacks[callback_name] # If locked, don't edit it if (obj, callback_name, number) in self.db.locked: raise RuntimeError("this callback is locked.") # Edit the callback callbacks[number].update({ "updated_on": datetime.now(), "updated_by": author, "valid": valid, "code": code, }) # If not valid, set it in 'to_valid' if not valid and (obj, callback_name, number) not in self.db.to_valid: self.db.to_valid.append((obj, callback_name, number)) elif valid and (obj, callback_name, number) in self.db.to_valid: self.db.to_valid.remove((obj, callback_name, number)) # Build the definition to return (a dictionary) definition = dict(callbacks[number]) definition["obj"] = obj definition["name"] = callback_name definition["number"] = number return definition def del_callback(self, obj, callback_name, number): """ Delete the specified callback. Args: obj (Object): the typeclassed object containing the callback. callback_name (str): the name of the callback to delete. number (int): the number of the callback to delete. Raises: RuntimeError if the callback is locked. """ obj_callbacks = self.db.callbacks.get(obj, {}) callbacks = obj_callbacks.get(callback_name, []) # If locked, don't edit it if (obj, callback_name, number) in self.db.locked: raise RuntimeError("this callback is locked.") # Delete the callback itself try: code = callbacks[number]["code"] except IndexError: return else: logger.log_info("Deleting callback {} {} of {}:\n{}".format( callback_name, number, obj, code)) del callbacks[number] # Change IDs of callbacks to be validated i = 0 while i < len(self.db.to_valid): t_obj, t_callback_name, t_number = self.db.to_valid[i] if obj is t_obj and callback_name == t_callback_name: if t_number == number: # Strictly equal, delete the callback del self.db.to_valid[i] i -= 1 elif t_number > number: # Change the ID for this callback self.db.to_valid.insert(i, (t_obj, t_callback_name, t_number - 1)) del self.db.to_valid[i + 1] i += 1 # Update locked callback for i, line in enumerate(self.db.locked): t_obj, t_callback_name, t_number = line if obj is t_obj and callback_name == t_callback_name: if number < t_number: self.db.locked[i] = (t_obj, t_callback_name, t_number - 1) # Delete time-related callbacks associated with this object for script in list(obj.scripts.all()): if isinstance(script, TimecallbackScript): if script.obj is obj and script.db.callback_name == callback_name: if script.db.number == number: script.stop() elif script.db.number > number: script.db.number -= 1 def accept_callback(self, obj, callback_name, number): """ Valid a callback. Args: obj (Object): the object containing the callback. callback_name (str): the name of the callback. number (int): the number of the callback. 
""" obj_callbacks = self.db.callbacks.get(obj, {}) callbacks = obj_callbacks.get(callback_name, []) # Accept and connect the callback callbacks[number].update({"valid": True}) if (obj, callback_name, number) in self.db.to_valid: self.db.to_valid.remove((obj, callback_name, number)) def call(self, obj, callback_name, *args, **kwargs): """ Call the connected callbacks. Args: obj (Object): the Evennia typeclassed object. callback_name (str): the callback name to call. *args: additional variables for this callback. Kwargs: number (int, optional): call just a specific callback. parameters (str, optional): call a callback with parameters. locals (dict, optional): a locals replacement. Returns: True to report the callback was called without interruption, False otherwise. """ # First, look for the callback type corresponding to this name number = kwargs.get("number") parameters = kwargs.get("parameters") locals = kwargs.get("locals") # Errors should not pass silently allowed = ("number", "parameters", "locals") if any(k for k in kwargs if k not in allowed): raise TypeError("Unknown keyword arguments were specified " \ "to call callbacks: {}".format(kwargs)) event = self.get_events(obj).get(callback_name) if locals is None and not event: logger.log_err("The callback {} for the object {} (typeclass " \ "{}) can't be found".format(callback_name, obj, type(obj))) return False # Prepare the locals if necessary if locals is None: locals = self.ndb.fresh_locals.copy() for i, variable in enumerate(event[0]): try: locals[variable] = args[i] except IndexError: logger.log_trace("callback {} of {} ({}): need variable " \ "{} in position {}".format(callback_name, obj, type(obj), variable, i)) return False else: locals = {key: value for key, value in locals.items()} callbacks = self.get_callbacks(obj).get(callback_name, []) if event: custom_call = event[2] if custom_call: callbacks = custom_call(callbacks, parameters) # Now execute all the valid callbacks linked at this address self.ndb.current_locals = locals for i, callback in enumerate(callbacks): if not callback["valid"]: continue if number is not None and callback["number"] != number: continue try: exec(callback["code"], locals, locals) except InterruptEvent: return False except Exception: etype, evalue, tb = sys.exc_info() trace = traceback.format_exception(etype, evalue, tb) self.handle_error(callback, trace) return True def handle_error(self, callback, trace): """ Handle an error in a callback. Args: callback (dict): the callback representation. trace (list): the traceback containing the exception. Notes: This method can be useful to override to change the default handling of errors. By default, the error message is sent to the character who last updated the callback, if connected. If not, display to the everror channel. 
""" callback_name = callback["name"] number = callback["number"] obj = callback["obj"] oid = obj.id logger.log_err("An error occurred during the callback {} of " \ "{} (#{}), number {}\n{}".format(callback_name, obj, oid, number + 1, "\n".join(trace))) # Create the error message line = "|runknown|n" lineno = "|runknown|n" for error in trace: if error.startswith(' File "<string>", line '): res = RE_LINE_ERROR.search(error) if res: lineno = int(res.group(1)) # Try to extract the line try: line = raw(callback["code"].splitlines()[lineno - 1]) except IndexError: continue else: break exc = raw(trace[-1].strip("\n").splitlines()[-1]) err_msg = "Error in {} of {} (#{})[{}], line {}:" \ " {}\n{}".format(callback_name, obj, oid, number + 1, lineno, line, exc) # Inform the last updater if connected updater = callback.get("updated_by") if updater is None: updater = callback["created_by"] if updater and updater.sessions.all(): updater.msg(err_msg) else: err_msg = "Error in {} of {} (#{})[{}], line {}:" \ " {}\n {}".format(callback_name, obj, oid, number + 1, lineno, line, exc) self.ndb.channel.msg(err_msg) def add_event(self, typeclass, name, variables, help_text, custom_call, custom_add): """ Add a new event for a defined typeclass. Args: typeclass (str): the path leading to the typeclass. name (str): the name of the event to add. variables (list of str): list of variable names for this event. help_text (str): the long help text of the event. custom_call (callable or None): the function to be called when the event fires. custom_add (callable or None): the function to be called when a callback is added. """ if typeclass not in self.ndb.events: self.ndb.events[typeclass] = {} events = self.ndb.events[typeclass] if name not in events: events[name] = (variables, help_text, custom_call, custom_add) def set_task(self, seconds, obj, callback_name): """ Set and schedule a task to run. Args: seconds (int, float): the delay in seconds from now. obj (Object): the typecalssed object connected to the event. callback_name (str): the callback's name. Notes: This method allows to schedule a "persistent" task. 'utils.delay' is called, but a copy of the task is kept in the event handler, and when the script restarts (after reload), the differed delay is called again. The dictionary of locals is frozen and will be available again when the task runs. This feature, however, is limited by the database: all data cannot be saved. Lambda functions, class methods, objects inside an instance and so on will not be kept in the locals dictionary. """ now = datetime.now() delta = timedelta(seconds=seconds) # Choose a free task_id used_ids = list(self.db.tasks.keys()) task_id = 1 while task_id in used_ids: task_id += 1 # Collect and freeze current locals locals = {} for key, value in self.ndb.current_locals.items(): try: dbserialize(value) except TypeError: continue else: locals[key] = value self.db.tasks[task_id] = (now + delta, obj, callback_name, locals) delay(seconds, complete_task, task_id) # Script to call time-related events class TimeEventScript(DefaultScript): """Gametime-sensitive script.""" def at_script_creation(self): """The script is created.""" self.start_delay = True self.persistent = True # Script attributes self.db.time_format = None self.db.event_name = "time" self.db.number = None def at_repeat(self): """ Call the event and reset interval. It is necessary to restart the script to reset its interval only twice after a reload. When the script has undergone down time, there's usually a slight shift in game time. 
Once the script restarts once, it will set the average time it needs for all its future intervals and should not need to be restarted. In short, a script that is created shouldn't need to restart more than once, and a script that is reloaded should restart only twice. """ if self.db.time_format: # If the 'usual' time is set, use it seconds = self.ndb.usual if seconds is None: seconds, usual, details = get_next_wait(self.db.time_format) self.ndb.usual = usual if self.interval != seconds: self.restart(interval=seconds) if self.db.event_name and self.db.number is not None: obj = self.obj if not obj.callbacks: return event_name = self.db.event_name number = self.db.number obj.callbacks.call(event_name, obj, number=number) # Functions to manipulate tasks def complete_task(task_id): """ Mark the task in the event handler as complete. Args: task_id (int): the task ID. Note: This function should be called automatically for individual tasks. """ try: script = ScriptDB.objects.get(db_key="event_handler") except ScriptDB.DoesNotExist: logger.log_trace("Can't get the event handler.") return if task_id not in script.db.tasks: logger.log_err("The task #{} was scheduled, but it cannot be " \ "found".format(task_id)) return delta, obj, callback_name, locals = script.db.tasks.pop(task_id) script.call(obj, callback_name, locals=locals)
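

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the handler above): how `add_event` and
# `call` fit together.  The typeclass path, event name and the helper name
# below are made up for the example.  It assumes the handler script exists
# under the key "event_handler" (as `complete_task` above expects) and that
# `get_events(obj)` resolves events registered for the object's typeclass.
def _example_event_round_trip(handler, room):
    """Register a hypothetical 'dusk' event and fire it on `room` (sketch)."""
    # Declare the event type: callbacks written for it receive a single
    # variable, `room`, exposed in their locals when their code is exec'd.
    handler.add_event(
        "typeclasses.rooms.Room",          # hypothetical typeclass path
        "dusk",                            # hypothetical event name
        ["room"],                          # variables passed to callbacks
        "Called when dusk falls in the room.",
        None,                              # custom_call (keep default behavior)
        None,                              # custom_add (keep default behavior)
    )

    # Fire every valid callback stored for (room, "dusk").  Positional
    # arguments after the event name are bound to the declared variables,
    # here locals["room"] = room, before each callback's code is executed.
    return handler.call(room, "dusk", room)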
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import six from novaclient import client as base_client from novaclient import exceptions from novaclient.openstack.common import strutils from novaclient.openstack.common.py3kcompat import urlutils from novaclient.tests import fakes from novaclient.tests import utils from novaclient.v1_1 import client class FakeClient(fakes.FakeClient, client.Client): def __init__(self, *args, **kwargs): client.Client.__init__(self, 'username', 'password', 'project_id', 'auth_url', extensions=kwargs.get('extensions')) self.client = FakeHTTPClient(**kwargs) class FakeHTTPClient(base_client.HTTPClient): def __init__(self, **kwargs): self.username = 'username' self.password = 'password' self.auth_url = 'auth_url' self.tenant_id = 'tenant_id' self.callstack = [] self.projectid = 'projectid' self.user = 'user' self.region_name = 'region_name' self.endpoint_type = 'endpoint_type' self.service_type = 'service_type' self.service_name = 'service_name' self.volume_service_name = 'volume_service_name' self.timings = 'timings' self.bypass_url = 'bypass_url' self.os_cache = 'os_cache' self.http_log_debug = 'http_log_debug' def _cs_request(self, url, method, **kwargs): # Check that certain things are called correctly if method in ['GET', 'DELETE']: assert 'body' not in kwargs elif method == 'PUT': assert 'body' in kwargs # Call the method args = urlutils.parse_qsl(urlutils.urlparse(url)[4]) kwargs.update(args) munged_url = url.rsplit('?', 1)[0] munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_') munged_url = munged_url.replace('-', '_') munged_url = munged_url.replace(' ', '_') callback = "%s_%s" % (method.lower(), munged_url) if not hasattr(self, callback): raise AssertionError('Called unknown API method: %s %s, ' 'expected fakes method name: %s' % (method, url, callback)) # Note the call self.callstack.append((method, url, kwargs.get('body', None))) status, headers, body = getattr(self, callback)(**kwargs) r = utils.TestResponse({ "status_code": status, "text": body, "headers": headers, }) return r, body # # agents # def get_os_agents(self, **kw): hypervisor = kw.get('hypervisor', 'kvm') return (200, {}, {'agents': [{'hypervisor': hypervisor, 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'id': 1}, {'hypervisor': hypervisor, 'os': 'linux', 'architecture': 'x86', 'version': '16.0', 'url': 'xxx://xxxx/xxx/xxx1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'id': 2}, ]}) def post_os_agents(self, body): return (200, {}, {'agent': { 'url': '/xxx/xxx/xxx', 'hypervisor': body['agent']['hypervisor'], 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'version': '7.0', 'architecture': 'x86', 'os': 'win', 'id': 1}}) def delete_os_agents_1(self, **kw): return (202, {}, None) def put_os_agents_1(self, body, **kw): return (200, {}, {"agent": { 
"url": "/yyy/yyyy/yyyy", "version": "8.0", "md5hash": "add6bb58e139be103324d04d82d8f546", 'id': 1}}) # # List all extensions # def get_extensions(self, **kw): exts = [ { "alias": "NMN", "description": "Multiple network support", "links": [], "name": "Multinic", "namespace": ("http://docs.openstack.org/" "compute/ext/multinic/api/v1.1"), "updated": "2011-06-09T00:00:00+00:00" }, { "alias": "OS-DCF", "description": "Disk Management Extension", "links": [], "name": "DiskConfig", "namespace": ("http://docs.openstack.org/" "compute/ext/disk_config/api/v1.1"), "updated": "2011-09-27T00:00:00+00:00" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": ("http://docs.openstack.org/" "compute/ext/extended_status/api/v1.1"), "updated": "2011-11-03T00:00:00+00:00" }, { "alias": "OS-EXT-STS", "description": "Extended Status support", "links": [], "name": "ExtendedStatus", "namespace": ("http://docs.openstack.org/" "compute/ext/extended_status/api/v1.1"), "updated": "2011-11-03T00:00:00+00:00" }, ] return (200, {}, { "extensions": exts, }) # # Limits # def get_limits(self, **kw): return (200, {}, {"limits": { "rate": [ { "uri": "*", "regex": ".*", "limit": [ { "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z" }, { "value": 10, "verb": "PUT", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z" }, { "value": 100, "verb": "DELETE", "remaining": 100, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z" } ] }, { "uri": "*/servers", "regex": "^/servers", "limit": [ { "verb": "POST", "value": 25, "remaining": 24, "unit": "DAY", "next-available": "2011-12-15T22:42:45Z" } ] } ], "absolute": { "maxTotalRAMSize": 51200, "maxServerMeta": 5, "maxImageMeta": 5, "maxPersonality": 5, "maxPersonalitySize": 10240 }, }, }) # # Servers # def get_servers(self, **kw): return (200, {}, {"servers": [ {'id': 1234, 'name': 'sample-server'}, {'id': 5678, 'name': 'sample-server2'} ]}) def get_servers_detail(self, **kw): return (200, {}, {"servers": [ { "id": 1234, "name": "sample-server", "image": { "id": 2, "name": "sample image", }, "flavor": { "id": 1, "name": "256 MB Server", }, "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", "status": "BUILD", "progress": 60, "addresses": { "public": [{ "version": 4, "addr": "1.2.3.4", }, { "version": 4, "addr": "5.6.7.8", }], "private": [{ "version": 4, "addr": "10.11.12.13", }], }, "metadata": { "Server Label": "Web Head 1", "Image Version": "2.1" }, "OS-EXT-SRV-ATTR:host": "computenode1", "security_groups": [{ 'id': 1, 'name': 'securitygroup1', 'description': 'FAKE_SECURITY_GROUP', 'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7' }], "OS-EXT-MOD:some_thing": "mod_some_thing_value", }, { "id": 5678, "name": "sample-server2", "image": { "id": 2, "name": "sample image", }, "flavor": { "id": 1, "name": "256 MB Server", }, "hostId": "9e107d9d372bb6826bd81d3542a419d6", "status": "ACTIVE", "addresses": { "public": [{ "version": 4, "addr": "4.5.6.7", }, { "version": 4, "addr": "5.6.9.8", }], "private": [{ "version": 4, "addr": "10.13.12.13", }], }, "metadata": { "Server Label": "DB 1" }, "OS-EXT-SRV-ATTR:host": "computenode2", }, { "id": 9012, "name": "sample-server3", "image": "", "flavor": { "id": 1, "name": "256 MB Server", }, "hostId": "9e107d9d372bb6826bd81d3542a419d6", "status": "ACTIVE", "addresses": { "public": [{ "version": 4, "addr": "4.5.6.7", }, { "version": 4, "addr": "5.6.9.8", }], "private": [{ "version": 4, 
"addr": "10.13.12.13", }], }, "metadata": { "Server Label": "DB 1" } } ]}) def post_servers(self, body, **kw): assert set(body.keys()) <= set(['server', 'os:scheduler_hints']) fakes.assert_has_keys(body['server'], required=['name', 'imageRef', 'flavorRef'], optional=['metadata', 'personality']) if 'personality' in body['server']: for pfile in body['server']['personality']: fakes.assert_has_keys(pfile, required=['path', 'contents']) return (202, {}, self.get_servers_1234()[2]) def post_os_volumes_boot(self, body, **kw): assert set(body.keys()) <= set(['server', 'os:scheduler_hints']) fakes.assert_has_keys(body['server'], required=['name', 'flavorRef'], optional=['imageRef']) # Require one, and only one, of the keys for bdm if 'block_device_mapping' not in body['server']: if 'block_device_mapping_v2' not in body['server']: raise AssertionError( "missing required keys: 'block_device_mapping'" ) elif 'block_device_mapping_v2' in body['server']: raise AssertionError("found extra keys: 'block_device_mapping'") return (202, {}, self.get_servers_9012()[2]) def get_servers_1234(self, **kw): r = {'server': self.get_servers_detail()[2]['servers'][0]} return (200, {}, r) def get_servers_5678(self, **kw): r = {'server': self.get_servers_detail()[2]['servers'][1]} return (200, {}, r) def get_servers_9012(self, **kw): r = {'server': self.get_servers_detail()[2]['servers'][2]} return (200, {}, r) def put_servers_1234(self, body, **kw): assert list(body) == ['server'] fakes.assert_has_keys(body['server'], optional=['name', 'adminPass']) return (204, {}, body) def delete_servers_1234(self, **kw): return (202, {}, None) def delete_servers_5678(self, **kw): return (202, {}, None) def delete_servers_1234_metadata_test_key(self, **kw): return (204, {}, None) def delete_servers_1234_metadata_key1(self, **kw): return (204, {}, None) def delete_servers_1234_metadata_key2(self, **kw): return (204, {}, None) def post_servers_1234_metadata(self, **kw): return (204, {}, {'metadata': {'test_key': 'test_value'}}) def get_servers_1234_diagnostics(self, **kw): return (200, {}, {'data': 'Fake diagnostics'}) def post_servers_uuid1_metadata(self, **kw): return (204, {}, {'metadata': {'key1': 'val1'}}) def post_servers_uuid2_metadata(self, **kw): return (204, {}, {'metadata': {'key1': 'val1'}}) def post_servers_uuid3_metadata(self, **kw): return (204, {}, {'metadata': {'key1': 'val1'}}) def post_servers_uuid4_metadata(self, **kw): return (204, {}, {'metadata': {'key1': 'val1'}}) def delete_servers_uuid1_metadata_key1(self, **kw): return (200, {}, {'data': 'Fake diagnostics'}) def delete_servers_uuid2_metadata_key1(self, **kw): return (200, {}, {'data': 'Fake diagnostics'}) def delete_servers_uuid3_metadata_key1(self, **kw): return (200, {}, {'data': 'Fake diagnostics'}) def delete_servers_uuid4_metadata_key1(self, **kw): return (200, {}, {'data': 'Fake diagnostics'}) def get_servers_1234_os_security_groups(self, **kw): return (200, {}, { "security_groups": [{ 'id': 1, 'name': 'securitygroup1', 'description': 'FAKE_SECURITY_GROUP', 'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7', 'rules': []}] }) # # Server Addresses # def get_servers_1234_ips(self, **kw): return (200, {}, {'addresses': self.get_servers_1234()[1]['server']['addresses']}) def get_servers_1234_ips_public(self, **kw): return (200, {}, {'public': self.get_servers_1234_ips()[1]['addresses']['public']}) def get_servers_1234_ips_private(self, **kw): return (200, {}, {'private': self.get_servers_1234_ips()[1]['addresses']['private']}) def 
delete_servers_1234_ips_public_1_2_3_4(self, **kw): return (202, {}, None) # # Server password # def get_servers_1234_os_server_password(self, **kw): return (200, {}, {'password': ''}) def delete_servers_1234_os_server_password(self, **kw): return (202, {}, None) # # Server actions # def post_servers_1234_action(self, body, **kw): _headers = None _body = None resp = 202 assert len(body.keys()) == 1 action = list(body)[0] if action == 'reboot': assert list(body[action]) == ['type'] assert body[action]['type'] in ['HARD', 'SOFT'] elif action == 'rebuild': keys = list(body[action]) if 'adminPass' in keys: keys.remove('adminPass') assert 'imageRef' in keys _body = self.get_servers_1234()[2] elif action == 'resize': keys = body[action].keys() assert 'flavorRef' in keys elif action == 'confirmResize': assert body[action] is None # This one method returns a different response code return (204, {}, None) elif action == 'revertResize': assert body[action] is None elif action == 'migrate': assert body[action] is None elif action == 'os-stop': assert body[action] is None elif action == 'os-start': assert body[action] is None elif action == 'forceDelete': assert body[action] is None elif action == 'restore': assert body[action] is None elif action == 'pause': assert body[action] is None elif action == 'unpause': assert body[action] is None elif action == 'lock': assert body[action] is None elif action == 'unlock': assert body[action] is None elif action == 'rescue': assert body[action] is None _body = {'Password': 'RescuePassword'} elif action == 'unrescue': assert body[action] is None elif action == 'resume': assert body[action] is None elif action == 'suspend': assert body[action] is None elif action == 'lock': assert body[action] is None elif action == 'unlock': assert body[action] is None elif action == 'shelve': assert body[action] is None elif action == 'shelveOffload': assert body[action] is None elif action == 'unshelve': assert body[action] is None elif action == 'addFixedIp': assert list(body[action]) == ['networkId'] elif action == 'removeFixedIp': assert list(body[action]) == ['address'] elif action == 'addFloatingIp': assert (list(body[action]) == ['address'] or sorted(list(body[action])) == ['address', 'fixed_address']) elif action == 'removeFloatingIp': assert list(body[action]) == ['address'] elif action == 'createImage': assert set(body[action].keys()) == set(['name', 'metadata']) _headers = dict(location="http://blah/images/456") elif action == 'changePassword': assert list(body[action]) == ['adminPass'] elif action == 'os-getConsoleOutput': assert list(body[action]) == ['length'] return (202, {}, {'output': 'foo'}) elif action == 'os-getVNCConsole': assert list(body[action]) == ['type'] elif action == 'os-getSPICEConsole': assert list(body[action]) == ['type'] elif action == 'os-migrateLive': assert set(body[action].keys()) == set(['host', 'block_migration', 'disk_over_commit']) elif action == 'os-resetState': assert list(body[action]) == ['state'] elif action == 'resetNetwork': assert body[action] is None elif action == 'addSecurityGroup': assert list(body[action]) == ['name'] elif action == 'removeSecurityGroup': assert list(body[action]) == ['name'] elif action == 'createBackup': assert set(body[action]) == set(['name', 'backup_type', 'rotation']) elif action == 'evacuate': keys = list(body[action]) if 'adminPass' in keys: keys.remove('adminPass') assert set(keys) == set(['host', 'onSharedStorage']) else: raise AssertionError("Unexpected server action: %s" % action) return (resp, 
_headers, _body) # # Cloudpipe # def get_os_cloudpipe(self, **kw): return ( 200, {}, {'cloudpipes': [{'project_id': 1}]} ) def post_os_cloudpipe(self, **ks): return ( 202, {}, {'instance_id': '9d5824aa-20e6-4b9f-b967-76a699fc51fd'} ) def put_os_cloudpipe_configure_project(self, **kw): return (202, {}, None) # # Flavors # def get_flavors(self, **kw): status, header, flavors = self.get_flavors_detail(**kw) for flavor in flavors['flavors']: for k in list(flavor): if k not in ['id', 'name']: del flavor[k] return (200, {}, flavors) def get_flavors_detail(self, **kw): flavors = {'flavors': [ {'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10, 'OS-FLV-EXT-DATA:ephemeral': 10, 'os-flavor-access:is_public': True, 'links': {}}, {'id': 2, 'name': '512 MB Server', 'ram': 512, 'disk': 20, 'OS-FLV-EXT-DATA:ephemeral': 20, 'os-flavor-access:is_public': False, 'links': {}}, {'id': 'aa1', 'name': '128 MB Server', 'ram': 128, 'disk': 0, 'OS-FLV-EXT-DATA:ephemeral': 0, 'os-flavor-access:is_public': True, 'links': {}} ]} if 'is_public' not in kw: filter_is_public = True else: if kw['is_public'].lower() == 'none': filter_is_public = None else: filter_is_public = strutils.bool_from_string(kw['is_public'], True) if filter_is_public is not None: if filter_is_public: flavors['flavors'] = [ v for v in flavors['flavors'] if v['os-flavor-access:is_public'] ] else: flavors['flavors'] = [ v for v in flavors['flavors'] if not v['os-flavor-access:is_public'] ] return (200, {}, flavors) def get_flavors_1(self, **kw): return ( 200, {}, {'flavor': self.get_flavors_detail(is_public='None')[2]['flavors'][0]} ) def get_flavors_2(self, **kw): return ( 200, {}, {'flavor': self.get_flavors_detail(is_public='None')[2]['flavors'][1]} ) def get_flavors_3(self, **kw): # Diablo has no ephemeral return ( 200, {}, {'flavor': { 'id': 3, 'name': '256 MB Server', 'ram': 256, 'disk': 10, }}, ) def get_flavors_512_MB_Server(self, **kw): raise exceptions.NotFound('404') def get_flavors_aa1(self, **kw): # Aplhanumeric flavor id are allowed. 
return ( 200, {}, {'flavor': self.get_flavors_detail(is_public='None')[2]['flavors'][2]} ) def delete_flavors_flavordelete(self, **kw): return (202, {}, None) def delete_flavors_2(self, **kw): return (202, {}, None) def post_flavors(self, body, **kw): return ( 202, {}, {'flavor': self.get_flavors_detail(is_public='None')[2]['flavors'][0]} ) def get_flavors_1_os_extra_specs(self, **kw): return (200, {}, {'extra_specs': {"k1": "v1"}}) def get_flavors_2_os_extra_specs(self, **kw): return (200, {}, {'extra_specs': {"k2": "v2"}}) def get_flavors_aa1_os_extra_specs(self, **kw): return (200, {}, {'extra_specs': {"k3": "v3"}}) def post_flavors_1_os_extra_specs(self, body, **kw): assert list(body) == ['extra_specs'] fakes.assert_has_keys(body['extra_specs'], required=['k1']) return (200, {}, {'extra_specs': {"k1": "v1"}}) def delete_flavors_1_os_extra_specs_k1(self, **kw): return (204, {}, None) # # Flavor access # def get_flavors_1_os_flavor_access(self, **kw): return (404, {}, None) def get_flavors_2_os_flavor_access(self, **kw): return (200, {}, {'flavor_access': [ {'flavor_id': '2', 'tenant_id': 'proj1'}, {'flavor_id': '2', 'tenant_id': 'proj2'} ]}) def post_flavors_2_action(self, body, **kw): return (202, {}, self.get_flavors_2_os_flavor_access()[2]) # # Floating ips # def get_os_floating_ip_pools(self): return ( 200, {}, {'floating_ip_pools': [{'name': 'foo', 'name': 'bar'}]} ) def get_os_floating_ips(self, **kw): return ( 200, {}, {'floating_ips': [ {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}, {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, ]}, ) def get_os_floating_ips_1(self, **kw): return (200, {}, {'floating_ip': {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'} }) def post_os_floating_ips(self, body): if body.get('pool'): return (200, {}, {'floating_ip': {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1', 'pool': 'nova'}}) else: return (200, {}, {'floating_ip': {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1', 'pool': None}}) def delete_os_floating_ips_1(self, **kw): return (204, {}, None) def get_os_floating_ip_dns(self, **kw): return (205, {}, {'domain_entries': [{'domain': 'example.org'}, {'domain': 'example.com'}]}) def get_os_floating_ip_dns_testdomain_entries(self, **kw): if kw.get('ip'): return (205, {}, {'dns_entries': [{'dns_entry': {'ip': kw.get('ip'), 'name': "host1", 'type': "A", 'domain': 'testdomain'}}, {'dns_entry': {'ip': kw.get('ip'), 'name': "host2", 'type': "A", 'domain': 'testdomain'}}]}) else: return (404, {}, None) def get_os_floating_ip_dns_testdomain_entries_testname(self, **kw): return (205, {}, {'dns_entry': {'ip': "10.10.10.10", 'name': 'testname', 'type': "A", 'domain': 'testdomain'}}) def put_os_floating_ip_dns_testdomain(self, body, **kw): if body['domain_entry']['scope'] == 'private': fakes.assert_has_keys(body['domain_entry'], required=['availability_zone', 'scope']) elif body['domain_entry']['scope'] == 'public': fakes.assert_has_keys(body['domain_entry'], required=['project', 'scope']) else: fakes.assert_has_keys(body['domain_entry'], required=['project', 'scope']) return (205, {}, body) def put_os_floating_ip_dns_testdomain_entries_testname(self, body, **kw): fakes.assert_has_keys(body['dns_entry'], required=['ip', 'dns_type']) return (205, {}, body) def delete_os_floating_ip_dns_testdomain(self, **kw): return (200, {}, None) def delete_os_floating_ip_dns_testdomain_entries_testname(self, **kw): return (200, {}, None) def get_os_floating_ips_bulk(self, **kw): return (200, {}, {'floating_ip_info': [ {'id': 1, 'fixed_ip': '10.0.0.1', 
'ip': '11.0.0.1'}, {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, ]}) def get_os_floating_ips_bulk_testHost(self, **kw): return (200, {}, {'floating_ip_info': [ {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}, {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, ]}) def post_os_floating_ips_bulk(self, **kw): params = kw.get('body').get('floating_ips_bulk_create') pool = params.get('pool', 'defaultPool') interface = params.get('interface', 'defaultInterface') return (200, {}, {'floating_ips_bulk_create': {'ip_range': '192.168.1.0/30', 'pool': pool, 'interface': interface}}) def put_os_floating_ips_bulk_delete(self, **kw): ip_range = kw.get('body').get('ip_range') return (200, {}, {'floating_ips_bulk_delete': ip_range}) # # Images # def get_images(self, **kw): return (200, {}, {'images': [ {'id': 1, 'name': 'CentOS 5.2'}, {'id': 2, 'name': 'My Server Backup'} ]}) def get_images_detail(self, **kw): return (200, {}, {'images': [ { 'id': 1, 'name': 'CentOS 5.2', "updated": "2010-10-10T12:00:00Z", "created": "2010-08-10T12:00:00Z", "status": "ACTIVE", "metadata": { "test_key": "test_value", }, "links": {}, }, { "id": 2, "name": "My Server Backup", "serverId": 1234, "updated": "2010-10-10T12:00:00Z", "created": "2010-08-10T12:00:00Z", "status": "SAVING", "progress": 80, "links": {}, } ]}) def get_images_1(self, **kw): return (200, {}, {'image': self.get_images_detail()[2]['images'][0]}) def get_images_2(self, **kw): return (200, {}, {'image': self.get_images_detail()[2]['images'][1]}) def post_images(self, body, **kw): assert list(body) == ['image'] fakes.assert_has_keys(body['image'], required=['serverId', 'name']) return (202, {}, self.get_images_1()[2]) def post_images_1_metadata(self, body, **kw): assert list(body) == ['metadata'] fakes.assert_has_keys(body['metadata'], required=['test_key']) return (200, {}, {'metadata': self.get_images_1()[2]['image']['metadata']}) def delete_images_1(self, **kw): return (204, {}, None) def delete_images_2(self, **kw): return (204, {}, None) def delete_images_1_metadata_test_key(self, **kw): return (204, {}, None) # # Keypairs # def get_os_keypairs_test(self, *kw): return (200, {}, {'keypair': self.get_os_keypairs()[2]['keypairs'][0]}) def get_os_keypairs(self, *kw): return (200, {}, {"keypairs": [ {'fingerprint': 'FAKE_KEYPAIR', 'name': 'test'} ]}) def delete_os_keypairs_test(self, **kw): return (202, {}, None) def post_os_keypairs(self, body, **kw): assert list(body) == ['keypair'] fakes.assert_has_keys(body['keypair'], required=['name']) r = {'keypair': self.get_os_keypairs()[2]['keypairs'][0]} return (202, {}, r) # # Virtual Interfaces # def get_servers_1234_os_virtual_interfaces(self, **kw): return (200, {}, {"virtual_interfaces": [ {'id': 'fakeid', 'mac_address': 'fakemac'} ]}) # # Quotas # def get_os_quota_sets_test(self, **kw): return (200, {}, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def get_os_quota_sets_tenant_id(self, **kw): return (200, {}, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def 
get_os_quota_sets_97f4c221bff44578b0300df4ef119353(self, **kw): return (200, {}, {'quota_set': { 'tenant_id': '97f4c221bff44578b0300df4ef119353', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def put_os_quota_sets_97f4c221_bff4_4578_b030_0df4ef119353(self, **kw): return (200, {}, {'quota_set': { 'tenant_id': '97f4c221-bff4-4578-b030-0df4ef119353', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def get_os_quota_sets_97f4c221_bff4_4578_b030_0df4ef119353(self, **kw): return (200, {}, {'quota_set': { 'tenant_id': '97f4c221-bff4-4578-b030-0df4ef119353', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def get_os_quota_sets_97f4c221bff44578b0300df4ef119353_defaults(self): return (200, {}, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def get_os_quota_sets_tenant_id_defaults(self): return (200, {}, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def put_os_quota_sets_97f4c221bff44578b0300df4ef119353(self, body, **kw): assert list(body) == ['quota_set'] fakes.assert_has_keys(body['quota_set'], required=['tenant_id']) return (200, {}, {'quota_set': { 'tenant_id': '97f4c221bff44578b0300df4ef119353', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 2, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def delete_os_quota_sets_test(self, **kw): return (202, {}, {}) def delete_os_quota_sets_97f4c221bff44578b0300df4ef119353(self, **kw): return (202, {}, {}) # # Quota Classes # def get_os_quota_class_sets_test(self, **kw): return (200, {}, {'quota_class_set': { 'class_name': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def put_os_quota_class_sets_test(self, body, **kw): assert list(body) == ['quota_class_set'] fakes.assert_has_keys(body['quota_class_set'], required=['class_name']) return (200, {}, {'quota_class_set': { 'class_name': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 2, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) def 
put_os_quota_class_sets_97f4c221bff44578b0300df4ef119353(self, body, **kw): assert list(body) == ['quota_class_set'] fakes.assert_has_keys(body['quota_class_set'], required=['class_name']) return (200, {}, {'quota_class_set': { 'class_name': '97f4c221bff44578b0300df4ef119353', 'metadata_items': [], 'injected_file_content_bytes': 1, 'injected_file_path_bytes': 1, 'volumes': 2, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1, 'keypairs': 1, 'security_groups': 1, 'security_group_rules': 1}}) # # Security Groups # def get_os_security_groups(self, **kw): return (200, {}, {"security_groups": [ {"name": "test", "description": "FAKE_SECURITY_GROUP", "tenant_id": "4ffc664c198e435e9853f2538fbcd7a7", "id": 1, "rules": [ {"id": 11, "group": {}, "ip_protocol": "TCP", "from_port": 22, "to_port": 22, "parent_group_id": 1, "ip_range": {"cidr": "10.0.0.0/8"}}, {"id": 12, "group": { "tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582", "name": "test2"}, "ip_protocol": "TCP", "from_port": 222, "to_port": 222, "parent_group_id": 1, "ip_range": {}}]}, {"name": "test2", "description": "FAKE_SECURITY_GROUP2", "tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582", "id": 2, "rules": []} ]} ) def get_os_security_groups_1(self, **kw): return (200, {}, {"security_group": {'id': 1, 'name': 'test', 'description': 'FAKE_SECURITY_GROUP'} }) def delete_os_security_groups_1(self, **kw): return (202, {}, None) def post_os_security_groups(self, body, **kw): assert list(body) == ['security_group'] fakes.assert_has_keys(body['security_group'], required=['name', 'description']) r = {'security_group': self.get_os_security_groups()[2]['security_groups'][0]} return (202, {}, r) def put_os_security_groups_1(self, body, **kw): assert list(body) == ['security_group'] fakes.assert_has_keys(body['security_group'], required=['name', 'description']) return (205, {}, body) # # Security Group Rules # def get_os_security_group_rules(self, **kw): return (200, {}, {"security_group_rules": [ {'id': 1, 'parent_group_id': 1, 'group_id': 2, 'ip_protocol': 'TCP', 'from_port': '22', 'to_port': 22, 'cidr': '10.0.0.0/8'} ]}) def delete_os_security_group_rules_1(self, **kw): return (202, {}, None) def delete_os_security_group_rules_11(self, **kw): return (202, {}, None) def delete_os_security_group_rules_12(self, **kw): return (202, {}, None) def post_os_security_group_rules(self, body, **kw): assert list(body) == ['security_group_rule'] fakes.assert_has_keys(body['security_group_rule'], required=['parent_group_id'], optional=['group_id', 'ip_protocol', 'from_port', 'to_port', 'cidr']) r = {'security_group_rule': self.get_os_security_group_rules()[2]['security_group_rules'][0]} return (202, {}, r) # # Tenant Usage # def get_os_simple_tenant_usage(self, **kw): return (200, {}, {six.u('tenant_usages'): [{ six.u('total_memory_mb_usage'): 25451.762807466665, six.u('total_vcpus_usage'): 49.71047423333333, six.u('total_hours'): 49.71047423333333, six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('stop'): six.u('2012-01-22 19:48:41.750722'), six.u('server_usages'): [{ six.u('hours'): 49.71047423333333, six.u('uptime'): 27035, six.u('local_gb'): 0, six.u('ended_at'): None, six.u('name'): six.u('f15image1'), six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('vcpus'): 1, six.u('memory_mb'): 512, six.u('state'): six.u('active'), six.u('flavor'): six.u('m1.tiny'), six.u('started_at'): six.u('2012-01-20 18:06:06.479998')}], six.u('start'): six.u('2011-12-25 19:48:41.750687'), 
six.u('total_local_gb_usage'): 0.0}]}) def get_os_simple_tenant_usage_tenantfoo(self, **kw): return (200, {}, {six.u('tenant_usage'): { six.u('total_memory_mb_usage'): 25451.762807466665, six.u('total_vcpus_usage'): 49.71047423333333, six.u('total_hours'): 49.71047423333333, six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('stop'): six.u('2012-01-22 19:48:41.750722'), six.u('server_usages'): [{ six.u('hours'): 49.71047423333333, six.u('uptime'): 27035, six.u('local_gb'): 0, six.u('ended_at'): None, six.u('name'): six.u('f15image1'), six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('vcpus'): 1, six.u('memory_mb'): 512, six.u('state'): six.u('active'), six.u('flavor'): six.u('m1.tiny'), six.u('started_at'): six.u('2012-01-20 18:06:06.479998')}], six.u('start'): six.u('2011-12-25 19:48:41.750687'), six.u('total_local_gb_usage'): 0.0}}) def get_os_simple_tenant_usage_test(self, **kw): return (200, {}, {six.u('tenant_usage'): { six.u('total_memory_mb_usage'): 25451.762807466665, six.u('total_vcpus_usage'): 49.71047423333333, six.u('total_hours'): 49.71047423333333, six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('stop'): six.u('2012-01-22 19:48:41.750722'), six.u('server_usages'): [{ six.u('hours'): 49.71047423333333, six.u('uptime'): 27035, six.u('local_gb'): 0, six.u('ended_at'): None, six.u('name'): six.u('f15image1'), six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('vcpus'): 1, six.u('memory_mb'): 512, six.u('state'): six.u('active'), six.u('flavor'): six.u('m1.tiny'), six.u('started_at'): six.u('2012-01-20 18:06:06.479998')}], six.u('start'): six.u('2011-12-25 19:48:41.750687'), six.u('total_local_gb_usage'): 0.0}}) def get_os_simple_tenant_usage_tenant_id(self, **kw): return (200, {}, {six.u('tenant_usage'): { six.u('total_memory_mb_usage'): 25451.762807466665, six.u('total_vcpus_usage'): 49.71047423333333, six.u('total_hours'): 49.71047423333333, six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('stop'): six.u('2012-01-22 19:48:41.750722'), six.u('server_usages'): [{ six.u('hours'): 49.71047423333333, six.u('uptime'): 27035, six.u('local_gb'): 0, six.u('ended_at'): None, six.u('name'): six.u('f15image1'), six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), six.u('vcpus'): 1, six.u('memory_mb'): 512, six.u('state'): six.u('active'), six.u('flavor'): six.u('m1.tiny'), six.u('started_at'): six.u('2012-01-20 18:06:06.479998')}], six.u('start'): six.u('2011-12-25 19:48:41.750687'), six.u('total_local_gb_usage'): 0.0}}) # # Certificates # def get_os_certificates_root(self, **kw): return ( 200, {}, {'certificate': {'private_key': None, 'data': 'foo'}} ) def post_os_certificates(self, **kw): return ( 200, {}, {'certificate': {'private_key': 'foo', 'data': 'bar'}} ) # # Aggregates # def get_os_aggregates(self, *kw): return (200, {}, {"aggregates": [ {'id': '1', 'name': 'test', 'availability_zone': 'nova1'}, {'id': '2', 'name': 'test2', 'availability_zone': 'nova1'}, ]}) def _return_aggregate(self): r = {'aggregate': self.get_os_aggregates()[2]['aggregates'][0]} return (200, {}, r) def get_os_aggregates_1(self, **kw): return self._return_aggregate() def post_os_aggregates(self, body, **kw): return self._return_aggregate() def put_os_aggregates_1(self, body, **kw): return self._return_aggregate() def put_os_aggregates_2(self, body, **kw): return self._return_aggregate() def post_os_aggregates_1_action(self, body, **kw): return self._return_aggregate() def post_os_aggregates_2_action(self, 
body, **kw): return self._return_aggregate() def delete_os_aggregates_1(self, **kw): return (202, {}, None) # # Services # def get_os_services(self, **kw): host = kw.get('host', 'host1') binary = kw.get('binary', 'nova-compute') return (200, {}, {'services': [{'binary': binary, 'host': host, 'zone': 'nova', 'status': 'enabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, {'binary': binary, 'host': host, 'zone': 'nova', 'status': 'disabled', 'state': 'down', 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}, ]}) def put_os_services_enable(self, body, **kw): return (200, {}, {'service': {'host': body['host'], 'binary': body['binary'], 'status': 'enabled'}}) def put_os_services_disable(self, body, **kw): return (200, {}, {'service': {'host': body['host'], 'binary': body['binary'], 'status': 'disabled'}}) def put_os_services_disable_log_reason(self, body, **kw): return (200, {}, {'service': {'host': body['host'], 'binary': body['binary'], 'status': 'disabled', 'disabled_reason': body['disabled_reason']}}) # # Fixed IPs # def get_os_fixed_ips_192_168_1_1(self, *kw): return (200, {}, {"fixed_ip": {'cidr': '192.168.1.0/24', 'address': '192.168.1.1', 'hostname': 'foo', 'host': 'bar'}}) def post_os_fixed_ips_192_168_1_1_action(self, body, **kw): return (202, {}, None) # # Hosts # def get_os_hosts_host(self, *kw): return (200, {}, {'host': [{'resource': {'project': '(total)', 'host': 'dummy', 'cpu': 16, 'memory_mb': 32234, 'disk_gb': 128}}, {'resource': {'project': '(used_now)', 'host': 'dummy', 'cpu': 1, 'memory_mb': 2075, 'disk_gb': 45}}, {'resource': {'project': '(used_max)', 'host': 'dummy', 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}, {'resource': {'project': 'admin', 'host': 'dummy', 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}]}) def get_os_hosts(self, **kw): zone = kw.get('zone', 'nova1') return (200, {}, {'hosts': [{'host': 'host1', 'service': 'nova-compute', 'zone': zone}, {'host': 'host1', 'service': 'nova-cert', 'zone': zone}]}) def get_os_hosts_sample_host(self, *kw): return (200, {}, {'host': [{'resource': {'host': 'sample_host'}}], }) def put_os_hosts_sample_host_1(self, body, **kw): return (200, {}, {'host': 'sample-host_1', 'status': 'enabled'}) def put_os_hosts_sample_host_2(self, body, **kw): return (200, {}, {'host': 'sample-host_2', 'maintenance_mode': 'on_maintenance'}) def put_os_hosts_sample_host_3(self, body, **kw): return (200, {}, {'host': 'sample-host_3', 'status': 'enabled', 'maintenance_mode': 'on_maintenance'}) def get_os_hosts_sample_host_reboot(self, **kw): return (200, {}, {'host': 'sample_host', 'power_action': 'reboot'}) def get_os_hosts_sample_host_startup(self, **kw): return (200, {}, {'host': 'sample_host', 'power_action': 'startup'}) def get_os_hosts_sample_host_shutdown(self, **kw): return (200, {}, {'host': 'sample_host', 'power_action': 'shutdown'}) def put_os_hosts_sample_host(self, body, **kw): result = {'host': 'dummy'} result.update(body) return (200, {}, result) def get_os_hypervisors(self, **kw): return (200, {}, {"hypervisors": [ {'id': 1234, 'hypervisor_hostname': 'hyper1'}, {'id': 5678, 'hypervisor_hostname': 'hyper2'}, ]}) def get_os_hypervisors_detail(self, **kw): return (200, {}, {"hypervisors": [ {'id': 1234, 'service': {'id': 1, 'host': 'compute1'}, 'vcpus': 4, 'memory_mb': 10 * 1024, 'local_gb': 250, 'vcpus_used': 2, 'memory_mb_used': 5 * 1024, 'local_gb_used': 125, 'hypervisor_type': "xen", 'hypervisor_version': 3, 'hypervisor_hostname': "hyper1", 'free_ram_mb': 5 * 1024, 'free_disk_gb': 125, 'current_workload': 2, 
'running_vms': 2, 'cpu_info': 'cpu_info', 'disk_available_least': 100}, {'id': 2, 'service': {'id': 2, 'host': "compute2"}, 'vcpus': 4, 'memory_mb': 10 * 1024, 'local_gb': 250, 'vcpus_used': 2, 'memory_mb_used': 5 * 1024, 'local_gb_used': 125, 'hypervisor_type': "xen", 'hypervisor_version': 3, 'hypervisor_hostname': "hyper2", 'free_ram_mb': 5 * 1024, 'free_disk_gb': 125, 'current_workload': 2, 'running_vms': 2, 'cpu_info': 'cpu_info', 'disk_available_least': 100} ]}) def get_os_hypervisors_statistics(self, **kw): return (200, {}, {"hypervisor_statistics": { 'count': 2, 'vcpus': 8, 'memory_mb': 20 * 1024, 'local_gb': 500, 'vcpus_used': 4, 'memory_mb_used': 10 * 1024, 'local_gb_used': 250, 'free_ram_mb': 10 * 1024, 'free_disk_gb': 250, 'current_workload': 4, 'running_vms': 4, 'disk_available_least': 200, }}) def get_os_hypervisors_hyper_search(self, **kw): return (200, {}, {'hypervisors': [ {'id': 1234, 'hypervisor_hostname': 'hyper1'}, {'id': 5678, 'hypervisor_hostname': 'hyper2'} ]}) def get_os_hypervisors_hyper_servers(self, **kw): return (200, {}, {'hypervisors': [ {'id': 1234, 'hypervisor_hostname': 'hyper1', 'servers': [ {'name': 'inst1', 'uuid': 'uuid1'}, {'name': 'inst2', 'uuid': 'uuid2'} ]}, {'id': 5678, 'hypervisor_hostname': 'hyper2', 'servers': [ {'name': 'inst3', 'uuid': 'uuid3'}, {'name': 'inst4', 'uuid': 'uuid4'} ]} ]}) def get_os_hypervisors_hyper_no_servers_servers(self, **kw): return (200, {}, {'hypervisors': [{'id': 1234, 'hypervisor_hostname': 'hyper1'}]}) def get_os_hypervisors_1234(self, **kw): return (200, {}, {'hypervisor': {'id': 1234, 'service': {'id': 1, 'host': 'compute1'}, 'vcpus': 4, 'memory_mb': 10 * 1024, 'local_gb': 250, 'vcpus_used': 2, 'memory_mb_used': 5 * 1024, 'local_gb_used': 125, 'hypervisor_type': "xen", 'hypervisor_version': 3, 'hypervisor_hostname': "hyper1", 'free_ram_mb': 5 * 1024, 'free_disk_gb': 125, 'current_workload': 2, 'running_vms': 2, 'cpu_info': 'cpu_info', 'disk_available_least': 100}}) def get_os_hypervisors_1234_uptime(self, **kw): return (200, {}, {'hypervisor': {'id': 1234, 'hypervisor_hostname': "hyper1", 'uptime': "fake uptime"}}) def get_os_networks(self, **kw): return (200, {}, {'networks': [{"label": "1", "cidr": "10.0.0.0/24", 'project_id': '4ffc664c198e435e9853f2538fbcd7a7', 'id': '1'}]}) def post_os_networks(self, **kw): return (202, {}, {'network': kw}) def get_os_networks_1(self, **kw): return (200, {}, {'network': {"label": "1", "cidr": "10.0.0.0/24"}}) def delete_os_networks_networkdelete(self, **kw): return (202, {}, None) def post_os_networks_add(self, **kw): return (202, {}, None) def post_os_networks_networkdisassociate_action(self, **kw): return (202, {}, None) def get_os_fping(self, **kw): return ( 200, {}, { 'servers': [ { "id": "1", "project_id": "fake-project", "alive": True, }, { "id": "2", "project_id": "fake-project", "alive": True, }, ] } ) def get_os_fping_1(self, **kw): return ( 200, {}, { 'server': { "id": "1", "project_id": "fake-project", "alive": True, } } ) def post_os_coverage_action(self, body, **kw): if 'report' not in body: return (200, {}, None) else: return (200, {}, { 'path': '/tmp/tmpdir/' + body['report']['file'] }) def post_os_networks_1_action(self, **kw): return (202, {}, None) def post_os_networks_networktest_action(self, **kw): return (202, {}, None) def post_os_networks_2_action(self, **kw): return (202, {}, None) def post_os_coverage_action(self, body, **kw): if 'start' in body or 'reset' in body: return (200, {}, None) elif 'stop' in body: return (200, {}, {'path': '/tmp/tmpdir/'}) 
else: return (200, {}, { 'path': '/tmp/tmpdir/' + body['report']['file'] }) def get_os_availability_zone(self, **kw): return (200, {}, {"availabilityZoneInfo": [ {"zoneName": "zone-1", "zoneState": {"available": True}, "hosts": None}, {"zoneName": "zone-2", "zoneState": {"available": False}, "hosts": None}]}) def get_os_availability_zone_detail(self, **kw): return (200, {}, {"availabilityZoneInfo": [ {"zoneName": "zone-1", "zoneState": {"available": True}, "hosts": { "fake_host-1": { "nova-compute": {"active": True, "available": True, "updated_at": datetime(2012, 12, 26, 14, 45, 25, 0)}}}}, {"zoneName": "internal", "zoneState": {"available": True}, "hosts": { "fake_host-1": { "nova-sched": { "active": True, "available": True, "updated_at": datetime(2012, 12, 26, 14, 45, 25, 0)}}, "fake_host-2": { "nova-network": { "active": True, "available": False, "updated_at": datetime(2012, 12, 26, 14, 45, 24, 0)}}}}, {"zoneName": "zone-2", "zoneState": {"available": False}, "hosts": None}]}) def get_servers_1234_os_interface(self, **kw): return (200, {}, {"interfaceAttachments": [ {"port_state": "ACTIVE", "net_id": "net-id-1", "port_id": "port-id-1", "mac_address": "aa:bb:cc:dd:ee:ff", "fixed_ips": [{"ip_address": "1.2.3.4"}], }, {"port_state": "ACTIVE", "net_id": "net-id-1", "port_id": "port-id-1", "mac_address": "aa:bb:cc:dd:ee:ff", "fixed_ips": [{"ip_address": "1.2.3.4"}], }]}) def post_servers_1234_os_interface(self, **kw): return (200, {}, {'interfaceAttachment': {}}) def delete_servers_1234_os_interface_port_id(self, **kw): return (200, {}, None) # NOTE (vkhomenko): # Volume responses was taken from: # https://wiki.openstack.org/wiki/CreateVolumeFromImage # http://jorgew.github.com/block-storage-api/content/ # GET_listDetailVolumes_v1__tenantId__volumes_detail_.html # I suppose they are outdated and should be updated after Cinder released def get_volumes_detail(self, **kw): return (200, {}, {"volumes": [ {"display_name": "Work", "display_description": "volume for work", "status": "ATTACHED", "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", "created_at": "2011-09-09T00:00:00Z", "attached": "2011-11-11T00:00:00Z", "size": 1024, "attachments": [ {"id": "3333", "links": ''}], "metadata": {}}]}) def get_volumes(self, **kw): return (200, {}, {"volumes": [ {"display_name": "Work", "display_description": "volume for work", "status": "ATTACHED", "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", "created_at": "2011-09-09T00:00:00Z", "attached": "2011-11-11T00:00:00Z", "size": 1024, "attachments": [ {"id": "3333", "links": ''}], "metadata": {}}]}) def get_volumes_15e59938_07d5_11e1_90e3_e3dffe0c5983(self, **kw): return (200, {}, {"volume": {"display_name": "Work", "display_description": "volume for work", "status": "ATTACHED", "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", "created_at": "2011-09-09T00:00:00Z", "attached": "2011-11-11T00:00:00Z", "size": 1024, "attachments": [ {"id": "3333", "links": ''}], "metadata": {}}}) def post_volumes(self, **kw): return (200, {}, {"volume": {"status": "creating", "display_name": "vol-007", "attachments": [(0)], "availability_zone": "cinder", "created_at": "2012-08-13T10:57:17.000000", "display_description": "create volume from image", "image_id": "f4cf905f-7c58-4d7b-8314-8dd8a2d1d483", "volume_type": "None", "metadata": {}, "id": "5cb239f6-1baf-4fe1-bd78-c852cf00fa39", "size": 1}}) def delete_volumes_15e59938_07d5_11e1_90e3_e3dffe0c5983(self, **kw): return (200, {}, {}) def post_servers_1234_os_volume_attachments(self, **kw): return (200, {}, {"volumeAttachment": {"device": 
"/dev/vdb", "volumeId": 2}}) def put_servers_1234_os_volume_attachments_Work(self, **kw): return (200, {}, {"volumeAttachment": {"volumeId": 2}}) def get_servers_1234_os_volume_attachments(self, **kw): return (200, {}, {"volumeAttachments": [ {"display_name": "Work", "display_description": "volume for work", "status": "ATTACHED", "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", "created_at": "2011-09-09T00:00:00Z", "attached": "2011-11-11T00:00:00Z", "size": 1024, "attachments": [ {"id": "3333", "links": ''}], "metadata": {}}]}) def get_servers_1234_os_volume_attachments_Work(self, **kw): return (200, {}, {"volumeAttachment": {"display_name": "Work", "display_description": "volume for work", "status": "ATTACHED", "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", "created_at": "2011-09-09T00:00:00Z", "attached": "2011-11-11T00:00:00Z", "size": 1024, "attachments": [ {"id": "3333", "links": ''}], "metadata": {}}}) def delete_servers_1234_os_volume_attachments_Work(self, **kw): return (200, {}, {}) def get_servers_1234_os_instance_actions(self, **kw): return (200, {}, {"instanceActions": [{"instance_uuid": "1234", "user_id": "b968c25e04ab405f9fe4e6ca54cce9a5", "start_time": "2013-03-25T13:45:09.000000", "request_id": "req-abcde12345", "action": "create", "message": None, "project_id": "04019601fe3648c0abd4f4abfb9e6106"}]}) def get_servers_1234_os_instance_actions_req_abcde12345(self, **kw): return (200, {}, {"instanceAction": {"instance_uuid": "1234", "user_id": "b968c25e04ab405f9fe4e6ca54cce9a5", "start_time": "2013-03-25T13:45:09.000000", "request_id": "req-abcde12345", "action": "create", "message": None, "project_id": "04019601fe3648c0abd4f4abfb9e6106"}}) def post_servers_uuid1_action(self, **kw): return 202, {}, {} def post_servers_uuid2_action(self, **kw): return 202, {}, {} def post_servers_uuid3_action(self, **kw): return 202, {}, {} def post_servers_uuid4_action(self, **kw): return 202, {}, {} def get_os_cells_child_cell(self, **kw): cell = {'cell': { 'username': 'cell1_user', 'name': 'cell1', 'rpc_host': '10.0.1.10', '_info': { 'username': 'cell1_user', 'rpc_host': '10.0.1.10', 'type': 'child', 'name': 'cell1', 'rpc_port': 5673 }, 'type': 'child', 'rpc_port': 5673, '_loaded': True }} return (200, {}, cell) def get_os_cells_capacities(self, **kw): cell_capacities_response = {"cell": {"capacities": {"ram_free": { "units_by_mb": {"8192": 0, "512": 13, "4096": 1, "2048": 3, "16384": 0}, "total_mb": 7680}, "disk_free": { "units_by_mb": {"81920": 11, "20480": 46, "40960": 23, "163840": 5, "0": 0}, "total_mb": 1052672}}}} return (200, {}, cell_capacities_response) def get_os_cells_child_cell_capacities(self, **kw): return self.get_os_cells_capacities() def get_os_migrations(self, **kw): migrations = {'migrations': [{ "created_at": "2012-10-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1234, "instance_uuid": "instance_id_123", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "Done", "updated_at": "2012-10-29T13:42:02.000000" }]} return (200, {}, migrations)
# Copyright 2014 - 2015 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems. Limitations: 1. Cinder driver only works when open_access_enabled=off. 2. Cinder driver only works when connection protocol is FC. """ import random import re import string import threading from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import excutils from oslo_utils import units import six from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder.openstack.common import strutils from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) FLASHSYSTEM_VOLPOOL_NAME = 'mdiskgrp0' FLASHSYSTEM_VOL_IOGRP = 0 flashsystem_opts = [ cfg.StrOpt('flashsystem_connection_protocol', default='FC', help='Connection protocol should be FC.'), cfg.BoolOpt('flashsystem_multipath_enabled', default=False, help='Connect with multipath (FC only).'), cfg.BoolOpt('flashsystem_multihostmap_enabled', default=True, help='Allows vdisk to multi host mapping.') ] CONF = cfg.CONF CONF.register_opts(flashsystem_opts) class FlashSystemDriver(san.SanDriver): """IBM FlashSystem 840 FC volume driver. 
Version history: 1.0.0 - Initial driver 1.0.1 - Code clean up """ VERSION = "1.0.1" def __init__(self, *args, **kwargs): super(FlashSystemDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(flashsystem_opts) self._storage_nodes = {} self._protocol = None self._context = None self._system_name = None self._system_id = None def _ssh(self, ssh_cmd, check_exit_code=True): try: return self._run_ssh(ssh_cmd, check_exit_code) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _append_dict(self, dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ def _assert_ssh_return(self, test, fun, ssh_cmd, out, err): self._driver_assert(test, (_('%(fun)s: Failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n ' 'stderr: %(err)s') % {'fun': fun, 'cmd': ssh_cmd, 'out': six.text_type(out), 'err': six.text_type(err)})) def _build_default_params(self): return {'protocol': self.configuration.flashsystem_connection_protocol, 'multipath': self.configuration.flashsystem_multipath_enabled} def _build_initiator_target_map(self, initiator_wwpns, target_wwpns): map = {} for i_wwpn in initiator_wwpns: idx = six.text_type(i_wwpn) map[idx] = [] for t_wwpn in target_wwpns: map[idx].append(t_wwpn) return map def _check_vdisk_params(self, params): # Check that the requested protocol is enabled if params['protocol'] != self._protocol: msg = (_("Illegal value '%(prot)s' specified for " "flashsystem_connection_protocol: " "valid value(s) are %(enabled)s.") % {'prot': params['protocol'], 'enabled': self._protocol}) raise exception.InvalidInput(reason=msg) def _connector_to_hostname_prefix(self, connector): """Translate connector info to storage system host name. Translate a host's name and IP to the prefix of its hostname on the storage subsystem. We create a host name from the host and IP address, replacing any invalid characters (at most 55 characters), and adding a random 8-character suffix to avoid collisions. The total length should be at most 63 characters. """ # Build cleanup translation tables for host names invalid_ch_in_host = '' for num in range(0, 128): ch = six.text_type(chr(num)) if not ch.isalnum() and ch not in [' ', '.', '-', '_']: invalid_ch_in_host = invalid_ch_in_host + ch host_name = connector['host'] if isinstance(host_name, unicode): unicode_host_name_filter = dict((ord(unicode(char)), u'-') for char in invalid_ch_in_host) host_name = host_name.translate(unicode_host_name_filter) elif isinstance(host_name, str): string_host_name_filter = string.maketrans( invalid_ch_in_host, '-' * len(invalid_ch_in_host)) host_name = host_name.translate(string_host_name_filter) else: msg = (_('_create_host: Can not clean host name. Host name ' 'is not unicode or string.')) LOG.error(msg) raise exception.NoValidHost(reason=msg) host_name = six.text_type(host_name) # FlashSystem family doesn't like hostname that starts with number. if not re.match('^[A-Za-z]', host_name): host_name = '_' + host_name return host_name[:55] def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id): """Copy data from src vdisk to dest vdisk. 
To be able to copy data between vdisks, we must ensure that both vdisks have been mapped to host. If vdisk has not been mapped, it must be mapped firstly. When data copy completed, vdisk should be restored to previous mapped or non-mapped status. """ LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) connector = utils.brick_get_connector_properties() (src_map, src_lun_id) = self._is_vdisk_map( src_vdisk_name, connector) (dest_map, dest_lun_id) = self._is_vdisk_map( dest_vdisk_name, connector) src_map_device = None src_properties = None dest_map_device = None dest_properties = None try: if not src_map: src_lun_id = self._map_vdisk_to_host(src_vdisk_name, connector) if not dest_map: dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name, connector) src_properties = self._get_vdisk_map_properties( connector, src_lun_id, src_vdisk_name, src_vdisk_id, self._get_vdisk_params(None)) src_map_device = self._scan_device(src_properties) dest_properties = self._get_vdisk_map_properties( connector, dest_lun_id, dest_vdisk_name, dest_vdisk_id, self._get_vdisk_params(None)) dest_map_device = self._scan_device(dest_properties) src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) # vdisk capacity is bytes, translate into MB size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi volume_utils.copy_volume( src_map_device['path'], dest_map_device['path'], size_in_mb, self.configuration.volume_dd_blocksize) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('_copy_vdisk_data: Failed to ' 'copy %(src)s to %(dest)s.'), {'src': src_vdisk_name, 'dest': dest_vdisk_name}) finally: if not dest_map: self._unmap_vdisk_from_host(dest_vdisk_name, connector) self._remove_device(dest_properties, dest_map_device) if not src_map: self._unmap_vdisk_from_host(src_vdisk_name, connector) self._remove_device(src_properties, src_map_device) LOG.debug( 'leave: _copy_vdisk_data: %(src)s -> %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) def _create_and_copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id): vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) self._driver_assert( vdisk_attr is not None, (_('_create_and_copy_vdisk_data: Failed to get attributes for ' 'vdisk %s.') % src_vdisk_name)) self._create_vdisk(dest_vdisk_name, vdisk_attr['capacity'], 'b', None) # create a timer to lock vdisk that will be used to data copy timer = loopingcall.FixedIntervalLoopingCall( self._set_vdisk_copy_in_progress, [src_vdisk_name, dest_vdisk_name]) timer.start(interval=self._check_lock_interval).wait() timer.stop() try: self._copy_vdisk_data(src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id) finally: self._unset_vdisk_copy_in_progress( [src_vdisk_name, dest_vdisk_name]) def _create_host(self, connector): """Create a new host on the storage system. We create a host and associate it with the given connection information. 
""" LOG.debug('enter: _create_host: host %s.', connector['host']) rand_id = six.text_type(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), rand_id) ports = [] if 'FC' == self._protocol and 'wwpns' in connector: for wwpn in connector['wwpns']: ports.append('-hbawwpn %s' % wwpn) self._driver_assert(ports, (_('_create_host: No connector ports.'))) port1 = ports.pop(0) arg_name, arg_val = port1.split() ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', '"%s"' % host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return('successfully created' in out, '_create_host', ssh_cmd, out, err) for port in ports: arg_name, arg_val = port.split() ssh_cmd = ['svctask', 'addhostport', '-force', arg_name, arg_val, host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( (not out.strip()), '_create_host', ssh_cmd, out, err) LOG.debug( 'leave: _create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def _create_vdisk(self, name, size, unit, opts): """Create a new vdisk.""" LOG.debug('enter: _create_vdisk: vdisk %s.', name) ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', FLASHSYSTEM_VOLPOOL_NAME, '-iogrp', six.text_type(FLASHSYSTEM_VOL_IOGRP), '-size', size, '-unit', unit] out, err = self._ssh(ssh_cmd) self._assert_ssh_return(out.strip(), '_create_vdisk', ssh_cmd, out, err) # Ensure that the output is as expected match_obj = re.search( 'Virtual Disk, id \[([0-9]+)\], successfully created', out) self._driver_assert( match_obj is not None, (_('_create_vdisk %(name)s - did not find ' 'success message in CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'name': name, 'out': six.text_type(out), 'err': six.text_type(err)})) LOG.debug('leave: _create_vdisk: vdisk %s.', name) def _delete_host(self, host_name): """Delete a host on the storage system.""" LOG.debug('enter: _delete_host: host %s.', host_name) ssh_cmd = ['svctask', 'rmhost', host_name] out, err = self._ssh(ssh_cmd) # No output should be returned from rmhost self._assert_ssh_return( (not out.strip()), '_delete_host', ssh_cmd, out, err) LOG.debug('leave: _delete_host: host %s.', host_name) def _delete_vdisk(self, name, force): """Deletes existing vdisks.""" LOG.debug('enter: _delete_vdisk: vdisk %s.', name) # Try to delete volume only if found on the storage vdisk_defined = self._is_vdisk_defined(name) if not vdisk_defined: LOG.warning(_LW('warning: Tried to delete vdisk %s but ' 'it does not exist.'), name) return ssh_cmd = ['svctask', 'rmvdisk', '-force', name] if not force: ssh_cmd.remove('-force') out, err = self._ssh(ssh_cmd) # No output should be returned from rmvdisk self._assert_ssh_return( (not out.strip()), ('_delete_vdisk %(name)s') % {'name': name}, ssh_cmd, out, err) LOG.debug('leave: _delete_vdisk: vdisk %s.', name) def _driver_assert(self, assert_condition, exception_message): """Internal assertion mechanism for CLI output.""" if not assert_condition: LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) def _execute_command_and_parse_attributes(self, ssh_cmd): """Execute command on the FlashSystem and parse attributes. Exception is raised if the information from the system can not be obtained. 
""" LOG.debug( 'enter: _execute_command_and_parse_attributes: ' 'command: %s.', six.text_type(ssh_cmd)) try: out, err = self._ssh(ssh_cmd) except processutils.ProcessExecutionError: LOG.warning(_LW('_execute_command_and_parse_attributes: Failed to ' 'run command: %s.'), six.text_type(ssh_cmd)) # Does not raise exception when command encounters error. # Only return and the upper logic decides what to do. return None self._assert_ssh_return( out, '_execute_command_and_parse_attributes', ssh_cmd, out, err) attributes = {} for attrib_line in out.split('\n'): # If '!' not found, return the string and two empty strings attrib_name, foo, attrib_value = attrib_line.partition('!') if attrib_name is not None and attrib_name.strip(): self._append_dict(attributes, attrib_name, attrib_value) LOG.debug( 'leave: _execute_command_and_parse_attributes: ' 'command: %(cmd)s attributes: %(attr)s.', {'cmd': six.text_type(ssh_cmd), 'attr': six.text_type(attributes)}) return attributes def _find_host_exhaustive(self, connector, hosts): for host in hosts: ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), '_find_host_exhaustive', ssh_cmd, out, err) for attr_line in out.split('\n'): # If '!' not found, return the string and two empty strings attr_name, foo, attr_val = attr_line.partition('!') if (attr_name == 'WWPN' and 'wwpns' in connector and attr_val.lower() in map(str.lower, map(str, connector['wwpns']))): return host return None def _get_hdr_dic(self, header, row, delim): """Return CLI row data as a dictionary indexed by names from header. string. The strings are converted to columns using the delimiter in delim. """ attributes = header.split(delim) values = row.split(delim) self._driver_assert( len(values) == len(attributes), (_('_get_hdr_dic: attribute headers and values do not match.\n ' 'Headers: %(header)s\n Values: %(row)s.') % {'header': six.text_type(header), 'row': six.text_type(row)})) dic = dict((a, v) for a, v in map(None, attributes, values)) return dic def _get_conn_fc_wwpns(self): wwpns = [] cmd = ['svcinfo', 'lsportfc'] generator = self._port_conf_generator(cmd) header = next(generator, None) if not header: return wwpns for port_data in generator: try: if port_data['status'] == 'active': wwpns.append(port_data['WWPN']) except KeyError: self._handle_keyerror('lsportfc', header) return wwpns def _get_fc_wwpns(self): for key in self._storage_nodes: node = self._storage_nodes[key] ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!', node['id']] attributes = self._execute_command_and_parse_attributes(ssh_cmd) wwpns = set(node['WWPN']) for i, s in zip(attributes['port_id'], attributes['port_status']): if 'unconfigured' != s: wwpns.add(i) node['WWPN'] = list(wwpns) LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.'), {'node': node['id'], 'wwpn': node['WWPN']}) def _get_host_from_connector(self, connector): """List the hosts defined in the storage. Return the host name with the given connection info, or None if there is no host fitting that information. 
""" LOG.debug('enter: _get_host_from_connector: %s.', connector) # Get list of host in the storage ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] out, err = self._ssh(ssh_cmd) if not out.strip(): return None # If we have FC information, we have a faster lookup option hostname = None host_lines = out.strip().split('\n') self._assert_ssh_return( host_lines, '_get_host_from_connector', ssh_cmd, out, err) header = host_lines.pop(0).split('!') self._assert_ssh_return( 'name' in header, '_get_host_from_connector', ssh_cmd, out, err) name_index = header.index('name') hosts = map(lambda x: x.split('!')[name_index], host_lines) hostname = self._find_host_exhaustive(connector, hosts) LOG.debug('leave: _get_host_from_connector: host %s.', hostname) return hostname def _get_hostvdisk_mappings(self, host_name): """Return the defined storage mappings for a host.""" return_data = {} ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name] out, err = self._ssh(ssh_cmd) mappings = out.strip().split('\n') if mappings: header = mappings.pop(0) for mapping_line in mappings: mapping_data = self._get_hdr_dic(header, mapping_line, '!') return_data[mapping_data['vdisk_name']] = mapping_data return return_data def _get_vdisk_attributes(self, vdisk_name): """Return vdisk attributes Exception is raised if the information from system can not be parsed/matched to a single vdisk. """ ssh_cmd = [ 'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_name] return self._execute_command_and_parse_attributes(ssh_cmd) def _get_vdiskhost_mappings(self, vdisk_name): """Return the defined storage mappings for a vdisk.""" return_data = {} ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name] out, err = self._ssh(ssh_cmd) mappings = out.strip().split('\n') if mappings: header = mappings.pop(0) for mapping_line in mappings: mapping_data = self._get_hdr_dic(header, mapping_line, '!') return_data[mapping_data['host_name']] = mapping_data return return_data def _get_vdisk_map_properties( self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): """Get the map properties of vdisk.""" LOG.debug( 'enter: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) preferred_node = '0' IO_group = '0' # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for k, node in self._storage_nodes.iteritems(): if vdisk_params['protocol'] != node['protocol']: continue if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not io_group_nodes: msg = (_('_get_vdisk_map_properties: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': vdisk_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry and not vdisk_params['multipath']: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning(_LW('_get_vdisk_map_properties: Did not find a ' 'preferred node for vdisk %s.'), vdisk_name) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = vdisk_id type_str = 'fibre_channel' conn_wwpns = self._get_conn_fc_wwpns() if not conn_wwpns: msg = (_('_get_vdisk_map_properties: Could not get FC ' 'connection information for the host-volume ' 'connection. 
Is the host configured properly ' 'for FC connections?')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties['target_wwn'] = conn_wwpns if "zvm_fcp" in connector: properties['zvm_fcp'] = connector['zvm_fcp'] properties['initiator_target_map'] = self._build_initiator_target_map( connector['wwpns'], conn_wwpns) LOG.debug( 'leave: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) return {'driver_volume_type': type_str, 'data': properties} def _get_vdisk_params(self, type_id): params = self._build_default_params() if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) specs = volume_type.get('extra_specs') for k, value in specs.iteritems(): # Get the scope, if using scope format key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # We generally do not look at capabilities in the driver, but # protocol is a special case where the user asks for a given # protocol and we want both the scheduler and the driver to act # on the value. if ((not scope or scope == 'capabilities') and key == 'storage_protocol'): scope = None key = 'protocol' # Anything keys that the driver should look at should have the # 'drivers' scope. if scope and scope != "drivers": continue if key in params: this_type = type(params[key]).__name__ if this_type == 'int': value = int(value) elif this_type == 'bool': value = strutils.bool_from_string(value) params[key] = value self._check_vdisk_params(params) return params def _handle_keyerror(self, function, header): msg = (_('Did not find expected column in %(fun)s: %(hdr)s.') % {'fun': function, 'hdr': header}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _is_vdisk_defined(self, vdisk_name): """Check if vdisk is defined.""" LOG.debug('enter: _is_vdisk_defined: vdisk %s.', vdisk_name) vdisk_attributes = self._get_vdisk_attributes(vdisk_name) LOG.debug( 'leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s.', {'vol': vdisk_name, 'str': vdisk_attributes is not None}) if vdisk_attributes is None: return False else: return True def _is_vdisk_copy_in_progress(self, vdisk_name): LOG.debug( '_is_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': vdisk_name, 'vdisk_in_progress': six.text_type(self._vdisk_copy_in_progress)}) if vdisk_name not in self._vdisk_copy_in_progress: LOG.debug( '_is_vdisk_copy_in_progress: ' 'vdisk copy is not in progress.') raise loopingcall.LoopingCallDone(retvalue=True) def _is_vdisk_map(self, vdisk_name, connector): """Check if vdisk is mapped. If map, return True and lun id. If not map, return False and expected lun id. """ LOG.debug('enter: _is_vdisk_map: %(src)s.', {'src': vdisk_name}) map_flag = False result_lun = '-1' host_name = self._get_host_from_connector(connector) if host_name is None: return (map_flag, int(result_lun)) mapping_data = self._get_hostvdisk_mappings(host_name) if vdisk_name in mapping_data: map_flag = True result_lun = mapping_data[vdisk_name]['SCSI_id'] else: lun_used = [int(v['SCSI_id']) for v in mapping_data.values()] lun_used.sort() # Start from 1 due to problems with lun id being 0. 
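            # With lun_used sorted ascending, the loop below advances
            # result_lun past every id it collides with, so the first free
            # SCSI id at or above 1 (or one past the largest id in use)
            # becomes the lun id proposed for a later mkvdiskhostmap call.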
result_lun = 1 for lun_id in lun_used: if result_lun < lun_id: break elif result_lun == lun_id: result_lun += 1 LOG.debug( 'leave: _is_vdisk_map: %(src)s ' 'mapped %(map_flag)s %(result_lun)s.', {'src': vdisk_name, 'map_flag': six.text_type(map_flag), 'result_lun': result_lun}) return (map_flag, int(result_lun)) def _log_cli_output_error(self, function, cmd, out, err): LOG.error(_LE('%(fun)s: Failed with unexpected CLI output.\n ' 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n'), {'fun': function, 'cmd': cmd, 'out': six.text_type(out), 'err': six.text_type(err)}) def _map_vdisk_to_host(self, vdisk_name, connector): """Create a mapping between a vdisk to a host.""" LOG.debug( 'enter: _map_vdisk_to_host: vdisk %(vdisk_name)s to ' 'host %(host)s.', {'vdisk_name': vdisk_name, 'host': connector}) # Check if a host object is defined for this host name host_name = self._get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to FlashSystem host_name = self._create_host(connector) # Verify that create_new_host succeeded self._driver_assert( host_name is not None, (_('_create_host failed to return the host name.'))) (map_flag, result_lun) = self._is_vdisk_map(vdisk_name, connector) # Volume is not mapped to host, create a new LUN if not map_flag: ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name, '-scsi', six.text_type(result_lun), vdisk_name] out, err = self._ssh(ssh_cmd, check_exit_code=False) if err and err.startswith('CMMVC6071E'): if not self.configuration.flashsystem_multihostmap_enabled: msg = (_('flashsystem_multihostmap_enabled is set ' 'to False, not allow multi host mapping. ' 'CMMVC6071E The VDisk-to-host mapping ' 'was not created because the VDisk is ' 'already mapped to a host.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for i in range(len(ssh_cmd)): if ssh_cmd[i] == 'mkvdiskhostmap': ssh_cmd.insert(i + 1, '-force') # try to map one volume to multiple hosts out, err = self._ssh(ssh_cmd) LOG.info(_LI('Volume %s is mapping to multiple hosts.'), vdisk_name) self._assert_ssh_return( 'successfully created' in out, '_map_vdisk_to_host', ssh_cmd, out, err) else: self._assert_ssh_return( 'successfully created' in out, '_map_vdisk_to_host', ssh_cmd, out, err) LOG.debug( ('leave: _map_vdisk_to_host: LUN %(result_lun)s, vdisk ' '%(vdisk_name)s, host %(host_name)s.'), {'result_lun': result_lun, 'vdisk_name': vdisk_name, 'host_name': host_name}) return int(result_lun) def _port_conf_generator(self, cmd): ssh_cmd = cmd + ['-delim', '!'] out, err = self._ssh(ssh_cmd) if not out.strip(): return port_lines = out.strip().split('\n') if not port_lines: return header = port_lines.pop(0) yield header for portip_line in port_lines: try: port_data = self._get_hdr_dic(header, portip_line, '!') except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self._log_cli_output_error('_port_conf_generator', ssh_cmd, out, err) yield port_data def _remove_device(self, properties, device): LOG.debug('enter: _remove_device') if not properties or not device: LOG.warning(_LW('_remove_device: invalid properties or device.')) return use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = properties['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, conn=properties) connector.disconnect_volume(properties['data'], device) 
LOG.debug('leave: _remove_device') def _scan_device(self, properties): LOG.debug('enter: _scan_device') use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = properties['driver_volume_type'] connector = utils.brick_get_connector(protocol, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, conn=properties) device = connector.connect_volume(properties['data']) host_device = device['path'] if not connector.check_valid_device(host_device): msg = (_('Unable to access the backend storage ' 'via the path %(path)s.') % {'path': host_device}) raise exception.VolumeBackendAPIException(data=msg) return device LOG.debug('leave: _scan_device') def _unmap_vdisk_from_host(self, vdisk_name, connector): if 'host' in connector: host_name = self._get_host_from_connector(connector) self._driver_assert( host_name is not None, (_('_get_host_from_connector failed to return the host name ' 'for connector.'))) else: host_name = None # Check if vdisk-host mapping exists, remove if it does. If no host # name was given, but only one mapping exists, we can use that. mapping_data = self._get_vdiskhost_mappings(vdisk_name) if not mapping_data: LOG.warning(_LW('_unmap_vdisk_from_host: No mapping of volume ' '%(vol_name)s to any host found.'), {'vol_name': vdisk_name}) return if host_name is None: if len(mapping_data) > 1: LOG.warning(_LW('_unmap_vdisk_from_host: Multiple mappings of ' 'volume %(vdisk_name)s found, no host ' 'specified.'), {'vdisk_name': vdisk_name}) return else: host_name = mapping_data.keys()[0] else: if host_name not in mapping_data: LOG.error(_LE('_unmap_vdisk_from_host: No mapping of volume ' '%(vol_name)s to host %(host_name)s found.'), {'vol_name': vdisk_name, 'host_name': host_name}) return # We have a valid host_name now ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', host_name, vdisk_name] out, err = self._ssh(ssh_cmd) # Verify CLI behaviour - no output is returned from rmvdiskhostmap self._assert_ssh_return( (not out.strip()), '_unmap_vdisk_from_host', ssh_cmd, out, err) # If this host has no more mappings, delete it mapping_data = self._get_hostvdisk_mappings(host_name) if not mapping_data: self._delete_host(host_name) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = {} data['vendor_name'] = 'IBM' data['driver_version'] = self.VERSION data['storage_protocol'] = self._protocol data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = False pool = FLASHSYSTEM_VOLPOOL_NAME backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = '%s_%s' % (self._system_name, pool) data['volume_backend_name'] = backend_name ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes: msg = (_('_update_volume_stats: Could not get storage pool data.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) data['total_capacity_gb'] = ( float(attributes['capacity']) / units.Gi) data['free_capacity_gb'] = ( float(attributes['free_capacity']) / units.Gi) data['easytier_support'] = False # Do not support easy tier data['location_info'] = ( 'FlashSystemDriver:%(sys_id)s:%(pool)s' % {'sys_id': self._system_id, 'pool': pool}) self._stats = data def _set_vdisk_copy_in_progress(self, vdisk_list): LOG.debug( 
'_set_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': six.text_type(vdisk_list), 'vdisk_in_progress': six.text_type(self._vdisk_copy_in_progress)}) get_lock = True self._vdisk_copy_lock.acquire() for vdisk in vdisk_list: if vdisk in self._vdisk_copy_in_progress: get_lock = False break if get_lock: self._vdisk_copy_in_progress.update(vdisk_list) self._vdisk_copy_lock.release() if get_lock: LOG.debug( '_set_vdisk_copy_in_progress: %s.', six.text_type(self._vdisk_copy_in_progress)) raise loopingcall.LoopingCallDone(retvalue=True) def _unset_vdisk_copy_in_progress(self, vdisk_list): LOG.debug( '_unset_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': six.text_type(vdisk_list), 'vdisk_in_progress': six.text_type(self._vdisk_copy_in_progress)}) self._vdisk_copy_lock.acquire() for vdisk in vdisk_list: if vdisk in self._vdisk_copy_in_progress: self._vdisk_copy_in_progress.remove(vdisk) self._vdisk_copy_lock.release() def _wait_vdisk_copy_completed(self, vdisk_name): timer = loopingcall.FixedIntervalLoopingCall( self._is_vdisk_copy_in_progress, vdisk_name) timer.start(interval=self._check_lock_interval).wait() timer.stop() def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" LOG.debug('enter: do_setup') self._context = ctxt # Get storage system name and id ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes or not ('name' in attributes): msg = (_('do_setup: Could not get system name.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._system_name = attributes['name'] self._system_id = attributes['id'] # Validate value of open_access_enabled flag, for now only # support when open_access_enabled is off if not attributes or not ('open_access_enabled' in attributes) or ( attributes['open_access_enabled'] != 'off'): msg = (_('do_setup: open_access_enabled is not off.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Validate that the array exists pool = FLASHSYSTEM_VOLPOOL_NAME ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes or not ('status' in attributes) or ( attributes['status'] == 'offline'): msg = (_('do_setup: Array does not exist or is offline.')) LOG.error(msg) raise exception.InvalidInput(reason=msg) # Get the FC names of the FlashSystem nodes ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), 'do_setup', ssh_cmd, out, err) nodes = out.strip().splitlines() self._assert_ssh_return(nodes, 'do_setup', ssh_cmd, out, err) header = nodes.pop(0) for node_line in nodes: try: node_data = self._get_hdr_dic(header, node_line, '!') except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self._log_cli_output_error('do_setup', ssh_cmd, out, err) node = {} try: node['id'] = node_data['id'] node['name'] = node_data['name'] node['IO_group'] = node_data['IO_group_id'] node['WWNN'] = node_data['WWNN'] node['status'] = node_data['status'] node['WWPN'] = [] node['protocol'] = None if node['status'] == 'online': self._storage_nodes[node['id']] = node except KeyError: self._handle_keyerror('lsnode', header) # Get the WWPNs of the FlashSystem nodes self._get_fc_wwpns() # For each node, check what connection modes it supports. Delete any # nodes that do not support any types (may be partially configured). 
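        # A node whose WWPN list is still empty after _get_fc_wwpns() has no
        # usable FC ports, so it is dropped from _storage_nodes below.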
to_delete = [] for k, node in self._storage_nodes.iteritems(): if not node['WWPN']: to_delete.append(k) for delkey in to_delete: del self._storage_nodes[delkey] # Make sure we have at least one node configured self._driver_assert( self._storage_nodes, 'do_setup: No configured nodes.') self._protocol = node['protocol'] = 'FC' # Set for vdisk synchronization self._vdisk_copy_in_progress = set() self._vdisk_copy_lock = threading.Lock() self._check_lock_interval = 5 LOG.debug('leave: do_setup') def check_for_setup_error(self): """Ensure that the flags are set properly.""" LOG.debug('enter: check_for_setup_error') # Check that we have the system ID information if self._system_name is None: msg = ( _('check_for_setup_error: Unable to determine system name.')) raise exception.VolumeBackendAPIException(data=msg) if self._system_id is None: msg = (_('check_for_setup_error: Unable to determine system id.')) raise exception.VolumeBackendAPIException(data=msg) required_flags = ['san_ip', 'san_ssh_port', 'san_login'] for flag in required_flags: if not self.configuration.safe_get(flag): msg = (_('%s is not set.') % flag) raise exception.InvalidInput(reason=msg) # Ensure that either password or keyfile were set if not (self.configuration.san_password or self.configuration.san_private_key): msg = (_('check_for_setup_error: Password or SSH private key ' 'is required for authentication: set either ' 'san_password or san_private_key option.')) raise exception.InvalidInput(reason=msg) params = self._build_default_params() self._check_vdisk_params(params) LOG.debug('leave: check_for_setup_error') def validate_connector(self, connector): """Check connector.""" if 'FC' == self._protocol and 'wwpns' not in connector: msg = (_LE('The connector does not contain the ' 'required information: wwpns is missing')) LOG.error(msg) raise exception.InvalidConnectorException(missing='wwpns') def create_volume(self, volume): """Create volume.""" vdisk_name = volume['name'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) vdisk_size = six.text_type(volume['size']) return self._create_vdisk(vdisk_name, vdisk_size, 'gb', vdisk_params) def delete_volume(self, volume): """Delete volume.""" vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._delete_vdisk(vdisk_name, False) def extend_volume(self, volume, new_size): """Extend volume.""" LOG.debug('enter: extend_volume: volume %s.', volume['name']) vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) extend_amt = int(new_size) - volume['size'] ssh_cmd = (['svctask', 'expandvdisksize', '-size', six.text_type(extend_amt), '-unit', 'gb', vdisk_name]) out, err = self._ssh(ssh_cmd) # No output should be returned from expandvdisksize self._assert_ssh_return( (not out.strip()), 'extend_volume', ssh_cmd, out, err) LOG.debug('leave: extend_volume: volume %s.', volume['name']) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): """Perform the necessary work so that a FC connection can be made. To be able to create a FC connection from a given host to a volume, we must: 1. Translate the given WWNN to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. 
Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug( 'enter: initialize_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] vdisk_id = volume['id'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) self._wait_vdisk_copy_completed(vdisk_name) self._driver_assert( self._is_vdisk_defined(vdisk_name), (_('initialize_connection: vdisk %s is not defined.') % vdisk_name)) lun_id = self._map_vdisk_to_host(vdisk_name, connector) properties = {} try: properties = self._get_vdisk_map_properties( connector, lun_id, vdisk_name, vdisk_id, vdisk_params) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error(_LE('initialize_connection: Failed to collect ' 'return properties for volume %(vol)s and ' 'connector %(conn)s.'), {'vol': volume, 'conn': connector}) LOG.debug( 'leave: initialize_connection:\n volume: %(vol)s\n connector ' '%(conn)s\n properties: %(prop)s.', {'vol': volume, 'conn': connector, 'prop': properties}) return properties @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Cleanup after connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug( 'enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._unmap_vdisk_from_host(vdisk_name, connector) properties = {} conn_wwpns = self._get_conn_fc_wwpns() properties['target_wwn'] = conn_wwpns properties['initiator_target_map'] = self._build_initiator_target_map( connector['wwpns'], conn_wwpns) LOG.debug( 'leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return { 'driver_volume_type': 'fibre_channel', 'data': properties } def create_snapshot(self, snapshot): """Create snapshot from volume.""" LOG.debug( 'enter: create_snapshot: create %(snap)s from %(vol)s.', {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) status = snapshot['volume']['status'] if status not in ['available', 'in-use']: msg = (_( 'create_snapshot: Volume status must be "available" or ' '"in-use" for snapshot. 
The invalid status is %s.') % status) raise exception.InvalidVolume(msg) self._create_and_copy_vdisk_data(snapshot['volume']['name'], snapshot['volume']['id'], snapshot['name'], snapshot['id']) LOG.debug( 'leave: create_snapshot: create %(snap)s from %(vol)s.', {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) def delete_snapshot(self, snapshot): """Delete snapshot.""" LOG.debug( 'enter: delete_snapshot: delete %(snap)s.', {'snap': snapshot['name']}) self._wait_vdisk_copy_completed(snapshot['name']) self._delete_vdisk(snapshot['name'], False) LOG.debug( 'leave: delete_snapshot: delete %(snap)s.', {'snap': snapshot['name']}) def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot.""" LOG.debug( 'enter: create_volume_from_snapshot: create %(vol)s from ' '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) if volume['size'] != snapshot['volume_size']: msg = (_('create_volume_from_snapshot: Volume size is different ' 'from snapshot based volume.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) status = snapshot['status'] if status != 'available': msg = (_('create_volume_from_snapshot: Snapshot status ' 'must be "available" for creating volume. ' 'The invalid status is: %s.') % status) raise exception.InvalidSnapshot(msg) self._create_and_copy_vdisk_data(snapshot['name'], snapshot['id'], volume['name'], volume['id']) LOG.debug( 'leave: create_volume_from_snapshot: create %(vol)s from ' '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) def create_cloned_volume(self, volume, src_volume): """Create volume from a source volume.""" LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.', {'src': src_volume['name'], 'vol': volume['name']}) if src_volume['size'] != volume['size']: msg = (_('create_cloned_volume: Source and destination ' 'size differ.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) self._create_and_copy_vdisk_data(src_volume['name'], src_volume['id'], volume['name'], volume['id']) LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.', {'src': src_volume['name'], 'vol': volume['name']}) def get_volume_stats(self, refresh=False): """Get volume stats. If we haven't gotten stats yet or 'refresh' is True, run update the stats first. """ if not self._stats or refresh: self._update_volume_stats() return self._stats
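

# Illustrative only: a standalone sketch of the '!'-delimited attribute
# parsing that _execute_command_and_parse_attributes() and _append_dict()
# perform above. The helper name and the sample data below are made up;
# real output comes from CLI commands such as 'svcinfo lssystem -delim !'.
def _parse_cli_attributes_example(out):
    """Turn 'name!value' CLI lines into a dict; repeated names become lists."""
    attributes = {}
    for line in out.split('\n'):
        # partition() returns (line, '', '') when no '!' is present.
        name, _sep, value = line.partition('!')
        name, value = name.strip(), value.strip()
        if not name:
            continue
        if name not in attributes:
            attributes[name] = value
        elif isinstance(attributes[name], list):
            attributes[name].append(value)
        else:
            attributes[name] = [attributes[name], value]
    return attributes

# Example:
#   _parse_cli_attributes_example('id!000001\nport_id!WWPN1\nport_id!WWPN2')
#   -> {'id': '000001', 'port_id': ['WWPN1', 'WWPN2']}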
from .. import rfb_icons from ..rfb_utils import shadergraph_utils from ..rfb_utils import draw_utils from .rman_ui_base import _RManPanelHeader from ..rman_render import RmanRender import bpy class PRMAN_PT_Renderman_UI_Panel(bpy.types.Panel, _RManPanelHeader): '''Adds a RenderMan panel to the RenderMan VIEW_3D side tab ''' bl_label = "RenderMan" bl_space_type = "VIEW_3D" bl_region_type = "UI" bl_category = "Renderman" def draw(self, context): layout = self.layout scene = context.scene rm = scene.renderman if context.scene.render.engine != "PRMAN_RENDER": return # Render is_rman_interactive_running = rm.is_rman_interactive_running if is_rman_interactive_running: row = layout.row(align=True) rman_rerender_controls = rfb_icons.get_icon("rman_ipr_cancel") row.operator('renderman.stop_ipr', text="Stop IPR", icon_value=rman_rerender_controls.icon_id) elif rm.is_rman_running: row = layout.row(align=True) rman_rerender_controls = rfb_icons.get_icon("rman_ipr_cancel") row.operator('renderman.stop_render', text="Stop IPR", icon_value=rman_rerender_controls.icon_id) else: row = layout.row(align=True) rman_render_icon = rfb_icons.get_icon("rman_render") row.operator("render.render", text="Render", icon_value=rman_render_icon.icon_id) row.prop(context.scene, "rm_render", text="", icon=draw_utils.get_open_close_icon(context.scene.rm_render)) if context.scene.rm_render: scene = context.scene rd = scene.render box = layout.box() row = box.row(align=True) # Display Driver row.prop(rm, "render_into") row = box.row(align=True) row.prop(rm, "do_holdout_matte", text="Render Holdouts") # animation row = box.row(align=True) rman_batch = rfb_icons.get_icon("rman_batch") row.operator("render.render", text="Render Animation", icon_value=rman_batch.icon_id).animation = True row = layout.row(align=True) rman_rerender_controls = rfb_icons.get_icon("rman_ipr_on") row.operator('renderman.start_ipr', text="IPR", icon_value=rman_rerender_controls.icon_id) row.prop(context.scene, "rm_ipr", text="", icon=draw_utils.get_open_close_icon(context.scene.rm_render)) if context.scene.rm_ipr: scene = context.scene rd = scene.render box = layout.box() row = box.row(align=True) # Display Driver row.prop(rm, "render_into") row = layout.row(align=True) rman_batch = rfb_icons.get_icon("rman_batch") row.operator("renderman.external_render", text="External Render", icon_value=rman_batch.icon_id) row.prop(context.scene, "rm_render_external", text="", icon=draw_utils.get_open_close_icon(context.scene.rm_render_external)) if context.scene.rm_render_external: scene = context.scene rd = scene.render box = layout.box() row = box.row(align=True) # animation row = box.row(align=True) row.prop(rm, "external_animation") row = box.row(align=True) row.enabled = rm.external_animation row.prop(scene, "frame_start", text="Start") row.prop(scene, "frame_end", text="End") # spool render row = box.row(align=True) col = row.column() col.prop(rm, "queuing_system", text='') layout.separator() # Create Camera row = layout.row(align=True) row.operator("object.add_prm_camera", text="Add Camera", icon='CAMERA_DATA') row.prop(context.scene, "prm_cam", text="", icon=draw_utils.get_open_close_icon(context.scene.prm_cam)) if context.scene.prm_cam: ob = bpy.context.object box = layout.box() row = box.row(align=True) row.menu("PRMAN_MT_Camera_List_Menu", text="Camera List", icon='CAMERA_DATA') if ob.type == 'CAMERA': row = box.row(align=True) row.prop(ob, "name", text="", icon='LIGHT_HEMI') row.prop(ob, "hide_viewport", text="") row.prop(ob, "hide_render", 
icon='RESTRICT_RENDER_OFF', text="") row.operator("object.delete_cameras", text="", icon='PANEL_CLOSE') row = box.row(align=True) row.scale_x = 2 row.operator("view3d.object_as_camera", text="", icon='CURSOR') row.scale_x = 2 row.operator("view3d.view_camera", text="", icon='HIDE_OFF') if context.space_data.lock_camera == False: row.scale_x = 2 row.operator("wm.context_toggle", text="", icon='UNLOCKED').data_path = "space_data.lock_camera" elif context.space_data.lock_camera == True: row.scale_x = 2 row.operator("wm.context_toggle", text="", icon='LOCKED').data_path = "space_data.lock_camera" row.scale_x = 2 row.operator("view3d.camera_to_view", text="", icon='VIEW3D') row = box.row(align=True) row.label(text="Depth Of Field :") row = box.row(align=True) row.prop(context.object.data.dof, "focus_object", text="") #row.prop(context.object.data.cycles, "aperture_type", text="") row = box.row(align=True) row.prop(context.object.data.dof, "focus_distance", text="Distance") else: row = layout.row(align=True) row.label(text="No Camera Selected") layout.separator() layout.label(text="Lights:") box = layout.box() box.menu('VIEW3D_MT_RM_Add_Light_Menu', text='Add Light', icon_value=bpy.types.VIEW3D_MT_RM_Add_Light_Menu.get_icon_id()) box.menu('VIEW3D_MT_RM_Add_LightFilter_Menu', text='Add Light Filter', icon_value=bpy.types.VIEW3D_MT_RM_Add_LightFilter_Menu.get_icon_id()) layout.separator() layout.label(text="Apps:") box = layout.box() rman_it = rfb_icons.get_icon("rman_it") box.operator("renderman.start_it", icon_value=rman_it.icon_id) rman_lq = rfb_icons.get_icon("rman_localqueue") box.operator("renderman.start_localqueue", icon_value=rman_lq.icon_id) rman_lapp = rfb_icons.get_icon("rman_licenseapp") box.operator("renderman.start_licenseapp", icon_value=rman_lapp.icon_id) selected_objects = [] selected_light_objects = [] if context.selected_objects: for obj in context.selected_objects: if shadergraph_utils.is_rman_light(obj, include_light_filters=False): selected_light_objects.append(obj) elif obj.type not in ['CAMERA', 'LIGHT', 'SPEAKER']: selected_objects.append(obj) if selected_objects: layout.separator() layout.label(text="Seleced Objects:") box = layout.box() # Add Bxdf box.menu('VIEW3D_MT_RM_Add_bxdf_Menu', text='Add New Material', icon_value=bpy.types.VIEW3D_MT_RM_Add_bxdf_Menu.get_icon_id()) # Make Selected Geo Emissive rman_meshlight = rfb_icons.get_icon("out_PxrMeshLight") box.operator("object.rman_create_meshlight", text="Convert to Mesh Light", icon_value=rman_meshlight.icon_id) # Add Subdiv Sheme rman_subdiv = rfb_icons.get_icon("rman_subdiv") box.operator("mesh.rman_convert_subdiv", text="Convert to Subdiv", icon_value=rman_subdiv.icon_id) # Add/Create RIB Box / # Create Archive node box.menu('VIEW3D_MT_RM_Add_Export_Menu', icon_value=bpy.types.VIEW3D_MT_RM_Add_Export_Menu.get_icon_id()) # Diagnose layout.separator() layout.label(text='Diagnose') box = layout.box() box.enabled = not is_rman_interactive_running rman_rib = rfb_icons.get_icon('rman_rib_small') box.operator("renderman.open_scene_rib", text='View RIB', icon_value=rman_rib.icon_id) if selected_objects or selected_light_objects: box.operator("renderman.open_selected_rib", text='View Selected RIB', icon_value=rman_rib.icon_id) layout.separator() # RenderMan Doc rman_help = rfb_icons.get_icon("rman_help") layout.operator("wm.url_open", text="RenderMan Docs", icon_value=rman_help.icon_id).url = "https://rmanwiki.pixar.com/display/RFB24" rman_info = rfb_icons.get_icon("rman_blender") layout.operator("renderman.about_renderman", 
icon_value=rman_info.icon_id) class RENDER_PT_renderman_live_stats(bpy.types.Panel, _RManPanelHeader): bl_label = "RenderMan Live Statistics" bl_options = {'DEFAULT_CLOSED'} bl_space_type = "VIEW_3D" bl_region_type = "UI" bl_category = "Renderman" def draw(self, context): layout = self.layout scene = context.scene rm = scene.renderman rr = RmanRender.get_rman_render() layout.label(text='Diagnostics') layout.separator() box = layout.box() if rr.stats_mgr.web_socket_enabled: if rr.stats_mgr.is_connected(): for label, data in rr.stats_mgr.render_live_stats.items(): if label: box.label(text='%s: %s' % (label, data)) if rr.rman_running: box.prop(rm, 'roz_stats_iterations', slider=True, text='Iterations (%d / %d)' % (rr.stats_mgr._iterations, rr.stats_mgr._maxSamples)) box.prop(rm, 'roz_stats_progress', slider=True) else: box.label(text='(not connected)') layout.operator('renderman.attach_stats_render') else: box.label(text='(live stats disabled)') classes = [ PRMAN_PT_Renderman_UI_Panel, RENDER_PT_renderman_live_stats ] def register(): for cls in classes: bpy.utils.register_class(cls) def unregister(): for cls in classes: try: bpy.utils.unregister_class(cls) except RuntimeError: rfb_log().debug('Could not unregister class: %s' % str(cls)) pass
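

# Illustrative only: the panel above expands/collapses its sub-boxes by
# reading boolean scene properties such as scene.rm_render, scene.rm_ipr,
# scene.rm_render_external and scene.prm_cam. Those properties are
# registered elsewhere in the add-on; the sketch below shows how toggles
# like these are typically declared with bpy.props, and is not the
# add-on's actual registration code.
def _register_ui_toggle_example():
    bpy.types.Scene.rm_render = bpy.props.BoolProperty(
        name="Show Render Options", default=False)
    bpy.types.Scene.rm_ipr = bpy.props.BoolProperty(
        name="Show IPR Options", default=False)
    bpy.types.Scene.rm_render_external = bpy.props.BoolProperty(
        name="Show External Render Options", default=False)
    bpy.types.Scene.prm_cam = bpy.props.BoolProperty(
        name="Show Camera Tools", default=False)


def _unregister_ui_toggle_example():
    for attr in ('rm_render', 'rm_ipr', 'rm_render_external', 'prm_cam'):
        if hasattr(bpy.types.Scene, attr):
            delattr(bpy.types.Scene, attr)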
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bunch import logging import six from shade import exc from shade import _utils NON_CALLABLES = (six.string_types, bool, dict, int, float, list, type(None)) log = logging.getLogger(__name__) def find_nova_addresses(addresses, ext_tag=None, key_name=None, version=4): ret = [] for (k, v) in iter(addresses.items()): if key_name is not None and k != key_name: # key_name is specified and it doesn't match the current network. # Continue with the next one continue for interface_spec in v: if ext_tag is not None: if 'OS-EXT-IPS:type' not in interface_spec: # ext_tag is specified, but this interface has no tag # We could actually return right away as this means that # this cloud doesn't support OS-EXT-IPS. Nevertheless, # it would be better to perform an explicit check. e.g.: # cloud._has_nova_extension('OS-EXT-IPS') # But this needs cloud to be passed to this function. continue elif interface_spec['OS-EXT-IPS:type'] != ext_tag: # Type doesn't match, continue with next one continue if interface_spec['version'] == version: ret.append(interface_spec['addr']) return ret def get_server_ip(server, **kwargs): addrs = find_nova_addresses(server['addresses'], **kwargs) if not addrs: return None return addrs[0] def get_server_private_ip(server, cloud=None): """Find the private IP address If Neutron is available, search for a port on a network where `router:external` is False and `shared` is False. This combination indicates a private network with private IP addresses. This port should have the private IP. If Neutron is not available, or something goes wrong communicating with it, as a fallback, try the list of addresses associated with the server dict, looking for an IP type tagged as 'fixed' in the network named 'private'. Last resort, ignore the IP type and just look for an IP on the 'private' network (e.g., Rackspace). """ if cloud and cloud.has_service('network'): try: server_ports = cloud.search_ports( filters={'device_id': server['id']}) nets = cloud.search_networks( filters={'router:external': False, 'shared': False}) except Exception as e: log.debug( "Something went wrong talking to neutron API: " "'{msg}'. Trying with Nova.".format(msg=str(e))) else: for net in nets: for port in server_ports: if net['id'] == port['network_id']: for ip in port['fixed_ips']: if _utils.is_ipv4(ip['ip_address']): return ip['ip_address'] ip = get_server_ip(server, ext_tag='fixed', key_name='private') if ip: return ip # Last resort, and Rackspace return get_server_ip(server, key_name='private') def get_server_external_ipv4(cloud, server): """Find an externally routable IP for the server. 
There are 5 different scenarios we have to account for: * Cloud has externally routable IP from neutron but neutron APIs don't work (only info available is in nova server record) (rackspace) * Cloud has externally routable IP from neutron (runabove, ovh) * Cloud has externally routable IP from neutron AND supports optional private tenant networks (vexxhost, unitedstack) * Cloud only has private tenant network provided by neutron and requires floating-ip for external routing (dreamhost, hp) * Cloud only has private tenant network provided by nova-network and requires floating-ip for external routing (auro) :param cloud: the cloud we're working with :param server: the server dict from which we want to get an IPv4 address :return: a string containing the IPv4 address or None """ if server['accessIPv4']: return server['accessIPv4'] if cloud.has_service('network'): try: # Search a fixed IP attached to an external net. Unfortunately # Neutron ports don't have a 'floating_ips' attribute server_ports = cloud.search_ports( filters={'device_id': server['id']}) ext_nets = cloud.search_networks(filters={'router:external': True}) except Exception as e: log.debug( "Something went wrong talking to neutron API: " "'{msg}'. Trying with Nova.".format(msg=str(e))) # Fall-through, trying with Nova else: for net in ext_nets: for port in server_ports: if net['id'] == port['network_id']: for ip in port['fixed_ips']: if _utils.is_ipv4(ip['ip_address']): return ip['ip_address'] # The server doesn't have an interface on an external network so it # can either have a floating IP or have no way to be reached from # outside the cloud. # Fall-through, trying with Nova # The cloud doesn't support Neutron or Neutron can't be contacted. The # server might have fixed addresses that are reachable from outside the # cloud (e.g. Rax) or have plain ol' floating IPs # Try to get an address from a network named 'public' ext_ip = get_server_ip(server, key_name='public') if ext_ip is not None: return ext_ip # Try to find a globally routable IP address for interfaces in server['addresses'].values(): for interface in interfaces: if _utils.is_ipv4(interface['addr']) and \ _utils.is_globally_routable_ipv4(interface['addr']): return interface['addr'] # Last, try to get a floating IP address ext_ip = get_server_ip(server, ext_tag='floating') if ext_ip is not None: return ext_ip return None def get_server_external_ipv6(server): """ Get an IPv6 address reachable from outside the cloud. This function assumes that if a server has an IPv6 address, that address is reachable from outside the cloud. 
:param server: the server from which we want to get an IPv6 address :return: a string containing the IPv6 address or None """ if server['accessIPv6']: return server['accessIPv6'] addresses = find_nova_addresses(addresses=server['addresses'], version=6) if addresses: return addresses[0] return None def get_groups_from_server(cloud, server, server_vars): groups = [] region = cloud.region_name cloud_name = cloud.name # Create a group for the cloud groups.append(cloud_name) # Create a group on region groups.append(region) # And one by cloud_region groups.append("%s_%s" % (cloud_name, region)) # Check if group metadata key in servers' metadata group = server['metadata'].get('group') if group: groups.append(group) for extra_group in server['metadata'].get('groups', '').split(','): if extra_group: groups.append(extra_group) groups.append('instance-%s' % server['id']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: groups.append('%s-%s' % (key, server_vars[key]['name'])) for key, value in iter(server['metadata'].items()): groups.append('meta-%s_%s' % (key, value)) az = server_vars.get('az', None) if az: # Make groups for az, region_az and cloud_region_az groups.append(az) groups.append('%s_%s' % (region, az)) groups.append('%s_%s_%s' % (cloud.name, region, az)) return groups def get_hostvars_from_server(cloud, server, mounts=None): server_vars = server server_vars.pop('links', None) # First, add an IP address. Set it to '' rather than None if it does # not exist to remain consistent with the pre-existing missing values server_vars['public_v4'] = get_server_external_ipv4(cloud, server) or '' server_vars['public_v6'] = get_server_external_ipv6(server) or '' server_vars['private_v4'] = get_server_private_ip(server, cloud) or '' if cloud.private: interface_ip = server_vars['private_v4'] else: interface_ip = server_vars['public_v4'] if interface_ip: server_vars['interface_ip'] = interface_ip # Some clouds do not set these, but they're a regular part of the Nova # server record. Since we know them, go ahead and set them. 
In the case # where they were set previous, we use the values, so this will not break # clouds that provide the information server_vars['accessIPv4'] = server_vars['public_v4'] server_vars['accessIPv6'] = server_vars['public_v6'] server_vars['region'] = cloud.region_name server_vars['cloud'] = cloud.name flavor_id = server['flavor']['id'] flavor_name = cloud.get_flavor_name(flavor_id) if flavor_name: server_vars['flavor']['name'] = flavor_name server_vars['flavor'].pop('links', None) # OpenStack can return image as a string when you've booted from volume if str(server['image']) == server['image']: image_id = server['image'] server_vars['image'] = dict(id=image_id) else: image_id = server['image'].get('id', None) if image_id: image_name = cloud.get_image_name(image_id) if image_name: server_vars['image']['name'] = image_name server_vars['image'].pop('links', None) volumes = [] if cloud.has_service('volumes'): try: for volume in cloud.get_volumes(server): # Make things easier to consume elsewhere volume['device'] = volume['attachments'][0]['device'] volumes.append(volume) except exc.OpenStackCloudException: pass server_vars['volumes'] = volumes if mounts: for mount in mounts: for vol in server_vars['volumes']: if vol['display_name'] == mount['display_name']: if 'mount' in mount: vol['mount'] = mount['mount'] az = server_vars.get('OS-EXT-AZ:availability_zone', None) if az: server_vars['az'] = az return server_vars def obj_to_dict(obj): """ Turn an object with attributes into a dict suitable for serializing. Some of the things that are returned in OpenStack are objects with attributes. That's awesome - except when you want to expose them as JSON structures. We use this as the basis of get_hostvars_from_server above so that we can just have a plain dict of all of the values that exist in the nova metadata for a server. """ instance = bunch.Bunch() for key in dir(obj): value = getattr(obj, key) if isinstance(value, NON_CALLABLES) and not key.startswith('_'): instance[key] = value return instance def obj_list_to_dict(list): """Enumerate through lists of objects and return lists of dictonaries. Some of the objects returned in OpenStack are actually lists of objects, and in order to expose the data structures as JSON, we need to facilitate the conversion to lists of dictonaries. """ new_list = [] for obj in list: new_list.append(obj_to_dict(obj)) return new_list def warlock_to_dict(obj): # glanceclient v2 uses warlock to construct its objects. Warlock does # deep black magic to attribute look up to support validation things that # means we cannot use normal obj_to_dict obj_dict = bunch.Bunch() for (key, value) in obj.items(): if isinstance(value, NON_CALLABLES) and not key.startswith('_'): obj_dict[key] = value return obj_dict def warlock_list_to_dict(list): new_list = [] for obj in list: new_list.append(warlock_to_dict(obj)) return new_list
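

# Illustrative only: a standalone, slightly simplified sketch of the
# filtering that find_nova_addresses() performs above. The 'addresses'
# structure in the example is made up, but mirrors what a Nova server
# record typically contains: network name -> list of interface dicts
# carrying 'addr', 'version' and an optional 'OS-EXT-IPS:type' tag.
def _pick_addresses_example(addresses, ext_tag=None, key_name=None, version=4):
    picked = []
    for net_name, interfaces in addresses.items():
        if key_name is not None and net_name != key_name:
            continue
        for spec in interfaces:
            if ext_tag is not None and spec.get('OS-EXT-IPS:type') != ext_tag:
                continue
            if spec['version'] == version:
                picked.append(spec['addr'])
    return picked

# Example:
#   _pick_addresses_example(
#       {'private': [
#           {'addr': '10.0.0.5', 'version': 4, 'OS-EXT-IPS:type': 'fixed'},
#           {'addr': '203.0.113.10', 'version': 4,
#            'OS-EXT-IPS:type': 'floating'}]},
#       ext_tag='fixed', key_name='private')
#   -> ['10.0.0.5']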
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Lightweight record format. This format implements log file format from leveldb: http://leveldb.googlecode.com/svn/trunk/doc/log_format.txt Full specification of format follows in case leveldb decides to change it. The log file contents are a sequence of 32KB blocks. The only exception is that the tail of the file may contain a partial block. Each block consists of a sequence of records: block := record* trailer? record := checksum: uint32 // masked crc32c of type and data[] length: uint16 type: uint8 // One of FULL, FIRST, MIDDLE, LAST data: uint8[length] A record never starts within the last six bytes of a block (since it won't fit). Any leftover bytes here form the trailer, which must consist entirely of zero bytes and must be skipped by readers. Aside: if exactly seven bytes are left in the current block, and a new non-zero length record is added, the writer must emit a FIRST record (which contains zero bytes of user data) to fill up the trailing seven bytes of the block and then emit all of the user data in subsequent blocks. More types may be added in the future. Some Readers may skip record types they do not understand, others may report that some data was skipped. FULL == 1 FIRST == 2 MIDDLE == 3 LAST == 4 The FULL record contains the contents of an entire user record. FIRST, MIDDLE, LAST are types used for user records that have been split into multiple fragments (typically because of block boundaries). FIRST is the type of the first fragment of a user record, LAST is the type of the last fragment of a user record, and MID is the type of all interior fragments of a user record. Example: consider a sequence of user records: A: length 1000 B: length 97270 C: length 8000 A will be stored as a FULL record in the first block. B will be split into three fragments: first fragment occupies the rest of the first block, second fragment occupies the entirety of the second block, and the third fragment occupies a prefix of the third block. This will leave six bytes free in the third block, which will be left empty as the trailer. C will be stored as a FULL record in the fourth block. """ import logging import struct import google from google.appengine.api.files import crc32c BLOCK_SIZE = 32 * 1024 HEADER_FORMAT = '<IHB' HEADER_LENGTH = struct.calcsize(HEADER_FORMAT) RECORD_TYPE_NONE = 0 RECORD_TYPE_FULL = 1 RECORD_TYPE_FIRST = 2 RECORD_TYPE_MIDDLE = 3 RECORD_TYPE_LAST = 4 class Error(Exception): """Base class for exceptions in this module.""" class InvalidRecordError(Error): """Raised when invalid record encountered.""" class FileWriter(object): """Interface specification for writers to be used with records module.""" def write(self, data): """Write data to the file. Args: data: byte array, string or iterable over bytes. """ raise NotImplementedError() class FileReader(object): """Interface specification for writers to be used with recordrecords module. 
FileReader defines a reader with position and efficient seek/position determining. All reads occur at current position. """ def read(self, size): """Read data from file. Reads data from current position and advances position past the read data block. Args: size: number of bytes to read. Returns: iterable over bytes. If number of bytes read is less then 'size' argument, it is assumed that end of file was reached. """ raise NotImplementedError() def tell(self): """Get current file position. Returns: current position as a byte offset in the file as integer. """ raise NotImplementedError() _CRC_MASK_DELTA = 0xa282ead8 def _mask_crc(crc): """Mask crc. Args: crc: integer crc. Returns: masked integer crc. """ return (((crc >> 15) | (crc << 17)) + _CRC_MASK_DELTA) & 0xFFFFFFFFL def _unmask_crc(masked_crc): """Unmask crc. Args: masked_crc: masked integer crc. Retruns: orignal crc. """ rot = (masked_crc - _CRC_MASK_DELTA) & 0xFFFFFFFFL return ((rot >> 17) | (rot << 15)) & 0xFFFFFFFFL class RecordsWriter(object): """A writer for records format. This writer should be used only inside with statement: with records.RecordsWriter(file) as writer: writer.write("record") RecordsWriter will pad last block with 0 when exiting with statement scope. """ def __init__(self, writer, _pad_last_block=True): """Constructor. Args: writer: a writer to use. Should conform to FileWriter interface. """ self.__writer = writer self.__position = 0 self.__entered = False self.__pad_last_block = _pad_last_block def __write_record(self, record_type, data): """Write single physical record.""" length = len(data) crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type]) crc = crc32c.crc_update(crc, data) crc = crc32c.crc_finalize(crc) self.__writer.write( struct.pack(HEADER_FORMAT, _mask_crc(crc), length, record_type)) self.__writer.write(data) self.__position += HEADER_LENGTH + length def write(self, data): """Write single record. Args: data: record data to write as string, byte array or byte sequence. """ if not self.__entered: raise Exception("RecordWriter should be used only with 'with' statement.") block_remaining = BLOCK_SIZE - self.__position % BLOCK_SIZE if block_remaining < HEADER_LENGTH: self.__writer.write('\x00' * block_remaining) self.__position += block_remaining block_remaining = BLOCK_SIZE if block_remaining < len(data) + HEADER_LENGTH: first_chunk = data[:block_remaining - HEADER_LENGTH] self.__write_record(RECORD_TYPE_FIRST, first_chunk) data = data[len(first_chunk):] while True: block_remaining = BLOCK_SIZE - self.__position % BLOCK_SIZE if block_remaining >= len(data) + HEADER_LENGTH: self.__write_record(RECORD_TYPE_LAST, data) break else: chunk = data[:block_remaining - HEADER_LENGTH] self.__write_record(RECORD_TYPE_MIDDLE, chunk) data = data[len(chunk):] else: self.__write_record(RECORD_TYPE_FULL, data) def __enter__(self): self.__entered = True return self def __exit__(self, atype, value, traceback): self.close() def close(self): if self.__pad_last_block: pad_length = BLOCK_SIZE - self.__position % BLOCK_SIZE if pad_length and pad_length != BLOCK_SIZE: self.__writer.write('\x00' * pad_length) class RecordsReader(object): """A reader for records format.""" def __init__(self, reader): self.__reader = reader def __try_read_record(self): """Try reading a record. Returns: (data, record_type) tuple. Raises: EOFError: when end of file was reached. InvalidRecordError: when valid record could not be read. 
""" block_remaining = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE if block_remaining < HEADER_LENGTH: return ('', RECORD_TYPE_NONE) header = self.__reader.read(HEADER_LENGTH) if len(header) != HEADER_LENGTH: raise EOFError('Read %s bytes instead of %s' % (len(header), HEADER_LENGTH)) (masked_crc, length, record_type) = struct.unpack(HEADER_FORMAT, header) crc = _unmask_crc(masked_crc) if length + HEADER_LENGTH > block_remaining: raise InvalidRecordError('Length is too big') data = self.__reader.read(length) if len(data) != length: raise EOFError('Not enough data read. Expected: %s but got %s' % (length, len(data))) if record_type == RECORD_TYPE_NONE: return ('', record_type) actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type]) actual_crc = crc32c.crc_update(actual_crc, data) actual_crc = crc32c.crc_finalize(actual_crc) if actual_crc != crc: raise InvalidRecordError('Data crc does not match') return (data, record_type) def __sync(self): """Skip reader to the block boundary.""" pad_length = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE if pad_length and pad_length != BLOCK_SIZE: data = self.__reader.read(pad_length) if len(data) != pad_length: raise EOFError('Read %d bytes instead of %d' % (len(data), pad_length)) def read(self): """Reads record from current position in reader.""" data = None while True: last_offset = self.tell() try: (chunk, record_type) = self.__try_read_record() if record_type == RECORD_TYPE_NONE: self.__sync() elif record_type == RECORD_TYPE_FULL: if data is not None: logging.warning( "Ordering corruption: Got FULL record while already " "in a chunk at offset %d", last_offset) return chunk elif record_type == RECORD_TYPE_FIRST: if data is not None: logging.warning( "Ordering corruption: Got FIRST record while already " "in a chunk at offset %d", last_offset) data = chunk elif record_type == RECORD_TYPE_MIDDLE: if data is None: logging.warning( "Ordering corruption: Got MIDDLE record before FIRST " "record at offset %d", last_offset) else: data += chunk elif record_type == RECORD_TYPE_LAST: if data is None: logging.warning( "Ordering corruption: Got LAST record but no chunk is in " "progress at offset %d", last_offset) else: result = data + chunk data = None return result else: raise InvalidRecordError("Unsupported record type: %s" % record_type) except InvalidRecordError, e: logging.warning("Invalid record encountered at %s (%s). Syncing to " "the next block", last_offset, e) data = None self.__sync() def __iter__(self): try: while True: yield self.read() except EOFError: pass def tell(self): """Return file's current position.""" return self.__reader.tell() def seek(self, *args, **kwargs): """Set the file's current position. Arguments are passed directly to the underlying reader. """ return self.__reader.seek(*args, **kwargs)
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Integration with Deepspeed """ import importlib.util import io import json import weakref from copy import deepcopy from functools import partialmethod from .dependency_versions_check import dep_version_check from .file_utils import is_torch_available from .utils import logging if is_torch_available(): import torch logger = logging.get_logger(__name__) def is_deepspeed_available(): return importlib.util.find_spec("deepspeed") is not None class HfDeepSpeedConfig: """ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. A `weakref` of this object is stored in the module's globals to be able to access the config from areas where things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore it's important that this object remains alive while the program is still running. [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic the DeepSpeed configuration is not modified in any way. Args: config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. """ def __init__(self, config_file_or_dict): # set global weakref object set_hf_deepspeed_config(self) dep_version_check("deepspeed") if isinstance(config_file_or_dict, dict): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden config = deepcopy(config_file_or_dict) elif isinstance(config_file_or_dict, str): with io.open(config_file_or_dict, "r", encoding="utf-8") as f: config = json.load(f) else: raise ValueError("expecting either a path to a DeepSpeed config file or a pre-populated dict") self.config = config # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs whether fp16 is enabled, dtype, etc. 
self._stage = self.get_value("zero_optimization.stage", -1) # offload self._offload = False if self.is_zero2() or self.is_zero3(): offload_devices_valid = set(["cpu", "nvme"]) offload_devices = set( [ self.get_value("zero_optimization.offload_optimizer.device"), self.get_value("zero_optimization.offload_param.device"), ] ) if len(offload_devices & offload_devices_valid) > 0: self._offload = True def find_config_node(self, ds_key_long): config = self.config # find the config node of interest if it exists nodes = ds_key_long.split(".") ds_key = nodes.pop() for node in nodes: config = config.get(node) if config is None: return None, ds_key return config, ds_key def get_value(self, ds_key_long, default=None): """ Returns the set value or `default` if no value is set """ config, ds_key = self.find_config_node(ds_key_long) if config is None: return default return config.get(ds_key, default) def del_config_sub_tree(self, ds_key_long, must_exist=False): """ Deletes a sub-section of the config file if it's found. Unless `must_exist` is `True` the section doesn't have to exist. """ config = self.config # find the config node of interest if it exists nodes = ds_key_long.split(".") for node in nodes: parent_config = config config = config.get(node) if config is None: if must_exist: raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") else: return # if found remove it if parent_config is not None: parent_config.pop(node) def is_true(self, ds_key_long): """ Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). """ value = self.get_value(ds_key_long) return False if value is None else bool(value) def is_false(self, ds_key_long): """ Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). """ value = self.get_value(ds_key_long) return False if value is None else not bool(value) def is_zero2(self): return self._stage == 2 def is_zero3(self): return self._stage == 3 def is_offload(self): return self._offload class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig): """ The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the same lifespan as the latter. """ def __init__(self, config_file_or_dict): super().__init__(config_file_or_dict) self._dtype = torch.float16 self.mismatches = [] def dtype(self): return self._dtype def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True): """ A utility method that massages the config file and can optionally verify that the values match. 1. Replace "auto" values with `TrainingArguments` value. 2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer config values and if mismatched add the entry to `self.mismatched` - will assert during `trainer_config_finalize` for one or more mismatches. 
""" config, ds_key = self.find_config_node(ds_key_long) if config is None: return if config.get(ds_key) == "auto": config[ds_key] = hf_val return if not must_match: return ds_val = config.get(ds_key) if ds_val is not None and ds_val != hf_val: self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}") fill_only = partialmethod(fill_match, must_match=False) def trainer_config_process(self, args): """ Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object creation. """ # DeepSpeed does: # train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps self.fill_match( "train_micro_batch_size_per_gpu", args.per_device_train_batch_size, "per_device_train_batch_size" ) self.fill_match("gradient_accumulation_steps", args.gradient_accumulation_steps, "gradient_accumulation_steps") self.fill_match("train_batch_size", train_batch_size, "train_batch_size (calculated)") self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm") self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate") self.fill_match("optimizer.params.betas", [args.adam_beta1, args.adam_beta2], "adam_beta1+adam_beta2") self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon") self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay") self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate") # total_num_steps - will get set in trainer_config_finalize # fp16 if args.fp16: fp16_backend = "apex" if args.fp16_backend == "apex" else "amp" else: fp16_backend = None # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set # any here unless the user did the work self.fill_match("fp16.enabled", fp16_backend == "amp", "fp16+fp16_backend(amp)") # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any # ZeRO features self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)") self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level") # only if we have an explicit fp16.enabled = False then it's fp32, if it's True or this # whole config section is missing then the fallback is fp16 if self.is_false("fp16.enabled"): self._dtype = torch.float32 # later there will be other dtypes besides just fp16 and fp32 # also not quite sure what dtype should be under apex, defaulting to fp16 for now def trainer_config_finalize(self, args, model, num_training_steps): """ This stage is run after we have the model and know num_training_steps. Now we we can complete the configuration process. 
""" # zero if self.is_zero3(): # automatically assign the optimal config values based on model config hidden_size = model.config.hidden_size self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size) self.fill_only("zero_optimization.stage3_prefetch_bucket_size", 0.9 * hidden_size * hidden_size) self.fill_only("zero_optimization.stage3_param_persistence_threshold", 10 * hidden_size) # scheduler self.fill_match("scheduler.params.total_num_steps", num_training_steps, "num_training_steps (calculated)") self.fill_match("scheduler.params.warmup_num_steps", args.get_warmup_steps(num_training_steps), "warmup_steps") if len(self.mismatches) > 0: mismatches = "\n".join(self.mismatches) raise ValueError( f"Please correct the following DeepSpeed config values that mismatch TrainingArguments values:\n{mismatches}\n" "The easiest method is to set these DeepSpeed config values to 'auto'." ) # keep the config object global to be able to access it anywhere during TrainingArguments life-cycle _hf_deepspeed_config_weak_ref = None def set_hf_deepspeed_config(hf_deepspeed_config_obj): # this is a special weakref global object to allow us to get to Deepspeed config from APIs # that don't have an easy way to get to the Deepspeed config outside of the Trainer domain. global _hf_deepspeed_config_weak_ref # will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed) _hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj) def is_deepspeed_zero3_enabled(): if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: return _hf_deepspeed_config_weak_ref().is_zero3() else: return False def deepspeed_config(): if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: return _hf_deepspeed_config_weak_ref().config else: return None def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps): """ A convenience wrapper that deals with optimizer and lr scheduler configuration. """ config = hf_deepspeed_config.config # Optimizer + Scheduler # Currently supported combos: # 1. DS scheduler + DS optimizer: Yes # 2. HF scheduler + HF optimizer: Yes # 3. DS scheduler + HF optimizer: Yes # 4. HF scheduler + DS optimizer: Yes # # Unless Offload is enabled in which case it's: # 1. DS scheduler + DS optimizer: Yes # 2. HF scheduler + HF optimizer: Mostly* # 3. DS scheduler + HF optimizer: Mostly* # 4. HF scheduler + DS optimizer: Yes # # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB) optimizer = None if "optimizer" in config: if args.adafactor: raise ValueError( "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. " "Only one optimizer can be configured." ) else: if hf_deepspeed_config.is_offload(): logger.info( "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the custom optimizer has both CPU and GPU implementation (except LAMB)" ) # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch. # But trainer uses AdamW by default. 
optimizer = trainer.create_optimizer() # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer` config["zero_allow_untested_optimizer"] = True def _lr_scheduler_callable(optimizer): return trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) lr_scheduler = None if "scheduler" not in config: if optimizer is None: # Optimizer is not available, so use callable to defer lr_scheduler creation to DS init lr_scheduler = _lr_scheduler_callable else: lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) return optimizer, lr_scheduler def deepspeed_reinit(trainer): """ this is a temp hack based on: https://github.com/microsoft/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it can't resume from a checkpoint after it did some stepping https://github.com/microsoft/DeepSpeed/issues/1612 """ import deepspeed deepspeed_engine, optimizer, _, lr_scheduler = deepspeed.initialize(**trainer.deepspeed_initialize_kwargs) return deepspeed_engine, optimizer, lr_scheduler def deepspeed_init(trainer, num_training_steps, resume_from_checkpoint=None, inference=False): """ Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args. If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made. Args: trainer: Trainer object num_training_steps: per single gpu resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load inference: launch in inference mode (no optimizer and no lr scheduler) Returns: model, optimizer, lr_scheduler """ import deepspeed from deepspeed.utils import logger as ds_logger model = trainer.model args = trainer.args # resume config update - some bits like `model` and `num_training_steps` only become available during train hf_deepspeed_config = args.hf_deepspeed_config hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps) config = hf_deepspeed_config.config # set the Deepspeed log level consistent with the Trainer ds_logger.setLevel(args.get_process_log_level()) if inference: # only Z3 makes sense for the inference if not hf_deepspeed_config.is_zero3(): raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config") # in case the training config is re-used for inference hf_deepspeed_config.del_config_sub_tree("optimizer") hf_deepspeed_config.del_config_sub_tree("lr_scheduler") optimizer, lr_scheduler = None, None model_parameters = None else: optimizer, lr_scheduler = deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps) model_parameters = list(filter(lambda p: p.requires_grad, model.parameters())) # keep for quick debug: # from pprint import pprint; pprint(config) kwargs = dict( model=model, model_parameters=model_parameters, config_params=config, optimizer=optimizer, lr_scheduler=lr_scheduler, ) deepspeed_engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) # stash kwargs to enabled a later deepspeed_reinit trainer.deepspeed_initialize_kwargs = kwargs if resume_from_checkpoint is not None: # it's possible that the user is trying to resume from model_path, which doesn't necessarily # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's # a resume from a checkpoint and not just a local pretrained weight. 
        # So we check here if the path contains what looks like a deepspeed checkpoint
        import glob

        deepspeed_checkpoint_dirs = sorted(glob.glob(f"{resume_from_checkpoint}/global_step*"))

        if len(deepspeed_checkpoint_dirs) > 0:
            logger.info(f"Attempting to resume from {resume_from_checkpoint}")
            # this magically updates self.optimizer and self.lr_scheduler
            load_path, _ = deepspeed_engine.load_checkpoint(
                resume_from_checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True
            )
            if load_path is None:
                raise ValueError(f"[deepspeed] failed to resume from checkpoint {resume_from_checkpoint}")
        else:
            logger.info(f"{resume_from_checkpoint} doesn't have deepspeed checkpoints, doing nothing")

    return deepspeed_engine, optimizer, lr_scheduler
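# Not part of the original module: a minimal usage sketch for HfDeepSpeedConfig
# and the module-level query helpers above. Running it requires `deepspeed` to
# be installed, because the constructor performs a dependency version check and
# registers the global weakref.
def _example_queries():
    cfg = HfDeepSpeedConfig(
        {
            "zero_optimization": {
                "stage": 3,
                "offload_param": {"device": "cpu"},
            },
        }
    )
    assert cfg.is_zero3()  # zero_optimization.stage == 3
    assert cfg.is_offload()  # param offload to "cpu"
    assert cfg.get_value("zero_optimization.stage") == 3
    assert cfg.get_value("optimizer.params.lr", 1e-3) == 1e-3  # unset -> default
    assert is_deepspeed_zero3_enabled()  # answered via the global weakref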
## Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Exports an example linear regression inference graph. Exports a TensorFlow graph to `/tmp/saved_model/half_plus_two/` based on the `SavedModel` format. This graph calculates, \\( y = a*x + b \\) and/or, independently, \\( y2 = a*x2 + c \\) where `a`, `b` and `c` are variables with `a=0.5` and `b=2` and `c=3`. Output from this program is typically used to exercise SavedModel load and execution code. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import sys import tensorflow as tf from tensorflow.python.lib.io import file_io FLAGS = None def _write_assets(assets_directory, assets_filename): """Writes asset files to be used with SavedModel for half plus two. Args: assets_directory: The directory to which the assets should be written. assets_filename: Name of the file to which the asset contents should be written. Returns: The path to which the assets file was written. """ if not file_io.file_exists(assets_directory): file_io.recursive_create_dir(assets_directory) path = os.path.join( tf.compat.as_bytes(assets_directory), tf.compat.as_bytes(assets_filename)) file_io.write_string_to_file(path, "asset-file-contents") return path def _build_regression_signature(input_tensor, output_tensor): """Helper function for building a regression SignatureDef.""" input_tensor_info = tf.saved_model.utils.build_tensor_info(input_tensor) signature_inputs = { tf.saved_model.signature_constants.REGRESS_INPUTS: input_tensor_info } output_tensor_info = tf.saved_model.utils.build_tensor_info(output_tensor) signature_outputs = { tf.saved_model.signature_constants.REGRESS_OUTPUTS: output_tensor_info } return tf.saved_model.signature_def_utils.build_signature_def( signature_inputs, signature_outputs, tf.saved_model.signature_constants.REGRESS_METHOD_NAME) # Possibly extend this to allow passing in 'classes', but for now this is # sufficient for testing purposes. def _build_classification_signature(input_tensor, scores_tensor): """Helper function for building a classification SignatureDef.""" input_tensor_info = tf.saved_model.utils.build_tensor_info(input_tensor) signature_inputs = { tf.saved_model.signature_constants.CLASSIFY_INPUTS: input_tensor_info } output_tensor_info = tf.saved_model.utils.build_tensor_info(scores_tensor) signature_outputs = { tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES: output_tensor_info } return tf.saved_model.signature_def_utils.build_signature_def( signature_inputs, signature_outputs, tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME) def _generate_saved_model_for_half_plus_two(export_dir, as_text=False, use_main_op=False): """Generates SavedModel for half plus two. Args: export_dir: The directory to which the SavedModel should be written. 
as_text: Writes the SavedModel protocol buffer in text format to disk. use_main_op: Whether to supply a main op during SavedModel build time. """ builder = tf.saved_model.builder.SavedModelBuilder(export_dir) with tf.Session(graph=tf.Graph()) as sess: # Set up the model parameters as variables to exercise variable loading # functionality upon restore. a = tf.Variable(0.5, name="a") b = tf.Variable(2.0, name="b") c = tf.Variable(3.0, name="c") # Create a placeholder for serialized tensorflow.Example messages to be fed. serialized_tf_example = tf.placeholder(tf.string, name="tf_example") # Parse the tensorflow.Example looking for a feature named "x" with a single # floating point value. feature_configs = { "x": tf.FixedLenFeature( [1], dtype=tf.float32), "x2": tf.FixedLenFeature( [1], dtype=tf.float32, default_value=[0.0]) } tf_example = tf.parse_example(serialized_tf_example, feature_configs) # Use tf.identity() to assign name x = tf.identity(tf_example["x"], name="x") y = tf.add(tf.multiply(a, x), b, name="y") y2 = tf.add(tf.multiply(a, x), c, name="y2") x2 = tf.identity(tf_example["x2"], name="x2") y3 = tf.add(tf.multiply(a, x2), c, name="y3") # Create an assets file that can be saved and restored as part of the # SavedModel. original_assets_directory = "/tmp/original/export/assets" original_assets_filename = "foo.txt" original_assets_filepath = _write_assets(original_assets_directory, original_assets_filename) # Set up the assets collection. assets_filepath = tf.constant(original_assets_filepath) tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, assets_filepath) filename_tensor = tf.Variable( original_assets_filename, name="filename_tensor", trainable=False, collections=[]) assign_filename_op = filename_tensor.assign(original_assets_filename) # Set up the signature for Predict with input and output tensor # specification. predict_input_tensor = tf.saved_model.utils.build_tensor_info(x) predict_signature_inputs = {"x": predict_input_tensor} predict_output_tensor = tf.saved_model.utils.build_tensor_info(y) predict_signature_outputs = {"y": predict_output_tensor} predict_signature_def = ( tf.saved_model.signature_def_utils.build_signature_def( predict_signature_inputs, predict_signature_outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) signature_def_map = { "regress_x_to_y": _build_regression_signature(serialized_tf_example, y), "regress_x_to_y2": _build_regression_signature(serialized_tf_example, y2), "regress_x2_to_y3": _build_regression_signature(x2, y3), "classify_x_to_y": _build_classification_signature(serialized_tf_example, y), tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def } # Initialize all variables and then save the SavedModel. 
sess.run(tf.global_variables_initializer()) signature_def_map = { "regress_x_to_y": _build_regression_signature(serialized_tf_example, y), "regress_x_to_y2": _build_regression_signature(serialized_tf_example, y2), "regress_x2_to_y3": _build_regression_signature(x2, y3), "classify_x_to_y": _build_classification_signature(serialized_tf_example, y), "classify_x2_to_y3": _build_classification_signature(x2, y3), tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def } if use_main_op: builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS), main_op=tf.group(tf.saved_model.main_op.main_op(), assign_filename_op)) else: builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS), legacy_init_op=tf.group(assign_filename_op)) builder.save(as_text) def main(_): _generate_saved_model_for_half_plus_two(FLAGS.output_dir) print("SavedModel generated at: %s" % FLAGS.output_dir) _generate_saved_model_for_half_plus_two(FLAGS.output_dir_pbtxt, as_text=True) print("SavedModel generated at: %s" % FLAGS.output_dir_pbtxt) _generate_saved_model_for_half_plus_two( FLAGS.output_dir_main_op, use_main_op=True) print("SavedModel generated at: %s" % FLAGS.output_dir_main_op) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output_dir", type=str, default="/tmp/saved_model_half_plus_two", help="Directory where to output SavedModel.") parser.add_argument( "--output_dir_pbtxt", type=str, default="/tmp/saved_model_half_plus_two_pbtxt", help="Directory where to output the text format of SavedModel.") parser.add_argument( "--output_dir_main_op", type=str, default="/tmp/saved_model_half_plus_two_main_op", help="Directory where to output the SavedModel with a main op.") FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
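# Not part of the original exporter: a quick sketch of exercising the exported
# model with the TF1 loader API. The tensor names ("tf_example:0", "y:0")
# follow the graph built above; the path is the default --output_dir value.
def _example_load_and_run(export_dir="/tmp/saved_model_half_plus_two"):
  """Illustrative only: load the SavedModel and compute y = 0.5 * x + 2."""
  example = tf.train.Example(features=tf.train.Features(feature={
      "x": tf.train.Feature(float_list=tf.train.FloatList(value=[3.0])),
  }))
  with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    y = sess.run(
        "y:0", feed_dict={"tf_example:0": [example.SerializeToString()]})
  return y  # [[3.5]] for x = 3.0, i.e. 0.5 * 3.0 + 2.0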
from tests.utils import order_insert_statements from tests.testcases import FakeBackendsTestCase class SQLite3Test(FakeBackendsTestCase): database_backend = 'sqlite3' def assert_sql(self, migration_num, app_name='test_app'): expected_sql = self.real_sql(app_name, migration_num) fake_backend_sql = self.fake_sql(app_name, migration_num) # Since the order of the fields in the INSERT INTO statement # when using the sqlite3 backend is not deterministic, we test them # by ordering the fields in a determistic manner (alphabetically) expected_sql = ''.join(order_insert_statements(expected_sql)).replace('\n', '') fake_backend_sql = ''.join(order_insert_statements(fake_backend_sql)).replace('\n', '') print('*** Expected output ***') print(expected_sql) print('*** Gotten outputs ***') print(fake_backend_sql) self.assertEqual(expected_sql, fake_backend_sql) def test_fake_backend_create_table_0001(self): app_name = 'test_app' migration_num = '0001' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_create_table_with_field_0002(self): app_name = 'test_app' migration_num = '0002' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_not_null_field_0003(self): app_name = 'test_app' migration_num = '0003' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_drop_field_0004(self): app_name = 'test_app' migration_num = '0004' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_alter_field_0005(self): app_name = 'test_app' migration_num = '0005' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_big_int_0006(self): app_name = 'test_app' migration_num = '0006' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_binary_0007(self): app_name = 'test_app' migration_num = '0007' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_boolean_0008(self): app_name = 'test_app' migration_num = '0008' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_char_0009(self): app_name = 'test_app' migration_num = '0009' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_date_0010(self): app_name = 'test_app' migration_num = '0010' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_datetime_0011(self): app_name = 'test_app' migration_num = '0011' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_decimal_0012(self): app_name = 'test_app' migration_num = '0012' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_duration_0013(self): app_name = 'test_app' migration_num = '0013' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_email_0014(self): app_name = 'test_app' migration_num = '0014' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_file_0015(self): app_name = 'test_app' migration_num = '0015' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_filepath_0016(self): app_name = 'test_app' migration_num = '0016' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_float_0017(self): app_name = 'test_app' migration_num = '0017' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_generic_ip_0018(self): 
app_name = 'test_app' migration_num = '0018' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_null_bool_0019(self): app_name = 'test_app' migration_num = '0019' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_pos_int_0020(self): app_name = 'test_app' migration_num = '0020' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_small_pos_int_0021(self): app_name = 'test_app' migration_num = '0021' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_slug_0022(self): app_name = 'test_app' migration_num = '0022' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_small_int_0023(self): app_name = 'test_app' migration_num = '0023' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_text_0024(self): app_name = 'test_app' migration_num = '0024' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_time_0025(self): app_name = 'test_app' migration_num = '0025' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_url_0026(self): app_name = 'test_app' migration_num = '0026' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_uuid_0027(self): app_name = 'test_app' migration_num = '0027' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_float_0028(self): app_name = 'test_app' migration_num = '0028' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_unique_together_0029(self): app_name = 'test_app' migration_num = '0029' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_change_field_name_0030(self): app_name = 'test_app' migration_num = '0030' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_field_index_0031(self): app_name = 'test_app' migration_num = '0031' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_drop_field_index_0032(self): app_name = 'test_app' migration_num = '0032' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_unique_on_field_0033(self): app_name = 'test_app' migration_num = '0033' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_drop_unique_on_field_0034(self): app_name = 'test_app' migration_num = '0034' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_unique_for_date_0035(self): app_name = 'test_app' migration_num = '0035' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_drop_unique_for_date_0036(self): app_name = 'test_app' migration_num = '0036' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_thirdobject_with_pk_0037(self): app_name = 'test_app' migration_num = '0037' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_rename_table_0038(self): app_name = 'test_app' migration_num = '0038' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_rename_field_0039(self): app_name = 'test_app' migration_num = '0039' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_drop_field_0040(self): app_name = 'test_app' migration_num = '0040' self.assert_sql(app_name=app_name, 
migration_num=migration_num) def test_fake_backend_add_fk_cascade_0041(self): app_name = 'test_app' migration_num = '0041' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_remove_fk_index_0042(self): app_name = 'test_app' migration_num = '0042' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_fk_protect_0043(self): app_name = 'test_app' migration_num = '0043' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_fk_set_null_0044(self): app_name = 'test_app' migration_num = '0044' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_fk_do_nothing_0045(self): app_name = 'test_app' migration_num = '0045' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_one2one_0046(self): app_name = 'test_app' migration_num = '0046' self.assert_sql(app_name=app_name, migration_num=migration_num) def test_fake_backend_add_many2many_0047(self): app_name = 'test_app' migration_num = '0047' self.assert_sql(app_name=app_name, migration_num=migration_num)
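# The order_insert_statements helper imported from tests.utils is not shown in
# this file; the function below is a hypothetical stand-in illustrating the
# normalization assert_sql relies on: reorder INSERT columns (and the matching
# values) alphabetically so sqlite3's non-deterministic column order does not
# affect the comparison. It assumes simple literals with no commas inside
# quoted values.
import re


def _normalize_insert(statement):
    match = re.match(
        r'\s*INSERT INTO (\S+) \((.*?)\) VALUES \((.*?)\);?\s*$', statement)
    if match is None:
        return statement
    table, cols, vals = match.groups()
    pairs = sorted(zip((c.strip() for c in cols.split(',')),
                       (v.strip() for v in vals.split(','))))
    return 'INSERT INTO %s (%s) VALUES (%s);' % (
        table,
        ', '.join(c for c, _ in pairs),
        ', '.join(v for _, v in pairs))


# _normalize_insert('INSERT INTO "app_m" ("b", "a") VALUES (2, 1);')
# -> 'INSERT INTO "app_m" ("a", "b") VALUES (1, 2);'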
import numpy as np import pytest import xarray as xr from xarray.core import merge from . import raises_regex from .test_dataset import create_test_data class TestMergeInternals(object): def test_broadcast_dimension_size(self): actual = merge.broadcast_dimension_size( [xr.Variable('x', [1]), xr.Variable('y', [2, 1])]) assert actual == {'x': 1, 'y': 2} actual = merge.broadcast_dimension_size( [xr.Variable(('x', 'y'), [[1, 2]]), xr.Variable('y', [2, 1])]) assert actual == {'x': 1, 'y': 2} with pytest.raises(ValueError): actual = merge.broadcast_dimension_size( [xr.Variable(('x', 'y'), [[1, 2]]), xr.Variable('y', [2])]) class TestMergeFunction(object): def test_merge_arrays(self): data = create_test_data() actual = xr.merge([data.var1, data.var2]) expected = data[['var1', 'var2']] assert actual.identical(expected) def test_merge_datasets(self): data = create_test_data() actual = xr.merge([data[['var1']], data[['var2']]]) expected = data[['var1', 'var2']] assert actual.identical(expected) actual = xr.merge([data, data]) assert actual.identical(data) def test_merge_dataarray_unnamed(self): data = xr.DataArray([1, 2], dims='x') with raises_regex( ValueError, 'without providing an explicit name'): xr.merge([data]) def test_merge_dicts_simple(self): actual = xr.merge([{'foo': 0}, {'bar': 'one'}, {'baz': 3.5}]) expected = xr.Dataset({'foo': 0, 'bar': 'one', 'baz': 3.5}) assert actual.identical(expected) def test_merge_dicts_dims(self): actual = xr.merge([{'y': ('x', [13])}, {'x': [12]}]) expected = xr.Dataset({'x': [12], 'y': ('x', [13])}) assert actual.identical(expected) def test_merge_error(self): ds = xr.Dataset({'x': 0}) with pytest.raises(xr.MergeError): xr.merge([ds, ds + 1]) def test_merge_alignment_error(self): ds = xr.Dataset(coords={'x': [1, 2]}) other = xr.Dataset(coords={'x': [2, 3]}) with raises_regex(ValueError, 'indexes .* not equal'): xr.merge([ds, other], join='exact') def test_merge_no_conflicts_single_var(self): ds1 = xr.Dataset({'a': ('x', [1, 2]), 'x': [0, 1]}) ds2 = xr.Dataset({'a': ('x', [2, 3]), 'x': [1, 2]}) expected = xr.Dataset({'a': ('x', [1, 2, 3]), 'x': [0, 1, 2]}) assert expected.identical(xr.merge([ds1, ds2], compat='no_conflicts')) assert expected.identical(xr.merge([ds2, ds1], compat='no_conflicts')) assert ds1.identical(xr.merge([ds1, ds2], compat='no_conflicts', join='left')) assert ds2.identical(xr.merge([ds1, ds2], compat='no_conflicts', join='right')) expected = xr.Dataset({'a': ('x', [2]), 'x': [1]}) assert expected.identical(xr.merge([ds1, ds2], compat='no_conflicts', join='inner')) with pytest.raises(xr.MergeError): ds3 = xr.Dataset({'a': ('x', [99, 3]), 'x': [1, 2]}) xr.merge([ds1, ds3], compat='no_conflicts') with pytest.raises(xr.MergeError): ds3 = xr.Dataset({'a': ('y', [2, 3]), 'y': [1, 2]}) xr.merge([ds1, ds3], compat='no_conflicts') def test_merge_no_conflicts_multi_var(self): data = create_test_data() data1 = data.copy(deep=True) data2 = data.copy(deep=True) expected = data[['var1', 'var2']] actual = xr.merge([data1.var1, data2.var2], compat='no_conflicts') assert expected.identical(actual) data1['var1'][:, :5] = np.nan data2['var1'][:, 5:] = np.nan data1['var2'][:4, :] = np.nan data2['var2'][4:, :] = np.nan del data2['var3'] actual = xr.merge([data1, data2], compat='no_conflicts') assert data.equals(actual) def test_merge_no_conflicts_preserve_attrs(self): data = xr.Dataset({'x': ([], 0, {'foo': 'bar'})}) actual = xr.merge([data, data]) assert data.identical(actual) def test_merge_no_conflicts_broadcast(self): datasets = [xr.Dataset({'x': ('y', 
[0])}), xr.Dataset({'x': np.nan})] actual = xr.merge(datasets) expected = xr.Dataset({'x': ('y', [0])}) assert expected.identical(actual) datasets = [xr.Dataset({'x': ('y', [np.nan])}), xr.Dataset({'x': 0})] actual = xr.merge(datasets) assert expected.identical(actual) class TestMergeMethod(object): def test_merge(self): data = create_test_data() ds1 = data[['var1']] ds2 = data[['var3']] expected = data[['var1', 'var3']] actual = ds1.merge(ds2) assert expected.identical(actual) actual = ds2.merge(ds1) assert expected.identical(actual) actual = data.merge(data) assert data.identical(actual) actual = data.reset_coords(drop=True).merge(data) assert data.identical(actual) actual = data.merge(data.reset_coords(drop=True)) assert data.identical(actual) with pytest.raises(ValueError): ds1.merge(ds2.rename({'var3': 'var1'})) with raises_regex( ValueError, 'should be coordinates or not'): data.reset_coords().merge(data) with raises_regex( ValueError, 'should be coordinates or not'): data.merge(data.reset_coords()) def test_merge_broadcast_equals(self): ds1 = xr.Dataset({'x': 0}) ds2 = xr.Dataset({'x': ('y', [0, 0])}) actual = ds1.merge(ds2) assert ds2.identical(actual) actual = ds2.merge(ds1) assert ds2.identical(actual) actual = ds1.copy() actual.update(ds2) assert ds2.identical(actual) ds1 = xr.Dataset({'x': np.nan}) ds2 = xr.Dataset({'x': ('y', [np.nan, np.nan])}) actual = ds1.merge(ds2) assert ds2.identical(actual) def test_merge_compat(self): ds1 = xr.Dataset({'x': 0}) ds2 = xr.Dataset({'x': 1}) for compat in ['broadcast_equals', 'equals', 'identical', 'no_conflicts']: with pytest.raises(xr.MergeError): ds1.merge(ds2, compat=compat) ds2 = xr.Dataset({'x': [0, 0]}) for compat in ['equals', 'identical']: with raises_regex( ValueError, 'should be coordinates or not'): ds1.merge(ds2, compat=compat) ds2 = xr.Dataset({'x': ((), 0, {'foo': 'bar'})}) with pytest.raises(xr.MergeError): ds1.merge(ds2, compat='identical') with raises_regex(ValueError, 'compat=.* invalid'): ds1.merge(ds2, compat='foobar') def test_merge_auto_align(self): ds1 = xr.Dataset({'a': ('x', [1, 2]), 'x': [0, 1]}) ds2 = xr.Dataset({'b': ('x', [3, 4]), 'x': [1, 2]}) expected = xr.Dataset({'a': ('x', [1, 2, np.nan]), 'b': ('x', [np.nan, 3, 4])}, {'x': [0, 1, 2]}) assert expected.identical(ds1.merge(ds2)) assert expected.identical(ds2.merge(ds1)) expected = expected.isel(x=slice(2)) assert expected.identical(ds1.merge(ds2, join='left')) assert expected.identical(ds2.merge(ds1, join='right')) expected = expected.isel(x=slice(1, 2)) assert expected.identical(ds1.merge(ds2, join='inner')) assert expected.identical(ds2.merge(ds1, join='inner')) def test_merge_no_conflicts(self): ds1 = xr.Dataset({'a': ('x', [1, 2]), 'x': [0, 1]}) ds2 = xr.Dataset({'a': ('x', [2, 3]), 'x': [1, 2]}) expected = xr.Dataset({'a': ('x', [1, 2, 3]), 'x': [0, 1, 2]}) assert expected.identical(ds1.merge(ds2, compat='no_conflicts')) assert expected.identical(ds2.merge(ds1, compat='no_conflicts')) assert ds1.identical(ds1.merge(ds2, compat='no_conflicts', join='left')) assert ds2.identical(ds1.merge(ds2, compat='no_conflicts', join='right')) expected2 = xr.Dataset({'a': ('x', [2]), 'x': [1]}) assert expected2.identical(ds1.merge(ds2, compat='no_conflicts', join='inner')) with pytest.raises(xr.MergeError): ds3 = xr.Dataset({'a': ('x', [99, 3]), 'x': [1, 2]}) ds1.merge(ds3, compat='no_conflicts') with pytest.raises(xr.MergeError): ds3 = xr.Dataset({'a': ('y', [2, 3]), 'y': [1, 2]}) ds1.merge(ds3, compat='no_conflicts')
#!/usr/bin/env yamtbx.python """ (c) RIKEN 2015. All rights reserved. Author: Keitaro Yamashita This software is released under the new BSD License; see LICENSE. """ master_params_str = """\ lstin = None .type = path dmin_lst = None .type = path anomalous = False .type = bool cell = average *first .type = choice(multi=False) sgnum = None .type = int nbins = 9 .type = int output = "XSCALE.HKL" .type = str d_min = None .type = float workdir = "." .type = path use_tmpdir_if_available = False .type = bool cbf_to_dat = True .type = bool nproc = None .type = int wfac1 = None .type = float reference = bmax bmed bmin .type = choice(multi=False) .help = Reference selection for final scaling frames_per_batch = None .type = int .help = affects NBATCH=. When 1, NBATCH= matches the number of frames in the dataset. corrections = decay modulation absorption .type = choice(multi=True) """ import os import sys import math import iotbx.phil from yamtbx import util from yamtbx.dataproc.xds import xscale from yamtbx.dataproc.xds.xds_ascii import XDS_ASCII xscale_comm = "xscale_par" def check_valid_xac(xac): if not os.path.isfile(xac): return False line = open(xac).readline() return "FORMAT=XDS_ASCII" in line # check_valid_xac() def get_xac_info(xac, get_nframes=False): ret = {} for l in open(xac): if l.startswith("!FORMAT=XDS_ASCII"): # !FORMAT=XDS_ASCII MERGE=FALSE FRIEDEL'S_LAW=FALSE ret["friedels_law"] = l[l.rindex("=")+1:].strip() if l.startswith("!INCLUDE_RESOLUTION_RANGE="): ret["resol_range"] = l[l.index("=")+1:].strip() elif l.startswith("!SPACE_GROUP_NUMBER="): ret["spgr_num"] = l[l.index("=")+1:].strip() elif l.startswith("!UNIT_CELL_CONSTANTS="): ret["cell"] = l[l.index("=")+1:].strip() elif l.startswith("!END_OF_HEADER"): break if not "resol_range" in ret: d_max_min = XDS_ASCII(xac, i_only=True).as_miller_set().d_max_min() ret["resol_range"] = "%.3f %.3f" % d_max_min if get_nframes: frame_range = XDS_ASCII(f, read_data=False).get_frame_range() ret["nframes"] = frame_range[1] - frame_range[0] return ret # get_xac_info() def make_shells(d_max, d_min, nbins): step = ( 1./(d_min**2) - 1./(d_max**2) ) / float(nbins) start = 1./(d_max**2) rshells = " ".join(map(lambda i: "%.2f" % (start + i * step)**(-1./2), xrange(1, nbins+1))) return " RESOLUTION_SHELLS= %s\n" % rshells # make_shells() def prep_xscale_inp(wdir, xscale_inp_head, xac_files, infos, frames_per_batch=None, corrections=None, ref_idx=None): xscale_inp = os.path.join(wdir, "XSCALE.INP") inp_out = open(xscale_inp, "w") inp_out.write(xscale_inp_head) for i, xds_ascii in enumerate(xac_files): star = "*" if ref_idx == i else " " inp_out.write(" INPUT_FILE=%s%s\n" % (star, os.path.relpath(xds_ascii, params.workdir))) if "resol_range_user" in infos[xds_ascii]: inp_out.write(" INCLUDE_RESOLUTION_RANGE= %s\n" % infos[xds_ascii]["resol_range_user"]) else: inp_out.write(" ! INCLUDE_RESOLUTION_RANGE= %s\n" % infos[xds_ascii]["resol_range"]) if frames_per_batch: nbatch = int(math.ceil(infos[xds_ascii]["nframes"] / frames_per_batch)) inp_out.write(" NBATCH= %d\n" % nbatch) if corrections: inp_out.write(" CORRECTIONS= %s\n" % " ".join(map(lambda s: s.upper(), corrections))) inp_out.write("\n") inp_out.close() # prep_xscale_inp() def run(params, xac_files): if len(xac_files) == 0: print "No XDS_ASCII.HKL files provided." return # Parse dmin_dict = {} if params.dmin_lst: for l in open(params.dmin_lst): sp = l.split() if len(sp) != 2: continue f, dmin = sp dmin_dict[f] = dmin xscale_inp_head = "!SNRC= 3 ! 
was MINIMUM_I/SIGMA= before BUILT=20191015\n\n" if params.wfac1 is not None: xscale_inp_head += "WFAC1= %.3f\n" % params.wfac1 if params.nproc: xscale_inp_head += "MAXIMUM_NUMBER_OF_PROCESSORS= %d\n" % params.nproc infos = {} d_max, d_min = 0, 100 cells = [] for xds_ascii in xac_files: info = get_xac_info(xds_ascii, get_nframes=params.frames_per_batch is not None) if xds_ascii in dmin_dict: dmax,dmin = info["resol_range"].split() info["resol_range_user"] = "%s %s" % (dmax, dmin_dict[xds_ascii]) infos[xds_ascii] = info resrng = map(float, info["resol_range"].split()) d_max = max(d_max, resrng[0]) d_min = min(d_min, resrng[1]) cells.append(map(float, info["cell"].split())) if params.d_min is not None: d_min = max(params.d_min, d_min) if params.cell == "average": cell_sum = reduce(lambda x,y: map(lambda a: a[0]+a[1], zip(x,y)), cells) cell_mean = map(lambda x: x/float(len(cells)), cell_sum) if params.sgnum is not None: sgnum = str(params.sgnum) else: sgnum = infos[xac_files[0]]["spgr_num"] xscale_inp_head += " SPACE_GROUP_NUMBER= %s\n" % sgnum xscale_inp_head += " UNIT_CELL_CONSTANTS= %s\n" % " ".join(map(lambda x: "%.3f"%x, cell_mean)) xscale_inp_head += make_shells(d_max, d_min, params.nbins) + "\n" xscale_inp_head += " OUTPUT_FILE= %s\n" % params.output xscale_inp_head += " FRIEDEL'S_LAW= %s\n\n" % ("FALSE" if params.anomalous else "TRUE") prep_xscale_inp(params.workdir, xscale_inp_head, xac_files, infos, params.frames_per_batch, params.corrections) xscale.run_xscale(os.path.join(params.workdir, "XSCALE.INP"), cbf_to_dat=params.cbf_to_dat, use_tmpdir_if_available=params.use_tmpdir_if_available) if params.reference: print "Choosing reference data (reference=%s)" % params.reference ref_idx = xscale.decide_scaling_reference_based_on_bfactor(os.path.join(params.workdir, "XSCALE.LP"), params.reference, return_as="index") if ref_idx != 0: for f in "XSCALE.INP", "XSCALE.LP": util.rotate_file(os.path.join(params.workdir, f)) prep_xscale_inp(params.workdir, xscale_inp_head, xac_files, infos, params.frames_per_batch, params.corrections, ref_idx=ref_idx) xscale.run_xscale(os.path.join(params.workdir, "XSCALE.INP"), cbf_to_dat=params.cbf_to_dat, use_tmpdir_if_available=params.use_tmpdir_if_available) # run() if __name__ == "__main__": cmdline = iotbx.phil.process_command_line(args=sys.argv[1:], master_string=master_params_str) params = cmdline.work.extract() xac_files = filter(check_valid_xac, cmdline.remaining_args) if params.lstin: xac_files.extend(filter(check_valid_xac, util.read_path_list(params.lstin))) print "XDS_ASCII.HKL files given:" for f in xac_files: print " %s" % f print run(params, xac_files)
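# Worked example (not part of the original script) of the bin edges that
# make_shells() emits: boundaries are equally spaced in 1/d^2 and converted
# back to Angstroms. For a hypothetical 50-2 A range with the default nine bins:
#
#   >>> print make_shells(50.0, 2.0, 9)
#    RESOLUTION_SHELLS= 5.96 4.23 3.46 3.00 2.68 2.45 2.27 2.12 2.00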
from keras.layers import Input, Dense, Dropout, BatchNormalization from keras.models import Model from keras.datasets import mnist from keras.callbacks import EarlyStopping from keras.models import Sequential, load_model from keras.optimizers import RMSprop from keras.callbacks import TensorBoard # from __future__ import print_function from IPython.display import SVG, Image from keras import regularizers, Model from matplotlib import rc import keras import matplotlib.pyplot as plt import numpy as np plt.show(block=True) num_classes = 10 input_dim = 784 batch_size = 256 (x_train, y_train), (x_val, y_val) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_val = x_val.astype('float32') / 255. # x_train = np.concatenate((x_train, x_val)) x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_val = x_val.reshape((len(x_val), np.prod(x_val.shape[1:]))) # print(x_train.shape) y_train = keras.utils.to_categorical(y_train, num_classes) y_val = keras.utils.to_categorical(y_val, num_classes) early_stopping = EarlyStopping(monitor='val_loss', patience=4) # ====================================================================================================================== # train the first layer weights encoding_dim1 = 128 epoch1 = 8 input_img = Input(shape=(input_dim,)) encoded1 = Dense(encoding_dim1, activation='relu')(input_img) # encoded1_bn = BatchNormalization()(encoded1) decoded1 = Dense(input_dim, activation='relu')(encoded1) class1 = Dense(num_classes, activation='softmax')(decoded1) autoencoder1 = Model(input_img, class1) autoencoder1.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy']) encoder1 = Model(input_img, encoded1) encoder1.compile(optimizer=RMSprop(), loss='binary_crossentropy') # autoencoder1 = keras.models.load_model('models/autoencoder1.h5') # encoder1 = keras.models.load_model('models/encoder1.h5') autoencoder1.fit(x_train , y_train , epochs=epoch1 , batch_size=batch_size , shuffle=True , verbose=False , validation_split=0.1 ) # autoencoder1.save('models/autoencoder1.h5') # encoder1.save('models/encoder1.h5') score1 = autoencoder1.evaluate(x_val, y_val, verbose=0) print('Test loss:', score1[0]) print('Test accuracy:', score1[1]) # ========================================================== # train the second layer weights first_layer_code = encoder1.predict(x_train) encoding_dim2 = 64 epoch2 = 5 encoded_2_input = Input(shape=(encoding_dim1,)) encoded2 = Dense(encoding_dim2, activation='relu')(encoded_2_input) # encoded2_bn = BatchNormalization()(encoded2) decoded2 = Dense(encoding_dim1, activation='relu')(encoded2) class2 = Dense(num_classes, activation='softmax')(decoded2) autoencoder2 = Model(encoded_2_input, class2) autoencoder2.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy']) encoder2 = Model(encoded_2_input, encoded2) encoder2.compile(optimizer=RMSprop(), loss='binary_crossentropy') # autoencoder2 = keras.models.load_model('models/autoencoder2.h5') # encoder2 = keras.models.load_model('models/encoder2.h5') autoencoder2.fit(first_layer_code , y_train , epochs=epoch2 , batch_size=batch_size , shuffle=True # , verbose=False # , validation_data=(x_val, x_val) , validation_split=0.1 # , callbacks=[early_stopping] ) # autoencoder2.save('models/autoencoder2.h5') # encoder2.save('models/encoder2.h5') first_layer_code_val = encoder1.predict(x_val) score2 = autoencoder2.evaluate(first_layer_code_val, y_val, verbose=0) print('Test loss:', score2[0]) print('Test accuracy:', score2[1]) # 
========================================================== # train the third layer weights second_layer_code = encoder2.predict(encoder1.predict(x_train)) encoding_dim3 = 32 epoch3 = 5 encoded_3_input = Input(shape=(encoding_dim2,)) encoded3 = Dense(encoding_dim3, activation='relu')(encoded_3_input) # encoded3_bn = BatchNormalization()(encoded3) decoded3 = Dense(encoding_dim1, activation='relu')(encoded3) class3 = Dense(num_classes, activation='softmax')(decoded3) autoencoder3 = Model(encoded_3_input, class3) autoencoder3.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy']) encoder3 = Model(encoded_3_input, encoded3) encoder3.compile(optimizer=RMSprop(), loss='binary_crossentropy') # autoencoder2 = keras.models.load_model('models/autoencoder2.h5') # encoder2 = keras.models.load_model('models/encoder2.h5') autoencoder3.fit(second_layer_code , y_train , epochs=epoch3 , batch_size=batch_size , shuffle=True # , verbose=False # , validation_data=(x_val, x_val) , validation_split=0.1 # , callbacks=[early_stopping] ) # autoencoder2.save('models/autoencoder2.h5') # encoder2.save('models/encoder2.h5') second_layer_code_val = encoder2.predict(encoder1.predict(x_val)) score3 = autoencoder3.evaluate(second_layer_code_val, y_val, verbose=0) print('Test loss:', score3[0]) print('Test accuracy:', score3[1]) # ========================================================== # train the sae epoch4 = 10 sae_encoded1 = Dense(encoding_dim1, activation='relu')(input_img) sae_encoded2 = Dense(encoding_dim2, activation='relu')(sae_encoded1) sae_encoded3 = Dense(encoding_dim3, activation='relu')(sae_encoded2) sae_decoded1 = Dense(encoding_dim2, activation='relu')(sae_encoded3) sae_decoded2 = Dense(encoding_dim1, activation='relu')(sae_decoded1) sae_decoded3 = Dense(input_dim, activation='sigmoid')(sae_decoded2) sae = Model(input_img, sae_decoded3) sae.layers[1].set_weights(autoencoder1.layers[1].get_weights()) sae.layers[2].set_weights(autoencoder2.layers[1].get_weights()) sae.layers[3].set_weights(autoencoder3.layers[1].get_weights()) # sae.layers[4].set_weights(autoencoder3.layers[2].get_weights()) # sae.layers[5].set_weights(autoencoder2.layers[2].get_weights()) # sae.layers[6].set_weights(autoencoder1.layers[2].get_weights()) sae.compile(loss='binary_crossentropy', optimizer=RMSprop()) sae.fit(x_train , x_train , epochs=epoch4 , batch_size=batch_size , shuffle=True # , verbose=False # , validation_data=(x_val, x_val) , validation_split=0.1 # , callbacks=[early_stopping] ) score4 = sae.evaluate(x_val, x_val, verbose=0) print('Test loss:', score4) decoded_imgs = sae.predict(x_val) n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_val[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() class_layer = Dense(num_classes, activation='softmax')(sae_decoded3) classifier = Model(inputs=input_img, outputs=class_layer) classifier.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) classifier.fit(x_train, y_train , epochs=7 , batch_size=batch_size , validation_split=0.1 , shuffle=True) score5 = classifier.evaluate(x_val, y_val) print('Test loss:', score5[0]) print('Test accuracy:', score5[1])
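# Not part of the original script: the weight-transfer step above written as a
# small reusable helper. sae.layers[0] is the InputLayer, so the stacked
# encoders start at index 1, and each pretrained autoencoder holds its encoder
# Dense layer at index 1 (its own index 0 is the Input layer).
def transfer_pretrained_weights(stacked_model, pretrained_models):
    for offset, pretrained in enumerate(pretrained_models, start=1):
        stacked_model.layers[offset].set_weights(pretrained.layers[1].get_weights())


# Equivalent to the three explicit set_weights() calls above:
# transfer_pretrained_weights(sae, [autoencoder1, autoencoder2, autoencoder3])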
import unittest from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, \ OGRException, OGRIndexError, SpatialReference, CoordTransform, \ gdal_version from django.contrib.gis.tests.geometries import * class OGRGeomTest(unittest.TestCase): "This tests the OGR Geometry." def test00a_geomtype(self): "Testing OGRGeomType object." # OGRGeomType should initialize on all these inputs. try: g = OGRGeomType(1) g = OGRGeomType(7) g = OGRGeomType('point') g = OGRGeomType('GeometrycollectioN') g = OGRGeomType('LINearrING') g = OGRGeomType('Unknown') except: self.fail('Could not create an OGRGeomType object!') # Should throw TypeError on this input self.assertRaises(OGRException, OGRGeomType, 23) self.assertRaises(OGRException, OGRGeomType, 'fooD') self.assertRaises(OGRException, OGRGeomType, 9) # Equivalence can take strings, ints, and other OGRGeomTypes self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1)) self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection') self.assertEqual(True, OGRGeomType('point') == 'POINT') self.assertEqual(False, OGRGeomType('point') == 2) self.assertEqual(True, OGRGeomType('unknown') == 0) self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON') self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point')) self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6)) # Testing the Django field name equivalent property. self.assertEqual('PointField', OGRGeomType('Point').django) self.assertEqual('GeometryField', OGRGeomType('Unknown').django) self.assertEqual(None, OGRGeomType('none').django) # 'Geometry' initialization implies an unknown geometry type. gt = OGRGeomType('Geometry') self.assertEqual(0, gt.num) self.assertEqual('Unknown', gt.name) def test00b_geomtype_25d(self): "Testing OGRGeomType object with 25D types." wkb25bit = OGRGeomType.wkb25bit self.failUnless(OGRGeomType(wkb25bit + 1) == 'Point25D') self.failUnless(OGRGeomType('MultiLineString25D') == (5 + wkb25bit)) self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django) def test01a_wkt(self): "Testing WKT output." for g in wkt_out: geom = OGRGeometry(g.wkt) self.assertEqual(g.wkt, geom.wkt) def test01a_ewkt(self): "Testing EWKT input/output." for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'): # First with ewkt output when no SRID in EWKT self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt) # No test consumption with an SRID specified. ewkt_val = 'SRID=4326;%s' % ewkt_val geom = OGRGeometry(ewkt_val) self.assertEqual(ewkt_val, geom.ewkt) self.assertEqual(4326, geom.srs.srid) def test01b_gml(self): "Testing GML output." for g in wkt_out: geom = OGRGeometry(g.wkt) self.assertEqual(g.gml, geom.gml) def test01c_hex(self): "Testing HEX input/output." for g in hex_wkt: geom1 = OGRGeometry(g.wkt) self.assertEqual(g.hex, geom1.hex) # Constructing w/HEX geom2 = OGRGeometry(g.hex) self.assertEqual(geom1, geom2) def test01d_wkb(self): "Testing WKB input/output." from binascii import b2a_hex for g in hex_wkt: geom1 = OGRGeometry(g.wkt) wkb = geom1.wkb self.assertEqual(b2a_hex(wkb).upper(), g.hex) # Constructing w/WKB. geom2 = OGRGeometry(wkb) self.assertEqual(geom1, geom2) def test01e_json(self): "Testing GeoJSON input/output." 
from django.contrib.gis.gdal.prototypes.geom import GEOJSON if not GEOJSON: return for g in json_geoms: geom = OGRGeometry(g.wkt) if not hasattr(g, 'not_equal'): self.assertEqual(g.json, geom.json) self.assertEqual(g.json, geom.geojson) self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json)) def test02_points(self): "Testing Point objects." prev = OGRGeometry('POINT(0 0)') for p in points: if not hasattr(p, 'z'): # No 3D pnt = OGRGeometry(p.wkt) self.assertEqual(1, pnt.geom_type) self.assertEqual('POINT', pnt.geom_name) self.assertEqual(p.x, pnt.x) self.assertEqual(p.y, pnt.y) self.assertEqual((p.x, p.y), pnt.tuple) def test03_multipoints(self): "Testing MultiPoint objects." for mp in multipoints: mgeom1 = OGRGeometry(mp.wkt) # First one from WKT self.assertEqual(4, mgeom1.geom_type) self.assertEqual('MULTIPOINT', mgeom1.geom_name) mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint mgeom3 = OGRGeometry('MULTIPOINT') for g in mgeom1: mgeom2.add(g) # adding each point from the multipoints mgeom3.add(g.wkt) # should take WKT as well self.assertEqual(mgeom1, mgeom2) # they should equal self.assertEqual(mgeom1, mgeom3) self.assertEqual(mp.points, mgeom2.tuple) self.assertEqual(mp.n_p, mgeom2.point_count) def test04_linestring(self): "Testing LineString objects." prev = OGRGeometry('POINT(0 0)') for ls in linestrings: linestr = OGRGeometry(ls.wkt) self.assertEqual(2, linestr.geom_type) self.assertEqual('LINESTRING', linestr.geom_name) self.assertEqual(ls.n_p, linestr.point_count) self.assertEqual(ls.tup, linestr.tuple) self.assertEqual(True, linestr == OGRGeometry(ls.wkt)) self.assertEqual(True, linestr != prev) self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr)) prev = linestr # Testing the x, y properties. x = [tmpx for tmpx, tmpy in ls.tup] y = [tmpy for tmpx, tmpy in ls.tup] self.assertEqual(x, linestr.x) self.assertEqual(y, linestr.y) def test05_multilinestring(self): "Testing MultiLineString objects." prev = OGRGeometry('POINT(0 0)') for mls in multilinestrings: mlinestr = OGRGeometry(mls.wkt) self.assertEqual(5, mlinestr.geom_type) self.assertEqual('MULTILINESTRING', mlinestr.geom_name) self.assertEqual(mls.n_p, mlinestr.point_count) self.assertEqual(mls.tup, mlinestr.tuple) self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt)) self.assertEqual(True, mlinestr != prev) prev = mlinestr for ls in mlinestr: self.assertEqual(2, ls.geom_type) self.assertEqual('LINESTRING', ls.geom_name) self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr)) def test06_linearring(self): "Testing LinearRing objects." prev = OGRGeometry('POINT(0 0)') for rr in linearrings: lr = OGRGeometry(rr.wkt) #self.assertEqual(101, lr.geom_type.num) self.assertEqual('LINEARRING', lr.geom_name) self.assertEqual(rr.n_p, len(lr)) self.assertEqual(True, lr == OGRGeometry(rr.wkt)) self.assertEqual(True, lr != prev) prev = lr def test07a_polygons(self): "Testing Polygon objects." # Testing `from_bbox` class method bbox = (-180,-90,180,90) p = OGRGeometry.from_bbox( bbox ) self.assertEqual(bbox, p.extent) prev = OGRGeometry('POINT(0 0)') for p in polygons: poly = OGRGeometry(p.wkt) self.assertEqual(3, poly.geom_type) self.assertEqual('POLYGON', poly.geom_name) self.assertEqual(p.n_p, poly.point_count) self.assertEqual(p.n_i + 1, len(poly)) # Testing area & centroid. 
self.assertAlmostEqual(p.area, poly.area, 9) x, y = poly.centroid.tuple self.assertAlmostEqual(p.centroid[0], x, 9) self.assertAlmostEqual(p.centroid[1], y, 9) # Testing equivalence self.assertEqual(True, poly == OGRGeometry(p.wkt)) self.assertEqual(True, poly != prev) if p.ext_ring_cs: ring = poly[0] self.assertEqual(p.ext_ring_cs, ring.tuple) self.assertEqual(p.ext_ring_cs, poly[0].tuple) self.assertEqual(len(p.ext_ring_cs), ring.point_count) for r in poly: self.assertEqual('LINEARRING', r.geom_name) def test07b_closepolygons(self): "Testing closing Polygon objects." # Both rings in this geometry are not closed. poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))') self.assertEqual(8, poly.point_count) print "\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n" try: c = poly.centroid except OGRException: # Should raise an OGR exception, rings are not closed pass else: self.fail('Should have raised an OGRException!') print "\nEND - expecting IllegalArgumentException; safe to ignore.\n" # Closing the rings -- doesn't work on GDAL versions 1.4.1 and below: # http://trac.osgeo.org/gdal/ticket/1673 major, minor1, minor2 = gdal_version().split('.') if major == '1': iminor1 = int(minor1) if iminor1 < 4 or (iminor1 == 4 and minor2.startswith('1')): return poly.close_rings() self.assertEqual(10, poly.point_count) # Two closing points should've been added self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid) def test08_multipolygons(self): "Testing MultiPolygon objects." prev = OGRGeometry('POINT(0 0)') for mp in multipolygons: mpoly = OGRGeometry(mp.wkt) self.assertEqual(6, mpoly.geom_type) self.assertEqual('MULTIPOLYGON', mpoly.geom_name) if mp.valid: self.assertEqual(mp.n_p, mpoly.point_count) self.assertEqual(mp.num_geom, len(mpoly)) self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly)) for p in mpoly: self.assertEqual('POLYGON', p.geom_name) self.assertEqual(3, p.geom_type) self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt) def test09a_srs(self): "Testing OGR Geometries with Spatial Reference objects." for mp in multipolygons: # Creating a geometry w/spatial reference sr = SpatialReference('WGS84') mpoly = OGRGeometry(mp.wkt, sr) self.assertEqual(sr.wkt, mpoly.srs.wkt) # Ensuring that SRS is propagated to clones. klone = mpoly.clone() self.assertEqual(sr.wkt, klone.srs.wkt) # Ensuring all children geometries (polygons and their rings) all # return the assigned spatial reference as well. for poly in mpoly: self.assertEqual(sr.wkt, poly.srs.wkt) for ring in poly: self.assertEqual(sr.wkt, ring.srs.wkt) # Ensuring SRS propagate in topological ops. a, b = topology_geoms[0] a, b = OGRGeometry(a.wkt, sr), OGRGeometry(b.wkt, sr) diff = a.difference(b) union = a.union(b) self.assertEqual(sr.wkt, diff.srs.wkt) self.assertEqual(sr.srid, union.srs.srid) # Instantiating w/an integer SRID mpoly = OGRGeometry(mp.wkt, 4326) self.assertEqual(4326, mpoly.srid) mpoly.srs = SpatialReference(4269) self.assertEqual(4269, mpoly.srid) self.assertEqual('NAD83', mpoly.srs.name) # Incrementing through the multipolyogn after the spatial reference # has been re-assigned. for poly in mpoly: self.assertEqual(mpoly.srs.wkt, poly.srs.wkt) poly.srs = 32140 for ring in poly: # Changing each ring in the polygon self.assertEqual(32140, ring.srs.srid) self.assertEqual('NAD83 / Texas South Central', ring.srs.name) ring.srs = str(SpatialReference(4326)) # back to WGS84 self.assertEqual(4326, ring.srs.srid) # Using the `srid` property. 
ring.srid = 4322 self.assertEqual('WGS 72', ring.srs.name) self.assertEqual(4322, ring.srid) def test09b_srs_transform(self): "Testing transform()." orig = OGRGeometry('POINT (-104.609 38.255)', 4326) trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774) # Using an srid, a SpatialReference object, and a CoordTransform object # or transformations. t1, t2, t3 = orig.clone(), orig.clone(), orig.clone() t1.transform(trans.srid) t2.transform(SpatialReference('EPSG:2774')) ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774)) t3.transform(ct) # Testing use of the `clone` keyword. k1 = orig.clone() k2 = k1.transform(trans.srid, clone=True) self.assertEqual(k1, orig) self.assertNotEqual(k1, k2) prec = 3 for p in (t1, t2, t3, k2): self.assertAlmostEqual(trans.x, p.x, prec) self.assertAlmostEqual(trans.y, p.y, prec) def test09c_transform_dim(self): "Testing coordinate dimension is the same on transformed geometries." ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326) ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774) prec = 3 ls_orig.transform(ls_trans.srs) # Making sure the coordinate dimension is still 2D. self.assertEqual(2, ls_orig.coord_dim) self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec) self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec) def test10_difference(self): "Testing difference()." for i in xrange(len(topology_geoms)): g_tup = topology_geoms[i] a = OGRGeometry(g_tup[0].wkt) b = OGRGeometry(g_tup[1].wkt) d1 = OGRGeometry(diff_geoms[i].wkt) d2 = a.difference(b) self.assertEqual(d1, d2) self.assertEqual(d1, a - b) # __sub__ is difference operator a -= b # testing __isub__ self.assertEqual(d1, a) def test11_intersection(self): "Testing intersects() and intersection()." for i in xrange(len(topology_geoms)): g_tup = topology_geoms[i] a = OGRGeometry(g_tup[0].wkt) b = OGRGeometry(g_tup[1].wkt) i1 = OGRGeometry(intersect_geoms[i].wkt) self.assertEqual(True, a.intersects(b)) i2 = a.intersection(b) self.assertEqual(i1, i2) self.assertEqual(i1, a & b) # __and__ is intersection operator a &= b # testing __iand__ self.assertEqual(i1, a) def test12_symdifference(self): "Testing sym_difference()." for i in xrange(len(topology_geoms)): g_tup = topology_geoms[i] a = OGRGeometry(g_tup[0].wkt) b = OGRGeometry(g_tup[1].wkt) d1 = OGRGeometry(sdiff_geoms[i].wkt) d2 = a.sym_difference(b) self.assertEqual(d1, d2) self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator a ^= b # testing __ixor__ self.assertEqual(d1, a) def test13_union(self): "Testing union()." for i in xrange(len(topology_geoms)): g_tup = topology_geoms[i] a = OGRGeometry(g_tup[0].wkt) b = OGRGeometry(g_tup[1].wkt) u1 = OGRGeometry(union_geoms[i].wkt) u2 = a.union(b) self.assertEqual(u1, u2) self.assertEqual(u1, a | b) # __or__ is union operator a |= b # testing __ior__ self.assertEqual(u1, a) def test14_add(self): "Testing GeometryCollection.add()." # Can't insert a Point into a MultiPolygon. mp = OGRGeometry('MultiPolygon') pnt = OGRGeometry('POINT(5 23)') self.assertRaises(OGRException, mp.add, pnt) # GeometryCollection.add may take an OGRGeometry (if another collection # of the same type all child geoms will be added individually) or WKT. for mp in multipolygons: mpoly = OGRGeometry(mp.wkt) mp1 = OGRGeometry('MultiPolygon') mp2 = OGRGeometry('MultiPolygon') mp3 = OGRGeometry('MultiPolygon') for poly in mpoly: mp1.add(poly) # Adding a geometry at a time mp2.add(poly.wkt) # Adding WKT mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once. 
for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp) def test15_extent(self): "Testing `extent` property." # The xmin, ymin, xmax, ymax of the MultiPoint should be returned. mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)') self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent) # Testing on the 'real world' Polygon. poly = OGRGeometry(polygons[3].wkt) ring = poly.shell x, y = ring.x, ring.y xmin, ymin = min(x), min(y) xmax, ymax = max(x), max(y) self.assertEqual((xmin, ymin, xmax, ymax), poly.extent) def test16_25D(self): "Testing 2.5D geometries." pnt_25d = OGRGeometry('POINT(1 2 3)') self.assertEqual('Point25D', pnt_25d.geom_type.name) self.assertEqual(3.0, pnt_25d.z) self.assertEqual(3, pnt_25d.coord_dim) ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)') self.assertEqual('LineString25D', ls_25d.geom_type.name) self.assertEqual([1.0, 2.0, 3.0], ls_25d.z) self.assertEqual(3, ls_25d.coord_dim) def test17_pickle(self): "Testing pickle support." import cPickle g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84') g2 = cPickle.loads(cPickle.dumps(g1)) self.assertEqual(g1, g2) self.assertEqual(4326, g2.srs.srid) self.assertEqual(g1.srs.wkt, g2.srs.wkt) def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(OGRGeomTest)) return s def run(verbosity=2): unittest.TextTestRunner(verbosity=verbosity).run(suite())
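# Tests 10-13 above assert that the OGR geometry operators map onto the named
# set operations. A minimal standalone usage sketch of the same overloads
# (the polygons are illustrative and assume a working GDAL install):
def _ogr_operator_usage_sketch():
    from django.contrib.gis.gdal import OGRGeometry

    a = OGRGeometry('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))')
    b = OGRGeometry('POLYGON((2 2, 6 2, 6 6, 2 6, 2 2))')

    assert a.intersection(b) == (a & b)      # __and__ is intersection
    assert a.union(b) == (a | b)             # __or__ is union
    assert a.difference(b) == (a - b)        # __sub__ is difference
    assert a.sym_difference(b) == (a ^ b)    # __xor__ is symmetric difference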
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_serialization import jsonutils from neutronclient._i18n import _ from neutronclient.common import exceptions from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronv20 from neutronclient.neutron.v2_0.vpn import utils as vpn_utils def _format_peer_cidrs(ipsec_site_connection): try: return '\n'.join([jsonutils.dumps(cidrs) for cidrs in ipsec_site_connection['peer_cidrs']]) except (TypeError, KeyError): return '' class ListIPsecSiteConnection(neutronv20.ListCommand): """List IPsec site connections that belong to a given tenant.""" resource = 'ipsec_site_connection' _formatters = {'peer_cidrs': _format_peer_cidrs} list_columns = [ 'id', 'name', 'peer_address', 'auth_mode', 'status'] pagination_support = True sorting_support = True class ShowIPsecSiteConnection(neutronv20.ShowCommand): """Show information of a given IPsec site connection.""" resource = 'ipsec_site_connection' help_resource = 'IPsec site connection' class IPsecSiteConnectionMixin(object): def add_known_arguments(self, parser): parser.add_argument( '--dpd', metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT", type=utils.str2dict_type( optional_keys=['action', 'interval', 'timeout']), help=vpn_utils.dpd_help("IPsec connection.")) parser.add_argument( '--local-ep-group', help=_('Local endpoint group ID/name with subnet(s) for ' 'the IPsec connection.')) parser.add_argument( '--peer-ep-group', help=_('Peer endpoint group ID/name with CIDR(s) for ' 'the IPsec connection.')) def args2body(self, parsed_args, body=None): """Add in conditional args and then return all conn info.""" if body is None: body = {} if parsed_args.dpd: vpn_utils.validate_dpd_dict(parsed_args.dpd) body['dpd'] = parsed_args.dpd if parsed_args.local_ep_group: _local_epg = neutronv20.find_resourceid_by_name_or_id( self.get_client(), 'endpoint_group', parsed_args.local_ep_group) body['local_ep_group_id'] = _local_epg if parsed_args.peer_ep_group: _peer_epg = neutronv20.find_resourceid_by_name_or_id( self.get_client(), 'endpoint_group', parsed_args.peer_ep_group) body['peer_ep_group_id'] = _peer_epg return {self.resource: body} class CreateIPsecSiteConnection(IPsecSiteConnectionMixin, neutronv20.CreateCommand): """Create an IPsec site connection.""" resource = 'ipsec_site_connection' def add_known_arguments(self, parser): parser.add_argument( '--admin-state-down', default=True, action='store_false', help=_('Set admin state up to false.')) parser.add_argument( '--name', help=_('Set a name for the connection.')) parser.add_argument( '--description', help=_('Set a description for the connection.')) parser.add_argument( '--mtu', default='1500', help=_('MTU size for the connection, default:1500')) parser.add_argument( '--initiator', default='bi-directional', choices=['bi-directional', 'response-only'], help=_('Initiator state in lowercase, default:bi-directional')) parser.add_argument( '--vpnservice-id', 
metavar='VPNSERVICE', required=True, help=_('VPN service instance ID associated with this connection.')) parser.add_argument( '--ikepolicy-id', metavar='IKEPOLICY', required=True, help=_('IKE policy ID associated with this connection.')) parser.add_argument( '--ipsecpolicy-id', metavar='IPSECPOLICY', required=True, help=_('IPsec policy ID associated with this connection.')) parser.add_argument( '--peer-address', required=True, help=_('Peer gateway public IPv4/IPv6 address or FQDN.')) parser.add_argument( '--peer-id', required=True, help=_('Peer router identity for authentication. Can be ' 'IPv4/IPv6 address, e-mail address, key id, or FQDN.')) parser.add_argument( '--peer-cidr', action='append', dest='peer_cidrs', help=_('[DEPRECATED in Mitaka] Remote subnet(s) in CIDR format. ' 'Cannot be specified when using endpoint groups. Only ' 'applicable, if subnet provided for VPN service.')) parser.add_argument( '--psk', required=True, help=_('Pre-shared key string.')) super(CreateIPsecSiteConnection, self).add_known_arguments(parser) def args2body(self, parsed_args): _vpnservice_id = neutronv20.find_resourceid_by_name_or_id( self.get_client(), 'vpnservice', parsed_args.vpnservice_id) _ikepolicy_id = neutronv20.find_resourceid_by_name_or_id( self.get_client(), 'ikepolicy', parsed_args.ikepolicy_id) _ipsecpolicy_id = neutronv20.find_resourceid_by_name_or_id( self.get_client(), 'ipsecpolicy', parsed_args.ipsecpolicy_id) if int(parsed_args.mtu) < 68: message = _("Invalid MTU value: MTU must be " "greater than or equal to 68") raise exceptions.CommandError(message) if (bool(parsed_args.local_ep_group) != bool(parsed_args.peer_ep_group)): message = _("You must specify both local and peer endpoint " "groups.") raise exceptions.CommandError(message) if parsed_args.peer_cidrs and parsed_args.local_ep_group: message = _("You cannot specify both endpoint groups and peer " "CIDR(s).") raise exceptions.CommandError(message) if not parsed_args.peer_cidrs and not parsed_args.local_ep_group: message = _("You must specify endpoint groups or peer CIDR(s).") raise exceptions.CommandError(message) body = { 'vpnservice_id': _vpnservice_id, 'ikepolicy_id': _ikepolicy_id, 'ipsecpolicy_id': _ipsecpolicy_id, 'admin_state_up': parsed_args.admin_state_down, } neutronv20.update_dict(parsed_args, body, ['peer_id', 'mtu', 'initiator', 'psk', 'peer_address']) if parsed_args.name: body['name'] = parsed_args.name if parsed_args.description: body['description'] = parsed_args.description if parsed_args.tenant_id: body['tenant_id'] = parsed_args.tenant_id if parsed_args.peer_cidrs: body['peer_cidrs'] = parsed_args.peer_cidrs return super(CreateIPsecSiteConnection, self).args2body(parsed_args, body) class UpdateIPsecSiteConnection(IPsecSiteConnectionMixin, neutronv20.UpdateCommand): """Update a given IPsec site connection.""" resource = 'ipsec_site_connection' help_resource = 'IPsec site connection' class DeleteIPsecSiteConnection(neutronv20.DeleteCommand): """Delete a given IPsec site connection.""" resource = 'ipsec_site_connection' help_resource = 'IPsec site connection'
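# CreateIPsecSiteConnection.args2body above enforces several mutual-exclusion
# rules between the deprecated --peer-cidr style and the endpoint-group style.
# A hedged standalone restatement of those checks (argument values in the
# commented usage are illustrative):
def _check_ipsec_connection_args(mtu, peer_cidrs, local_ep_group,
                                 peer_ep_group):
    if int(mtu) < 68:
        raise ValueError("MTU must be greater than or equal to 68")
    if bool(local_ep_group) != bool(peer_ep_group):
        raise ValueError("Specify both local and peer endpoint groups")
    if peer_cidrs and local_ep_group:
        raise ValueError("Endpoint groups and peer CIDR(s) are mutually "
                         "exclusive")
    if not peer_cidrs and not local_ep_group:
        raise ValueError("Specify endpoint groups or peer CIDR(s)")


# Endpoint-group style (current) and peer-CIDR style (deprecated in Mitaka):
# _check_ipsec_connection_args('1500', None, 'local-epg', 'peer-epg')
# _check_ipsec_connection_args('1500', ['10.0.0.0/24'], None, None)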
# -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function import types from unittest import TestCase from mock import Mock, patch from django.core.exceptions import ImproperlyConfigured from django.core.handlers.wsgi import STATUS_CODE_TEXT from six import string_types from skd_smoke import generate_test_method, prepare_test_name, \ prepare_configuration, generate_fail_test_method, prepare_test_method_doc,\ SmokeTestCase, EMPTY_TEST_CONFIGURATION_MSG, \ INCORRECT_TEST_CONFIGURATION_MSG, IMPROPERLY_BUILT_CONFIGURATION_MSG, \ NOT_REQUIRED_PARAM_TYPE_CHECK, UNSUPPORTED_CONFIGURATION_KEY_MSG, \ UNKNOWN_HTTP_METHOD_MSG, HTTP_METHODS, LINK_TO_DOCUMENTATION, \ INCORRECT_REQUIRED_PARAM_TYPE_MSG, REQUIRED_PARAMS, \ INCORRECT_NOT_REQUIRED_PARAM_TYPE_MSG class SmokeGeneratorTestCase(TestCase): def test_required_params_definition(self): for param_setting in REQUIRED_PARAMS: self.assertIn('display_name', param_setting) self.assertIsInstance(param_setting['display_name'], string_types) self.assertIn('expected_type', param_setting) def test_non_required_params_definition(self): for param_name, param_type in NOT_REQUIRED_PARAM_TYPE_CHECK.items(): self.assertIsInstance(param_name, string_types) self.assertIn('type', param_type) self.assertIsInstance(param_type['type'], string_types) self.assertIn('func', param_type) self.assertTrue(callable(param_type['func']), 'Non-required parameter type check func must be ' 'callable.') def test_prepare_configuration(self): test = prepare_configuration([('a', 200, 'GET'), ('b', 200, 'GET')]) self.assertIsNotNone(test) self.assertEqual( test, [('a', 200, 'GET', {}), ('b', 200, 'GET', {})]) def test_prepare_configuration_with_incorrect_http_method(self): unknown_http_method = 'unknown' with self.assertRaises(ImproperlyConfigured) as cm: prepare_configuration([('a', 200, unknown_http_method)]) raised_exception = cm.exception exception_msg = str(raised_exception) self.assertIn(UNKNOWN_HTTP_METHOD_MSG % unknown_http_method, exception_msg) def test_prepare_configuration_with_all_incorrect_parameters(self): incorrect_required_params = [ 100500, # url should be string_types 'incorrect', # status should be int 666 # method should be string_types ] incorrect_not_required_params = { 'redirect_to': 123, # should be string_types 'comment': 123, # should be string_types 'initialize': 'initialize', # should be callable 'url_args': 'url_args', # should be list or callable 'url_kwargs': 'url_kwargs', # should be dict or callable 'request_data': 'request_data', # should be dict or callable 'user_credentials': 'user' # should be dict or callable } with self.assertRaises(ImproperlyConfigured) as cm: prepare_configuration([ (incorrect_required_params[0], incorrect_required_params[1], incorrect_required_params[2], incorrect_not_required_params,) ]) raised_exception = cm.exception exception_msg = str(raised_exception) error_messages = [] counter = 0 for param_dict in REQUIRED_PARAMS: value = incorrect_required_params[counter] error_messages.append( INCORRECT_REQUIRED_PARAM_TYPE_MSG % ( param_dict['display_name'], counter, param_dict['expected_type'], type(value), value ) ) counter += 1 for param, type_info in NOT_REQUIRED_PARAM_TYPE_CHECK.items(): value = incorrect_not_required_params[param] actual_type = type(value) error_messages.append( INCORRECT_NOT_REQUIRED_PARAM_TYPE_MSG % ( param, type_info['type'], actual_type, value )) error_messages.append(LINK_TO_DOCUMENTATION) for error_message in error_messages: self.assertIn(error_message, exception_msg) def 
test_prepare_configuration_with_all_http_methods(self): config = [] for http_method in HTTP_METHODS: config.append(('url', 200, http_method, {})) prepared_config = prepare_configuration(config) self.assertEqual(config, prepared_config) def test_generate_fail_test_method(self): test = generate_fail_test_method('test') self.assertIsNotNone(test) self.assertEqual(type(test), types.FunctionType) self.assertEqual(test.__name__, 'fail_method') def test_generate_test_method(self): test = generate_test_method('urlname', 200) self.assertIsNotNone(test) self.assertEqual(type(test), types.FunctionType) self.assertEqual(test.__name__, 'new_test_method') def test_prepare_test_name_with_just_urlname(self): test = prepare_test_name('urlname', 'GET', 200) name = test[0:test.rfind('_')] self.assertEqual(name, 'test_smoke_urlname_get_200') def test_prepare_test_name_with_namespace_urlname(self): test = prepare_test_name('namespace:urlname', 'GET', 200) name = test[0:test.rfind('_')] self.assertEqual(name, 'test_smoke_namespace_urlname_get_200') def test_prepare_test_name_with_plain_url(self): test = prepare_test_name('/enclosed/url/', 'GET', 200) name = test[0:test.rfind('_')] self.assertEqual(name, 'test_smoke_enclosed_url_get_200') def test_prepare_test_method_doc(self): test = prepare_test_method_doc('GET', 'urlname', 200, 'status_text', None) self.assertIsNotNone(test) self.assertEqual(test, 'GET urlname 200 "status_text" {}') class SmokeTestCaseTestCase(TestCase): def check_if_class_contains_test_methods(self, cls, prefix='test_smoke'): for attr in cls.__dict__: if attr.startswith(prefix): return True return False def check_if_class_contains_fail_test_method(self, cls): return hasattr(cls, getattr(cls, 'FAIL_METHOD_NAME')) def call_cls_method_by_name(self, cls, name): method_name = getattr(cls, name) test_method = getattr(cls, method_name) mock = Mock(spec=cls) test_method(mock) return mock def generate_docs_from_configuration(self, configuration): docs = [] for c in configuration: if c[-1]: conf_value = c[-1].get('request_data', {}) if callable(conf_value): request_data = conf_value.__name__ else: request_data = conf_value comment = c[-1].get('comment', None) else: comment = None request_data = {} doc = '%(method)s %(url)s %(status_code)s "%(status)s" ' \ '%(request_data)r' % { 'method': c[2], 'url': c[0], 'status_code': c[1], 'status': STATUS_CODE_TEXT.get(c[1], 'UNKNOWN'), 'request_data': request_data } if comment: doc = '%s %s' % (doc, comment) docs.append(doc) return docs def assert_called_fail_test_method(self, cls, msg): mock = self.call_cls_method_by_name(cls, 'FAIL_METHOD_NAME') self.assertEqual(mock.fail.call_count, 1) self.assertIn(msg, mock.fail.call_args_list[0][0][0]) def assert_generated_test_method(self, cls, name, configuration, doc, url, user_credentials=None, redirect_to=None): # check existence self.assertTrue(hasattr(cls, name), 'There is no "%s" in %s but should be.' 
% (name, cls)) test_method = getattr(cls, name) _, status_code, method, params = configuration method_lower = method.lower() # check __name__ self.assertEqual(test_method.__name__, name) # check __doc__ self.assertEqual(test_method.__doc__, doc) method_mock = Mock() method_mock.return_value = response = Mock(status_code=status_code) method_mocks = {method_lower: method_mock} if user_credentials: login_mock = Mock() method_mocks['login'] = login_mock # check actual run client_mock = Mock(**method_mocks) testcase_mock = Mock(spec=cls, assertEqual=Mock(), client=client_mock, assertTrue=Mock(), assertRedirects=Mock()) test_method(testcase_mock) request_data = params.get('request_data', {}) if callable(request_data): request_data = request_data(testcase_mock) getattr(client_mock, method_lower).assert_called_once_with( url, data=request_data) testcase_mock.assertEqual.assert_called_once_with( status_code, status_code) if user_credentials: login_mock.assert_called_once_with(**user_credentials) if redirect_to: testcase_mock.assertRedirects.assert_called_once_with( response, redirect_to, fetch_redirect_response=False ) else: testcase_mock.assertRedirects.assert_not_called() def test_empty_configuration(self): EmptyConfig = type( str('BrokenConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': ()}) self.assertFalse( self.check_if_class_contains_test_methods(EmptyConfig), 'TestCase contains generated test method but should not ' '(test configuration is empty).' ) self.assertTrue( self.check_if_class_contains_fail_test_method(EmptyConfig), 'Generated TestCase should contain one fake test method to inform ' 'that its configuration is empty.' ) self.assert_called_fail_test_method( EmptyConfig, EMPTY_TEST_CONFIGURATION_MSG) def test_configuration_in_wrong_type(self): BrokenConfig = type(str('BrokenConfig'), (SmokeTestCase,), {}) self.assertFalse( self.check_if_class_contains_test_methods(BrokenConfig), 'TestCase contains generated test method but should not ' '(test configuration is broken).' ) self.assertTrue( self.check_if_class_contains_fail_test_method(BrokenConfig), 'Generated TestCase should contain one fake test method to inform ' 'that its configuration is broken.' ) self.assert_called_fail_test_method( BrokenConfig, INCORRECT_TEST_CONFIGURATION_MSG) def test_configuration_built_incorrectly1(self): conf = ( ('a', 200), # too few arguments ) BrokenConfig = type( str('BrokenConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) self.assertFalse( self.check_if_class_contains_test_methods(BrokenConfig), 'TestCase contains generated test method but should not ' '(test configuration is broken).' ) self.assertTrue( self.check_if_class_contains_fail_test_method(BrokenConfig), 'Generated TestCase should contain one fake test method to inform ' 'that its configuration is broken.' ) self.assert_called_fail_test_method( BrokenConfig, IMPROPERLY_BUILT_CONFIGURATION_MSG) def test_configuration_built_incorrectly2(self): data = { 'request_data': {'param': 'value'}, 'url_kwargs': lambda _: None, # start unsupported data keys 'unsupported_param1': 1, 'unsupported_param2': 2, # end unsupported data keys } conf = ( ('url', 200, 'GET', data), ) unsupported_keys = set(data.keys()) - \ set(NOT_REQUIRED_PARAM_TYPE_CHECK.keys()) BrokenConfig = type( str('BrokenConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) self.assertFalse( self.check_if_class_contains_test_methods(BrokenConfig), 'TestCase contains generated test method but should not ' '(test configuration is broken).' 
) self.assertTrue( self.check_if_class_contains_fail_test_method(BrokenConfig), 'Generated TestCase should contain one fake test method to inform ' 'that its configuration is broken.' ) self.assert_called_fail_test_method( BrokenConfig, UNSUPPORTED_CONFIGURATION_KEY_MSG % ', '.join(unsupported_keys) ) def test_simple_correct_configuration(self): conf = ( ('a', 200, 'GET', {}), ) CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_simple_generated_test_methods(self, mock_django_resolve_url, mock_uuid4): conf = ( ('/some_url/', 302, 'GET', {'comment': 'text comment'}), ('/comments/', 201, 'POST', {'request_data': {'message': 'new comment'}}), ('namespace:url', 200, 'GET', {}), ('some_url2', 200, 'GET', {}), ) expected_test_method_names = [ 'test_smoke_some_url_get_302_ffffffff', 'test_smoke_comments_post_201_ffffffff', 'test_smoke_namespace_url_get_200_ffffffff', 'test_smoke_some_url2_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) for i, name in enumerate(expected_test_method_names): self.assert_generated_test_method(CorrectConfig, name, conf[i], expected_docs[i], url) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_redirect_to_setting( self, mock_django_resolve_url, mock_uuid4): redirect_url = '/redirect_url/' conf = ( ('urlname', 302, 'GET', {'redirect_to': redirect_url}), ) expected_test_method_names = [ 'test_smoke_urlname_get_302_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' 
) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url, redirect_to=redirect_url) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_initialize_callable( self, mock_django_resolve_url, mock_uuid4): initialize_mock = Mock() conf = ( ('urlname_with_slug', 200, 'GET', {'initialize': initialize_mock}), ) expected_test_method_names = [ 'test_smoke_urlname_with_slug_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url) self.assertEqual(initialize_mock.call_count, 1) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_url_kwargs_as_dict( self, mock_django_resolve_url, mock_uuid4): url_kwargs = {'slug': 'cool_article'} conf = ( ('urlname_with_slug', 200, 'GET', {'url_kwargs': url_kwargs}), ) expected_test_method_names = [ 'test_smoke_urlname_with_slug_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' 
) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url) # actual check that resolve_url was called with correct urlname AND # url kwargs returned from call of get_url_kwargs() method mock_django_resolve_url.assert_called_once_with( conf[0][0], **url_kwargs) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_url_kwargs_as_callable( self, mock_django_resolve_url, mock_uuid4): url_kwargs = {'slug': 'cool_article'} def get_url_kwargs(testcase): return url_kwargs conf = ( ('urlname_with_slug', 200, 'GET', {'url_kwargs': get_url_kwargs}), ) expected_test_method_names = [ 'test_smoke_urlname_with_slug_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url) # actual check that resolve_url was called with correct urlname AND # url kwargs returned from call of get_url_kwargs() method mock_django_resolve_url.assert_called_once_with( conf[0][0], **url_kwargs) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_url_args_as_list( self, mock_django_resolve_url, mock_uuid4): url_args = ['arg1'] conf = ( ('urlname_with_arg', 200, 'GET', {'url_args': url_args}), ) expected_test_method_names = [ 'test_smoke_urlname_with_arg_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' 
) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url) # actual check that resolve_url was called with correct urlname AND # url kwargs returned from call of get_url_kwargs() method mock_django_resolve_url.assert_called_once_with( conf[0][0], *url_args) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_url_args_as_callable( self, mock_django_resolve_url, mock_uuid4): url_args = ['arg1'] def get_url_args(testcase): return url_args conf = ( ('urlname_with_arg', 200, 'GET', {'url_args': get_url_args}), ) expected_test_method_names = [ 'test_smoke_urlname_with_arg_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url) # actual check that resolve_url was called with correct urlname AND # url kwargs returned from call of get_url_args() method mock_django_resolve_url.assert_called_once_with( conf[0][0], *url_args) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_user_credentials_as_dict( self, mock_django_resolve_url, mock_uuid4): user_credentials = {'username': 'test_user', 'password': '1234'} conf = ( ('urlname_with_slug', 200, 'GET', {'user_credentials': user_credentials}), ) expected_test_method_names = [ 'test_smoke_urlname_with_slug_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' 
) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url, user_credentials) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_user_credentials_as_callable( self, mock_django_resolve_url, mock_uuid4): user_credentials = {'username': 'test_user', 'password': '1234'} def get_user_creadentials(testcase): return user_credentials conf = ( ('urlname_with_slug', 200, 'GET', {'user_credentials': get_user_creadentials}), ) expected_test_method_names = [ 'test_smoke_urlname_with_slug_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) self.assert_generated_test_method( CorrectConfig, expected_test_method_names[0], conf[0], expected_docs[0], url, user_credentials) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_request_data_as_dict( self, mock_django_resolve_url, mock_uuid4): request_data = {'message': 'new comment'} conf = ( ('/some_url/', 200, 'GET', {}), ('/comments/', 201, 'POST', {'request_data': request_data}), ('namespace:url', 200, 'GET', {}), ('some_url2', 200, 'GET', {}), ) expected_test_method_names = [ 'test_smoke_some_url_get_200_ffffffff', 'test_smoke_comments_post_201_ffffffff', 'test_smoke_namespace_url_get_200_ffffffff', 'test_smoke_some_url2_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' 
) for i, name in enumerate(expected_test_method_names): self.assert_generated_test_method(CorrectConfig, name, conf[i], expected_docs[i], url) @patch('skd_smoke.uuid4') @patch('skd_smoke.resolve_url') def test_generated_test_method_with_request_data_as_callable( self, mock_django_resolve_url, mock_uuid4): request_data = {'message': 'new comment'} def get_request_data(testcase): return request_data conf = ( ('/some_url/', 200, 'GET', {}), ('/comments/', 201, 'POST', {'request_data': get_request_data}), ('namespace:url', 200, 'GET', {}), ('some_url2', 200, 'GET', {}), ) expected_test_method_names = [ 'test_smoke_some_url_get_200_ffffffff', 'test_smoke_comments_post_201_ffffffff', 'test_smoke_namespace_url_get_200_ffffffff', 'test_smoke_some_url2_get_200_ffffffff', ] expected_docs = self.generate_docs_from_configuration(conf) mock_django_resolve_url.return_value = url = '/url/' mock_uuid4.return_value = Mock(hex='ffffffff') CorrectConfig = type( str('CorrectConfig'), (SmokeTestCase,), {'TESTS_CONFIGURATION': conf}) if self.check_if_class_contains_fail_test_method(CorrectConfig): mock = self.call_cls_method_by_name(CorrectConfig, 'FAIL_METHOD_NAME') self.fail( 'Generated TestCase contains fail test method but should not ' 'cause its configuration is correct. Error stacktrace:\n%s' % mock.fail.call_args_list[0][0][0] ) self.assertTrue( self.check_if_class_contains_test_methods(CorrectConfig), 'TestCase should contain at least one generated test method.' ) for i, name in enumerate(expected_test_method_names): self.assert_generated_test_method(CorrectConfig, name, conf[i], expected_docs[i], url)
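# The tests above build their TestCases dynamically via type(); in a project
# the same configuration tuples would normally be declared directly on a
# SmokeTestCase subclass, as in this sketch (the urlnames, paths and option
# values are hypothetical):
from skd_smoke import SmokeTestCase


class ProjectSmokeTests(SmokeTestCase):
    # (url or urlname, expected status code, HTTP method, extra options)
    TESTS_CONFIGURATION = (
        ('home', 200, 'GET', {}),
        ('/about/', 200, 'GET', {'comment': 'plain URL instead of a urlname'}),
        ('comments:create', 201, 'POST',
         {'request_data': {'message': 'new comment'}}),
        ('dashboard', 302, 'GET', {'redirect_to': '/login/'}),
    )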
# -*- coding: utf-8 -*- """ XForms - Controllers """ module = request.controller def create(): """ Given a Table, returns an XForms to create an instance: http://code.javarosa.org/wiki/buildxforms http://www.w3schools.com/xforms/ http://oreilly.com/catalog/9780596003692/preview.html Known field requirements that don't work properly: IS_IN_DB IS_NOT_ONE_OF IS_EMAIL IS_DATE_IN_RANGE IS_DATETIME_IN_RANGE """ if len(request.args) == 0: session.error = T("Need to specify a table!") redirect(URL(r=request)) _table = request.args[0] title = _table table = db[_table] instance_list = [] bindings_list = [] controllers_list = [] itext_list = [TAG["text"](TAG["value"](s3.crud_strings[_table].subtitle_list), _id="title"),] for field in table.fields: if field in ["id", "created_on", "modified_on", "uuid", "mci", "deleted", "created_by", "modified_by", "deleted_fk", "owned_by_role", "owned_by_user"]: # This will get added server-side pass elif table[field].writable == False and table[field].readable == False: pass else: ref = "/" + title + "/" + field instance_list.append(generate_instance(table, field)) bindings_list.append(generate_bindings(table, field, ref)) controller, _itext_list = generate_controllers(table, field, ref) controllers_list.append(controller) itext_list.extend(_itext_list) #bindings_list.append(TAG["itext"](TAG["translation"](itext_list,_lang="eng"))) instance = TAG[title](instance_list, _xmlns="") bindings = bindings_list controllers = TAG["h:body"](controllers_list) response.headers["Content-Type"] = "text/xml" response.view = "xforms.xml" return dict(title=title, instance=instance, bindings=bindings, controllers=controllers, itext_list=itext_list) def uses_requirement(requirement, field): """ Check if a given database field uses the specified requirement (IS_IN_SET, IS_INT_IN_RANGE, etc) """ if hasattr(field.requires, "other") or requirement in str(field.requires): if hasattr(field.requires, "other"): if requirement in str(field.requires.other): return True elif requirement in str(field.requires): return True return False def generate_instance(table, field): """ Generates XML for the instance of the specified field. """ if table[field].default: instance = TAG[field](table[field].default) else: instance = TAG[field]() return instance def generate_bindings(table, field, ref): """ Generates the XML for bindings for the specified database field. """ if "IS_NOT_EMPTY" in str(table[field].requires): required = "true()" else: required = "false()" if table[field].type == "string": _type = "string" elif table[field].type == "double": _type = "decimal" # Collect doesn't support datetime yet elif table[field].type == "date": _type = "date" elif table[field].type == "datetime": _type = "datetime" elif table[field].type == "integer": _type = "int" elif table[field].type == "boolean": _type = "boolean" elif table[field].type == "upload": # For images _type = "binary" elif table[field].type == "text": _type = "text" else: # Unknown type _type = "string" if uses_requirement("IS_INT_IN_RANGE", table[field]) \ or uses_requirement("IS_FLOAT_IN_RANGE", table[field]): if hasattr(table[field].requires, "other"): maximum = table[field].requires.other.maximum minimum = table[field].requires.other.minimum else: maximum = table[field].requires.maximum minimum = table[field].requires.minimum if minimum is None: constraint = "(. < " + str(maximum) + ")" elif maximum is None: constraint = "(. > " + str(minimum) + ")" else: constraint = "(. > " + str(minimum) + " and . 
< " + str(maximum) + ")" binding = TAG["bind"](_nodeset=ref, _type=_type, _required=required, _constraint=constraint) # elif uses_requirement("IS_DATETIME_IN_RANGE", field): # pass # elif uses_requirement("IS_EMAIL", field): # pass elif uses_requirement("IS_IN_SET", table[field]): binding = TAG["bind"](_nodeset=ref, _required=required) else: binding = TAG["bind"](_nodeset=ref, _type=_type, _required=required) return binding def generate_controllers(table, field, ref): """ Generates the controllers XML for the database table field. """ itext_list = [] # Internationalization controllers_list = [] itext_list.append(TAG["text"](TAG["value"](table[field].label), _id=ref + ":label")) itext_list.append(TAG["text"](TAG["value"](table[field].comment), _id=ref + ":hint")) if hasattr(table[field].requires, "option"): items_list = [] for option in table[field].requires.theset: items_list.append(TAG["item"](TAG["label"](option), TAG["value"](option))) controllers_list.append(TAG["select1"](items_list, _ref=field)) #elif uses_requirement("IS_IN_DB", field): # ToDo (similar to IS_IN_SET)? #pass #elif uses_requirement("IS_NOT_ONE_OF", field): # ToDo #pass elif uses_requirement("IS_IN_SET", table[field]): # Defined below if hasattr(table[field].requires, "other"): insetrequires = table[field].requires.other else: insetrequires = table[field].requires theset = insetrequires.theset items_list=[] items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')")) items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')")) option_num = 0 # for formatting something like "jr:itext('stuff:option0')" for option in theset: if table[field].type == "integer": option = int(option) option_ref = ref + ":option" + str(option_num) items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + option_ref + "')"), TAG["value"](option))) #itext_list.append(TAG["text"](TAG["value"](table[field].represent(option)), _id=option_ref)) itext_list.append(TAG["text"](TAG["value"](insetrequires.labels[theset.index(str(option))]), _id=option_ref)) option_num += 1 if insetrequires.multiple: controller = TAG["select"](items_list, _ref=ref) else: controller = TAG["select1"](items_list, _ref=ref) elif table[field].type == "boolean": # Using select1, is there an easier way to do this? 
items_list=[] items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')")) items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')")) # True option items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + ref + ":option0')"), TAG["value"](1))) itext_list.append(TAG["text"](TAG["value"]("True"), _id=ref + ":option0")) # False option items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + ref + ":option1')"), TAG["value"](0))) itext_list.append(TAG["text"](TAG["value"]("False"), _id=ref + ":option1")) controller = TAG["select1"](items_list, _ref=ref) elif table[field].type == "upload": # For uploading images items_list=[] items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')")) items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')")) controller = TAG["upload"](items_list, _ref=ref, _mediatype="image/*") elif table[field].writable == False: controller = TAG["input"](TAG["label"](table[field].label), _ref=ref, _readonly="true", _default=table[field].default.upper()) else: # Normal Input field controller = TAG["input"](TAG["label"](table[field].label), _ref=ref) return controller, itext_list def csvdata(nodelist): """ Returns the data in the given node as a comma separated string """ data = "" for subnode in nodelist: if (subnode.nodeType == subnode.ELEMENT_NODE): try: data = data + "," + subnode.childNodes[0].data except: data = data+ "," return data[1:] + "\n" def csvheader(parent, nodelist): """ Gives the header for the CSV """ header = "" for subnode in nodelist: if (subnode.nodeType == subnode.ELEMENT_NODE): header = header + "," + parent + "." + subnode.tagName return header[1:] + "\n" def importxml(db, xmlinput): """ Converts the XML to a CSV compatible with the import_from_csv_file of web2py ToDo: rewrite this to go via S3XRC for proper Authz checking, Audit, Create/Update checking. """ import cStringIO import xml.dom.minidom try: doc = xml.dom.minidom.parseString(xmlinput) except: raise Exception("XML parse error") parent = doc.childNodes[0].tagName csvout = csvheader(parent, doc.childNodes[0].childNodes) for subnode in doc.childNodes: csvout = csvout + csvdata(subnode.childNodes) fh = cStringIO.StringIO() fh.write(csvout) fh.seek(0, 0) db[parent].import_from_csv_file(fh) @auth.s3_requires_membership(1) def post(): data = importxml(db, request.body.read()) return data @auth.s3_requires_membership(2) def submission_old(): """ Allows for submission of xforms by ODK Collect http://code.google.com/p/opendatakit/ """ response.headers["Content-Type"] = "text/xml" xml = str(request.post_vars.xml_submission_file.value) if len(xml) == 0: raise HTTP(400, "Need some xml!") importxml(db, xml) r = HTTP(201, "Saved.") r.headers["Location"] = request.env.http_host raise r @auth.s3_requires_membership(2) def submission(): """ Allows for submission of Xforms by ODK Collect using the S3XRC framework. 
""" try: from cStringIO import StringIO # Faster, where available except: from StringIO import StringIO import cgi from lxml import etree source = request.post_vars.get("xml_submission_file", None) if isinstance(source, cgi.FieldStorage): if source.filename: xmlinput = source.file else: xmlinput = source.value if isinstance(xmlinput, basestring): xmlinput = StringIO(xmlinput) else: raise HTTP(400, "Invalid Request: Expected an XForm") tree = etree.parse(xmlinput) resource = tree.getroot().tag prefix, name = resource.split("_") res = s3mgr.define_resource(prefix, name) stylesheet = os.path.join(request.folder, "static", "formats", "odk", "import.xsl") try: result = res.import_xml(source=tree, stylesheet=stylesheet) except IOError, SyntaxError: raise HTTP(500, "Internal server error") # Parse response status = json.loads(result)["statuscode"] if status == 200: r = HTTP(201, "Saved") # ODK Collect only accepts 201 r.headers["Location"] = request.env.http_host raise r else: raise HTTP(status, result) def formList(): """ Generates a list of Xforms based on database tables for ODK Collect http://code.google.com/p/opendatakit/ ToDo: Provide a static list of forms, such as RMS_Req, GIS_Landmark, MPR_Missing_Report, CR_Shelter, PR_Presence """ # Test statements #xml = TAG.forms(*[TAG.form(getName("Name"), _url = "http://" + request.env.http_host + URL(c="static", "current.xml"))]) #xml = TAG.forms(*[TAG.form(getName(t), _url = "http://" + request.env.http_host + URL(f="create", args=t)) for t in db.tables()]) # List of a couple simple tables to avoid a giant list of all the tables tables = ["rms_req", "pf_missing_report", "cr_shelter", "pr_presence", "pr_person", "pr_pimage"] xml = TAG.forms() for table in tables: xml.append(TAG.form(get_name(table), _url = "http://" + request.env.http_host + URL(f="create", args=db[table]))) response.headers["Content-Type"] = "text/xml" response.view = "xforms.xml" return xml def get_name(name): """ Generates a pretty(er) name from a database table name. """ return name[name.find("_") + 1:].replace("_", " ").capitalize()
import random
try:
    from com.xhaus.jyson import JysonCodec as json
except ImportError:
    import json
from utils import *
from net.grinder.script import Test
from net.grinder.plugin.http import HTTPRequest
import itertools


class AbstractQuery(object):
    one_day = (1000 * 60 * 60 * 24)
    query_interval_name = None
    num_queries_for_current_node = 0
    query_request = None
    query_name = None
    test_number = 0

    @classmethod
    def create_metrics(cls, agent_number):
        # divide the total number of each query type into the ones needed by
        # this worker
        total_queries = default_config[cls.query_interval_name]
        start_job, end_job = generate_job_range(total_queries,
                                                default_config['num_nodes'],
                                                agent_number)
        cls.num_queries_for_current_node = end_job - start_job

        # Grinder test infrastructure
        test = Test(cls.test_number, cls.query_name)
        cls.query_request = HTTPRequest()
        test.record(cls.query_request)

        return [cls] * cls.num_queries_for_current_node

    def generate(self, time, logger):
        raise Exception("Can't instantiate abstract query")


class SinglePlotQuery(AbstractQuery):
    query_interval_name = 'singleplot_per_interval'
    query_name = "SinglePlotQuery"
    test_number = 3

    def generate(self, time, logger):
        tenant_id = random.randint(0, default_config['num_tenants'])
        metric_name = generate_metric_name(
            random.randint(0, default_config['metrics_per_tenant']))
        to = time
        frm = time - self.one_day
        resolution = 'FULL'
        url = "%s/v2.0/%d/views/%s?from=%d&to=%s&resolution=%s" % (
            default_config['query_url'], tenant_id, metric_name, frm, to,
            resolution)
        result = self.query_request.GET(url)
        # logger(result.getText())
        return result


class MultiPlotQuery(AbstractQuery):
    query_interval_name = 'multiplot_per_interval'
    query_name = "MultiPlotQuery"
    test_number = 4

    def generate_multiplot_payload(self):
        metrics_count = min(default_config['max_multiplot_metrics'],
                            random.randint(0, default_config['metrics_per_tenant']))
        metrics_list = map(generate_metric_name, range(metrics_count))
        return json.dumps(metrics_list)

    def generate(self, time, logger):
        tenant_id = random.randint(0, default_config['num_tenants'])
        payload = self.generate_multiplot_payload()
        to = time
        frm = time - self.one_day
        resolution = 'FULL'
        url = "%s/v2.0/%d/views?from=%d&to=%d&resolution=%s" % (
            default_config['query_url'], tenant_id, frm, to, resolution)
        result = self.query_request.POST(url, payload)
        # logger(result.getText())
        return result


class SearchQuery(AbstractQuery):
    query_interval_name = 'search_queries_per_interval'
    query_name = "SearchQuery"
    test_number = 5

    def generate_metrics_regex(self):
        metric_name = generate_metric_name(
            random.randint(0, default_config['metrics_per_tenant']))
        return ".".join(metric_name.split('.')[0:-1]) + ".*"

    def generate(self, time, logger):
        tenant_id = random.randint(0, default_config['num_tenants'])
        metric_regex = self.generate_metrics_regex()
        url = "%s/v2.0/%d/metrics/search?query=%s" % (
            default_config['query_url'], tenant_id, metric_regex)
        result = self.query_request.GET(url)
        # logger(result.getText())
        return result


class AnnotationsQuery(AbstractQuery):
    query_interval_name = 'annotations_queries_per_interval'
    query_name = "AnnotationsQuery"
    test_number = 6

    def generate(self, time, logger):
        tenant_id = random.randint(0,
                                   default_config['annotations_num_tenants'])
        to = time
        frm = time - self.one_day
        url = "%s/v2.0/%d/events/getEvents?from=%d&until=%d" % (
            default_config['query_url'], tenant_id, frm, to)
        result = self.query_request.GET(url)
        return result


class EnumSearchQuery(AbstractQuery):
    query_interval_name = 'enum_search_queries_per_interval'
    query_name = "EnumSearchQuery"
    test_number = 8

    def generate_metrics_regex(self):
        metric_name = 'enum_grinder_' + generate_metric_name(
            random.randint(0, default_config['enum_metrics_per_tenant']))
        return ".".join(metric_name.split('.')[0:-1]) + ".*"

    def generate(self, time, logger):
        tenant_id = random.randint(0, default_config['enum_num_tenants'])
        metric_regex = self.generate_metrics_regex()
        url = "%s/v2.0/%d/metrics/search?query=%s&include_enum_values=true" % (
            default_config['query_url'], tenant_id, metric_regex)
        result = self.query_request.GET(url)
        return result


class EnumSinglePlotQuery(AbstractQuery):
    query_interval_name = 'enum_single_plot_queries_per_interval'
    query_name = "EnumSinglePlotQuery"
    test_number = 9

    def generate(self, time, logger):
        tenant_id = random.randint(0, default_config['enum_num_tenants'])
        metric_name = generate_enum_metric_name(
            random.randint(0, default_config['enum_metrics_per_tenant']))
        to = time
        frm = time - self.one_day
        resolution = 'FULL'
        url = "%s/v2.0/%d/views/%s?from=%d&to=%s&resolution=%s" % (
            default_config['query_url'], tenant_id, metric_name, frm, to,
            resolution)
        result = self.query_request.GET(url)
        # logger(result.getText())
        return result


class EnumMultiPlotQuery(AbstractQuery):
    query_interval_name = 'enum_multiplot_per_interval'
    query_name = "EnumMultiPlotQuery"
    test_number = 10

    def generate_multiplot_payload(self):
        metrics_count = min(default_config['max_multiplot_metrics'],
                            random.randint(0, default_config['enum_metrics_per_tenant']))
        metrics_list = map(generate_enum_metric_name, range(metrics_count))
        return json.dumps(metrics_list)

    def generate(self, time, logger):
        tenant_id = random.randint(0, default_config['enum_num_tenants'])
        payload = self.generate_multiplot_payload()
        to = time
        frm = time - self.one_day
        resolution = 'FULL'
        url = "%s/v2.0/%d/views?from=%d&to=%d&resolution=%s" % (
            default_config['query_url'], tenant_id, frm, to, resolution)
        result = self.query_request.POST(url, payload)
        # logger(result.getText())
        return result


class QueryThread(AbstractThread):
    # The list of queries to be invoked across all threads in this worker
    queries = []

    query_types = [SinglePlotQuery, MultiPlotQuery, SearchQuery,
                   EnumSearchQuery, EnumSinglePlotQuery, AnnotationsQuery,
                   EnumMultiPlotQuery]

    @classmethod
    def create_metrics(cls, agent_number):
        cls.queries = list(itertools.chain(
            *[x.create_metrics(agent_number) for x in cls.query_types]))
        random.shuffle(cls.queries)

    @classmethod
    def num_threads(cls):
        return default_config['query_concurrency']

    def __init__(self, thread_num):
        AbstractThread.__init__(self, thread_num)
        self.query_instances = [x(thread_num, self.num_threads())
                                for x in self.query_types]
        total_queries_for_current_node = reduce(
            lambda x, y: x + y,
            [x.num_queries_for_current_node for x in self.query_instances])
        start, end = generate_job_range(total_queries_for_current_node,
                                        self.num_threads(), thread_num)
        self.slice = self.queries[start:end]
        self.query_fn_dict = dict(
            [[type(x), x.generate] for x in self.query_instances])

    def make_request(self, logger):
        if len(self.slice) == 0:
            logger("Warning: no work for current thread")
            self.sleep(1000000)
            return None
        self.check_position(logger, len(self.slice))
        result = self.query_fn_dict[self.slice[self.position]](
            int(self.time()), logger)
        self.position += 1
        return result


ThreadManager.add_type(QueryThread)
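

# -----------------------------------------------------------------------------
# Hedged sketch (an illustration, not used by the scenario above): the pattern
# for adding another query type.  Subclass AbstractQuery, give it a config key,
# a display name and a Grinder test number, implement generate(), then register
# the class on QueryThread.query_types before create_metrics() runs.  The
# config key 'example_queries_per_interval' and test number 99 are made up and
# would need a matching entry in default_config.
# -----------------------------------------------------------------------------
class ExampleSearchAllQuery(AbstractQuery):
    query_interval_name = 'example_queries_per_interval'
    query_name = "ExampleSearchAllQuery"
    test_number = 99

    def generate(self, time, logger):
        # Same request shape as SearchQuery, but with a wildcard-only query
        tenant_id = random.randint(0, default_config['num_tenants'])
        url = "%s/v2.0/%d/metrics/search?query=%s" % (
            default_config['query_url'], tenant_id, "*")
        return self.query_request.GET(url)

# Opt in explicitly; left commented out so the default query mix is unchanged:
# QueryThread.query_types.append(ExampleSearchAllQuery)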
#!/usr/bin/env python

# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Produces various output formats from a set of JavaScript files with
closure style require/provide calls.

Scans one or more directory trees for JavaScript files.  Then, from a given
list of top-level files, sorts all required input files topologically.  The
top-level files are appended to the sorted list in the order specified on
the command line.  If no root directories are specified, the source files
are assumed to be ordered already and no dependency analysis is performed.
The resulting file list can then be used in one of the following ways:

- list: a plain list of files, one per line, is output.

- html: a series of html <script> tags with src attributes containing paths
  is output.

- bundle: a concatenation of all the files, separated by newlines, is output.

- compressed_bundle: a bundle where non-significant whitespace, including
  comments, has been stripped is output.

- copy: the files are copied, or hard linked if possible, to the destination
  directory.  In this case, no output is generated.
'''

import errno
import optparse
import os
import re
import shutil
import sys

_SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
_CHROME_SOURCE = os.path.realpath(
    os.path.join(_SCRIPT_DIR, *[os.path.pardir] * 7))
sys.path.insert(
    0,
    os.path.join(_CHROME_SOURCE,
                 'third_party/devtools-frontend/src/scripts/build'))
sys.path.insert(
    0,
    os.path.join(_CHROME_SOURCE, ('third_party/chromevox/third_party/' +
                                  'closure-library/closure/bin/build')))
import depstree
import rjsmin
import source
import treescan


def Die(message):
  '''Prints an error message and exits the program.'''
  print >> sys.stderr, message
  sys.exit(1)


class SourceWithPaths(source.Source):
  '''A source.Source object with its relative input and output paths.'''

  def __init__(self, content, in_path, out_path):
    super(SourceWithPaths, self).__init__(content)
    self._in_path = in_path
    self._out_path = out_path

  def GetInPath(self):
    return self._in_path

  def GetOutPath(self):
    return self._out_path

  def __str__(self):
    return self.GetOutPath()


class Bundle():
  '''An ordered list of sources without duplicates.'''

  def __init__(self):
    self._added_paths = set()
    self._added_sources = []

  def Add(self, sources):
    '''Appends one or more source objects to the list if they are not
    already present.

    Args:
      sources: A SourceWithPaths or an iterable of such objects.
    '''
    if isinstance(sources, SourceWithPaths):
      sources = [sources]
    for source in sources:
      path = source.GetInPath()
      if path not in self._added_paths:
        self._added_paths.add(path)
        self._added_sources.append(source)

  def GetInPaths(self):
    return (source.GetInPath() for source in self._added_sources)

  def GetOutPaths(self):
    return (source.GetOutPath() for source in self._added_sources)

  def GetSources(self):
    return self._added_sources

  def GetUncompressedSource(self):
    return '\n'.join((s.GetSource() for s in self._added_sources))

  def GetCompressedSource(self):
    return rjsmin.jsmin(self.GetUncompressedSource())


class PathRewriter():
  '''A list of simple path rewrite rules to map relative input paths to
  relative output paths.
  '''

  def __init__(self, specs=[]):
    '''Args:
      specs: A list of mappings, each consisting of the input prefix and
        the corresponding output prefix separated by colons.
    '''
    self._prefix_map = []
    for spec in specs:
      parts = spec.split(':')
      if len(parts) != 2:
        Die('Invalid prefix rewrite spec %s' % spec)
      if not parts[0].endswith('/') and parts[0] != '':
        parts[0] += '/'
      self._prefix_map.append(parts)
    self._prefix_map.sort(reverse=True)

  def RewritePath(self, in_path):
    '''Rewrites an input path according to the list of rules.

    Args:
      in_path, str: The input path to rewrite.
    Returns:
      str: The corresponding output path.
    '''
    for in_prefix, out_prefix in self._prefix_map:
      if in_path.startswith(in_prefix):
        return os.path.join(out_prefix, in_path[len(in_prefix):])
    return in_path


def ReadSources(roots=[],
                source_files=[],
                need_source_text=False,
                path_rewriter=PathRewriter(),
                exclude=[]):
  '''Reads all sources specified on the command line, including sources
  included by --root options.
  '''

  def EnsureSourceLoaded(in_path, sources):
    if in_path not in sources:
      out_path = path_rewriter.RewritePath(in_path)
      sources[in_path] = SourceWithPaths(source.GetFileContents(in_path),
                                         in_path, out_path)

  # Only read the actual source file if we will do a dependency analysis or
  # the caller asks for it.
  need_source_text = need_source_text or len(roots) > 0
  sources = {}
  for root in roots:
    for name in treescan.ScanTreeForJsFiles(root):
      if any((r.search(name) for r in exclude)):
        continue
      EnsureSourceLoaded(name, sources)
  for path in source_files:
    if need_source_text:
      EnsureSourceLoaded(path, sources)
    else:
      # Just add an empty representation of the source.
      sources[path] = SourceWithPaths('', path, path_rewriter.RewritePath(path))
  return sources


def _GetBase(sources):
  '''Gets the closure base.js file if present among the sources.

  Args:
    sources: Dictionary with input path names as keys and SourceWithPaths
      as values.
  Returns:
    SourceWithPaths: The source file providing the goog namespace.
  '''
  for source in sources.itervalues():
    if (os.path.basename(source.GetInPath()) == 'base.js' and
        'goog' in source.provides):
      return source
  Die('goog.base not provided by any file.')


def CalcDeps(bundle, sources, top_level):
  '''Calculates dependencies for a set of top-level files.

  Args:
    bundle: Bundle to add the sources to.
    sources, dict: Mapping from input path to SourceWithPaths objects.
    top_level, list: List of top-level input paths to calculate dependencies
      for.
  '''
  providers = [s for s in sources.itervalues() if len(s.provides) > 0]
  deps = depstree.DepsTree(providers)
  namespaces = []
  for path in top_level:
    namespaces.extend(sources[path].requires)
  # base.js is an implicit dependency that always goes first.
  bundle.Add(_GetBase(sources))
  bundle.Add(deps.GetDependencies(namespaces))


def _MarkAsCompiled(sources):
  '''Sets COMPILED to true in the Closure base.js source.

  Args:
    sources: Dictionary with input path names as keys and SourceWithPaths
      objects as values.
  '''
  base = _GetBase(sources)
  new_content, count = re.subn(
      '^var COMPILED = false;$',
      'var COMPILED = true;',
      base.GetSource(),
      count=1,
      flags=re.MULTILINE)
  if count != 1:
    Die('COMPILED var assignment not found in %s' % base.GetInPath())
  sources[base.GetInPath()] = SourceWithPaths(new_content, base.GetInPath(),
                                              base.GetOutPath())


def LinkOrCopyFiles(sources, dest_dir):
  '''Copies a list of sources to a destination directory.'''

  def LinkOrCopyOneFile(src, dst):
    try:
      os.makedirs(os.path.dirname(dst))
    except OSError as err:
      if err.errno != errno.EEXIST:
        raise
    if os.path.exists(dst):
      os.unlink(dst)
    try:
      os.link(src, dst)
    except:
      shutil.copy(src, dst)

  for source in sources:
    LinkOrCopyOneFile(source.GetInPath(),
                      os.path.join(dest_dir, source.GetOutPath()))


def WriteOutput(bundle, format, out_file, dest_dir):
  '''Writes output in the specified format.

  Args:
    bundle: The ordered bundle with all sources already added.
    format: Output format, one of list, html, bundle, compressed_bundle.
    out_file: File object to receive the output.
    dest_dir: Prepended to each path mentioned in the output, if applicable.
  '''
  if format == 'list':
    paths = bundle.GetOutPaths()
    if dest_dir:
      paths = (os.path.join(dest_dir, p) for p in paths)
    paths = (os.path.normpath(p) for p in paths)
    out_file.write('\n'.join(paths))
  elif format == 'html':
    HTML_TEMPLATE = '<script src=\'%s\'>'
    script_lines = (HTML_TEMPLATE % p for p in bundle.GetOutPaths())
    out_file.write('\n'.join(script_lines))
  elif format == 'bundle':
    out_file.write(bundle.GetUncompressedSource())
  elif format == 'compressed_bundle':
    out_file.write(bundle.GetCompressedSource())
  out_file.write('\n')


def WriteStampfile(stampfile):
  '''Writes a stamp file.

  Args:
    stampfile, string: name of stamp file to touch
  '''
  with open(stampfile, 'w') as file:
    os.utime(stampfile, None)


def WriteDepfile(depfile, outfile, infiles):
  '''Writes a depfile.

  Args:
    depfile, string: name of dep file to write
    outfile, string: Name of output file to use as the target in the
      generated .d file.
    infiles, list: File names to list as dependencies in the .d file.
  '''
  content = '%s: %s' % (outfile, ' '.join(infiles))
  dirname = os.path.dirname(depfile)
  if not os.path.exists(dirname):
    os.makedirs(dirname)
  open(depfile, 'w').write(content)


def CreateOptionParser():
  parser = optparse.OptionParser(description=__doc__)
  parser.usage = '%prog [options] <top_level_file>...'
  parser.add_option(
      '-d',
      '--dest_dir',
      action='store',
      metavar='DIR',
      help=('Destination directory.  Used when translating input paths to '
            'output paths and when copying files.'))
  parser.add_option(
      '-o',
      '--output_file',
      action='store',
      metavar='FILE',
      help='File to output result to for modes that output a single file.')
  parser.add_option(
      '-r',
      '--root',
      dest='roots',
      action='append',
      default=[],
      metavar='ROOT',
      help='Roots of directory trees to scan for sources.')
  parser.add_option(
      '-M',
      '--module',
      dest='modules',
      action='append',
      default=[],
      metavar='FILENAME',
      help='Source modules to load')
  parser.add_option(
      '-w',
      '--rewrite_prefix',
      action='append',
      default=[],
      dest='prefix_map',
      metavar='SPEC',
      help=('Two path prefixes, separated by colons, specifying that a file '
            'whose (relative) path name starts with the first prefix should '
            'have that prefix replaced by the second prefix to form a path '
            'relative to the output directory.'))
  parser.add_option(
      '-m',
      '--mode',
      type='choice',
      action='store',
      choices=['list', 'html', 'bundle', 'compressed_bundle', 'copy'],
      default='list',
      metavar='MODE',
      help=("Output mode.  One of 'list', 'html', 'bundle', "
            "'compressed_bundle' or 'copy'."))
  parser.add_option(
      '-x',
      '--exclude',
      action='append',
      default=[],
      help=('Exclude files whose full path contains a match for the given '
            'regular expression.  Does not apply to filenames given as '
            'arguments or with the -M option.'))
  parser.add_option(
      '--depfile', metavar='FILENAME',
      help='Store .d style dependencies in FILENAME')
  parser.add_option(
      '--stampfile', metavar='FILENAME',
      help='Write empty stamp file')
  return parser


def main():
  options, args = CreateOptionParser().parse_args()
  if len(args) < 1:
    Die('At least one top-level source file must be specified.')
  if options.depfile and not options.output_file:
    Die('--depfile requires an output file')
  will_output_source_text = options.mode in ('bundle', 'compressed_bundle')
  path_rewriter = PathRewriter(options.prefix_map)
  exclude = [re.compile(r) for r in options.exclude]
  sources = ReadSources(options.roots, options.modules + args,
                        will_output_source_text or len(options.modules) > 0,
                        path_rewriter, exclude)
  if will_output_source_text:
    _MarkAsCompiled(sources)
  bundle = Bundle()
  if len(options.roots) > 0 or len(options.modules) > 0:
    CalcDeps(bundle, sources, args)
  bundle.Add((sources[name] for name in args))
  if options.mode == 'copy':
    if options.dest_dir is None:
      Die('Must specify --dest_dir when copying.')
    LinkOrCopyFiles(bundle.GetSources(), options.dest_dir)
  else:
    if options.output_file:
      out_file = open(options.output_file, 'w')
    else:
      out_file = sys.stdout
    try:
      WriteOutput(bundle, options.mode, out_file, options.dest_dir)
    finally:
      if options.output_file:
        out_file.close()
  if options.stampfile:
    WriteStampfile(options.stampfile)
  if options.depfile:
    WriteDepfile(options.depfile, options.output_file, bundle.GetInPaths())


if __name__ == '__main__':
  main()
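

# -----------------------------------------------------------------------------
# Hedged usage sketch (an illustration, never called): mirrors main() to show
# how the pieces compose programmatically.  The paths 'js/' and 'js/app.js'
# are placeholders; in practice this script is driven from the command line
# with flags like "-m bundle -r js js/app.js".
# -----------------------------------------------------------------------------
def _ExampleBundleToStdout():
  rewriter = PathRewriter(['js/:out/'])
  sources = ReadSources(roots=['js/'],
                        source_files=['js/app.js'],
                        need_source_text=True,
                        path_rewriter=rewriter)
  _MarkAsCompiled(sources)
  bundle = Bundle()
  CalcDeps(bundle, sources, ['js/app.js'])
  bundle.Add(sources['js/app.js'])
  WriteOutput(bundle, 'bundle', sys.stdout, None)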