from __future__ import unicode_literals import os import subprocess import sys import time from .common import AudioConversionError, PostProcessor from ..compat import ( compat_subprocess_get_DEVNULL, ) from ..utils import ( encodeArgument, encodeFilename, get_exe_version, is_outdated_version, PostProcessingError, prepend_extension, shell_quote, subtitles_filename, ) class FFmpegPostProcessorError(PostProcessingError): pass class FFmpegPostProcessor(PostProcessor): def __init__(self, downloader=None, deletetempfiles=False): PostProcessor.__init__(self, downloader) self._versions = self.get_versions() self._deletetempfiles = deletetempfiles def check_version(self): if not self._executable: raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.') required_version = '10-0' if self._uses_avconv() else '1.0' if is_outdated_version( self._versions[self._executable], required_version): warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % ( self._executable, self._executable, required_version) if self._downloader: self._downloader.report_warning(warning) @staticmethod def get_versions(): programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe'] return dict((p, get_exe_version(p, args=['-version'])) for p in programs) @property def _executable(self): if self._downloader.params.get('prefer_ffmpeg', False): prefs = ('ffmpeg', 'avconv') else: prefs = ('avconv', 'ffmpeg') for p in prefs: if self._versions[p]: return p return None @property def _probe_executable(self): if self._downloader.params.get('prefer_ffmpeg', False): prefs = ('ffprobe', 'avprobe') else: prefs = ('avprobe', 'ffprobe') for p in prefs: if self._versions[p]: return p return None def _uses_avconv(self): return self._executable == 'avconv' def run_ffmpeg_multiple_files(self, input_paths, out_path, opts): self.check_version() files_cmd = [] for path in input_paths: files_cmd.extend(['-i', encodeFilename(path, True)]) cmd = ([self._executable, '-y'] + files_cmd + [encodeArgument(o) for o in opts] + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)]) if self._downloader.params.get('verbose', False): self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd)) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: stderr = stderr.decode('utf-8', 'replace') msg = stderr.strip().split('\n')[-1] raise FFmpegPostProcessorError(msg) if self._deletetempfiles: for ipath in input_paths: os.remove(ipath) def run_ffmpeg(self, path, out_path, opts): self.run_ffmpeg_multiple_files([path], out_path, opts) def _ffmpeg_filename_argument(self, fn): # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details if fn.startswith('-'): return './' + fn return fn class FFmpegExtractAudioPP(FFmpegPostProcessor): def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False): FFmpegPostProcessor.__init__(self, downloader) if preferredcodec is None: preferredcodec = 'best' self._preferredcodec = preferredcodec self._preferredquality = preferredquality self._nopostoverwrites = nopostoverwrites def get_audio_codec(self, path): if not self._probe_executable: raise PostProcessingError('ffprobe or avprobe not found. 
Please install one.') try: cmd = [ self._probe_executable, '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path), True)] handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE) output = handle.communicate()[0] if handle.wait() != 0: return None except (IOError, OSError): return None audio_codec = None for line in output.decode('ascii', 'ignore').split('\n'): if line.startswith('codec_name='): audio_codec = line.split('=')[1].strip() elif line.strip() == 'codec_type=audio' and audio_codec is not None: return audio_codec return None def run_ffmpeg(self, path, out_path, codec, more_opts): if codec is None: acodec_opts = [] else: acodec_opts = ['-acodec', codec] opts = ['-vn'] + acodec_opts + more_opts try: FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts) except FFmpegPostProcessorError as err: raise AudioConversionError(err.msg) def run(self, information): path = information['filepath'] filecodec = self.get_audio_codec(path) if filecodec is None: raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe') uses_avconv = self._uses_avconv() more_opts = [] if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'): if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']: # Lossless, but in another container acodec = 'copy' extension = 'm4a' more_opts = ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc'] elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']: # Lossless if possible acodec = 'copy' extension = filecodec if filecodec == 'aac': more_opts = ['-f', 'adts'] if filecodec == 'vorbis': extension = 'ogg' else: # MP3 otherwise. acodec = 'libmp3lame' extension = 'mp3' more_opts = [] if self._preferredquality is not None: if int(self._preferredquality) < 10: more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality] else: more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k'] else: # We convert the audio (lossy) acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec] extension = self._preferredcodec more_opts = [] if self._preferredquality is not None: # The opus codec doesn't support the -aq option if int(self._preferredquality) < 10 and extension != 'opus': more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality] else: more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k'] if self._preferredcodec == 'aac': more_opts += ['-f', 'adts'] if self._preferredcodec == 'm4a': more_opts += ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc'] if self._preferredcodec == 'vorbis': extension = 'ogg' if self._preferredcodec == 'wav': extension = 'wav' more_opts += ['-f', 'wav'] prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups new_path = prefix + sep + extension # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly. 
if new_path == path: self._nopostoverwrites = True try: if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)): self._downloader.to_screen('[youtube] Post-process file %s exists, skipping' % new_path) else: self._downloader.to_screen('[' + self._executable + '] Destination: ' + new_path) self.run_ffmpeg(path, new_path, acodec, more_opts) except: etype, e, tb = sys.exc_info() if isinstance(e, AudioConversionError): msg = 'audio conversion failed: ' + e.msg else: msg = 'error running ' + self._executable raise PostProcessingError(msg) # Try to update the date time for extracted audio file. if information.get('filetime') is not None: try: os.utime(encodeFilename(new_path), (time.time(), information['filetime'])) except: self._downloader.report_warning('Cannot update utime of audio file') information['filepath'] = new_path return self._nopostoverwrites, information class FFmpegVideoConvertorPP(FFmpegPostProcessor): def __init__(self, downloader=None, preferedformat=None): super(FFmpegVideoConvertorPP, self).__init__(downloader) self._preferedformat = preferedformat def run(self, information): path = information['filepath'] prefix, sep, ext = path.rpartition('.') outpath = prefix + sep + self._preferedformat if information['ext'] == self._preferedformat: self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat)) return True, information self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath) self.run_ffmpeg(path, outpath, []) information['filepath'] = outpath information['format'] = self._preferedformat information['ext'] = self._preferedformat return False, information class FFmpegEmbedSubtitlePP(FFmpegPostProcessor): # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', 'ab': 'abk', 'ae': 'ave', 'af': 'afr', 'ak': 'aka', 'am': 'amh', 'an': 'arg', 'ar': 'ara', 'as': 'asm', 'av': 'ava', 'ay': 'aym', 'az': 'aze', 'ba': 'bak', 'be': 'bel', 'bg': 'bul', 'bh': 'bih', 'bi': 'bis', 'bm': 'bam', 'bn': 'ben', 'bo': 'bod', 'br': 'bre', 'bs': 'bos', 'ca': 'cat', 'ce': 'che', 'ch': 'cha', 'co': 'cos', 'cr': 'cre', 'cs': 'ces', 'cu': 'chu', 'cv': 'chv', 'cy': 'cym', 'da': 'dan', 'de': 'deu', 'dv': 'div', 'dz': 'dzo', 'ee': 'ewe', 'el': 'ell', 'en': 'eng', 'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'fas', 'ff': 'ful', 'fi': 'fin', 'fj': 'fij', 'fo': 'fao', 'fr': 'fra', 'fy': 'fry', 'ga': 'gle', 'gd': 'gla', 'gl': 'glg', 'gn': 'grn', 'gu': 'guj', 'gv': 'glv', 'ha': 'hau', 'he': 'heb', 'hi': 'hin', 'ho': 'hmo', 'hr': 'hrv', 'ht': 'hat', 'hu': 'hun', 'hy': 'hye', 'hz': 'her', 'ia': 'ina', 'id': 'ind', 'ie': 'ile', 'ig': 'ibo', 'ii': 'iii', 'ik': 'ipk', 'io': 'ido', 'is': 'isl', 'it': 'ita', 'iu': 'iku', 'ja': 'jpn', 'jv': 'jav', 'ka': 'kat', 'kg': 'kon', 'ki': 'kik', 'kj': 'kua', 'kk': 'kaz', 'kl': 'kal', 'km': 'khm', 'kn': 'kan', 'ko': 'kor', 'kr': 'kau', 'ks': 'kas', 'ku': 'kur', 'kv': 'kom', 'kw': 'cor', 'ky': 'kir', 'la': 'lat', 'lb': 'ltz', 'lg': 'lug', 'li': 'lim', 'ln': 'lin', 'lo': 'lao', 'lt': 'lit', 'lu': 'lub', 'lv': 'lav', 'mg': 'mlg', 'mh': 'mah', 'mi': 'mri', 'mk': 'mkd', 'ml': 'mal', 'mn': 'mon', 'mr': 'mar', 'ms': 'msa', 'mt': 'mlt', 'my': 'mya', 'na': 'nau', 'nb': 'nob', 'nd': 'nde', 'ne': 'nep', 'ng': 'ndo', 'nl': 'nld', 'nn': 'nno', 'no': 'nor', 'nr': 'nbl', 'nv': 'nav', 'ny': 'nya', 'oc': 'oci', 'oj': 'oji', 'om': 'orm', 'or': 'ori', 'os': 'oss', 'pa': 'pan', 
'pi': 'pli', 'pl': 'pol', 'ps': 'pus', 'pt': 'por', 'qu': 'que', 'rm': 'roh', 'rn': 'run', 'ro': 'ron', 'ru': 'rus', 'rw': 'kin', 'sa': 'san', 'sc': 'srd', 'sd': 'snd', 'se': 'sme', 'sg': 'sag', 'si': 'sin', 'sk': 'slk', 'sl': 'slv', 'sm': 'smo', 'sn': 'sna', 'so': 'som', 'sq': 'sqi', 'sr': 'srp', 'ss': 'ssw', 'st': 'sot', 'su': 'sun', 'sv': 'swe', 'sw': 'swa', 'ta': 'tam', 'te': 'tel', 'tg': 'tgk', 'th': 'tha', 'ti': 'tir', 'tk': 'tuk', 'tl': 'tgl', 'tn': 'tsn', 'to': 'ton', 'tr': 'tur', 'ts': 'tso', 'tt': 'tat', 'tw': 'twi', 'ty': 'tah', 'ug': 'uig', 'uk': 'ukr', 'ur': 'urd', 'uz': 'uzb', 've': 'ven', 'vi': 'vie', 'vo': 'vol', 'wa': 'wln', 'wo': 'wol', 'xh': 'xho', 'yi': 'yid', 'yo': 'yor', 'za': 'zha', 'zh': 'zho', 'zu': 'zul', } def __init__(self, downloader=None, subtitlesformat='srt'): super(FFmpegEmbedSubtitlePP, self).__init__(downloader) self._subformat = subtitlesformat @classmethod def _conver_lang_code(cls, code): """Convert language code from ISO 639-1 to ISO 639-2/T""" return cls._lang_map.get(code[:2]) def run(self, information): if information['ext'] != 'mp4': self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4 files') return True, information if not information.get('subtitles'): self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed') return True, information sub_langs = [key for key in information['subtitles']] filename = information['filepath'] input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs] opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy'] for (i, lang) in enumerate(sub_langs): opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text']) lang_code = self._conver_lang_code(lang) if lang_code is not None: opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code]) opts.extend(['-f', 'mp4']) temp_filename = filename + '.temp' self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename) self.run_ffmpeg_multiple_files(input_files, temp_filename, opts) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return True, information class FFmpegMetadataPP(FFmpegPostProcessor): def run(self, info): metadata = {} if info.get('title') is not None: metadata['title'] = info['title'] if info.get('upload_date') is not None: metadata['date'] = info['upload_date'] if info.get('uploader') is not None: metadata['artist'] = info['uploader'] elif info.get('uploader_id') is not None: metadata['artist'] = info['uploader_id'] if not metadata: self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add') return True, info filename = info['filepath'] temp_filename = prepend_extension(filename, 'temp') if info['ext'] == 'm4a': options = ['-vn', '-acodec', 'copy'] else: options = ['-c', 'copy'] for (name, value) in metadata.items(): options.extend(['-metadata', '%s=%s' % (name, value)]) self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename) self.run_ffmpeg(filename, temp_filename, options) os.remove(encodeFilename(filename)) os.rename(encodeFilename(temp_filename), encodeFilename(filename)) return True, info class FFmpegMergerPP(FFmpegPostProcessor): def run(self, info): filename = info['filepath'] args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest'] self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename) self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args) return True, info class FFmpegAudioFixPP(FFmpegPostProcessor): 
    def run(self, info):
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        options = ['-vn', '-acodec', 'copy']
        self._downloader.to_screen('[ffmpeg] Fixing audio file "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, options)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, info
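# --- Illustrative sketch (not part of the module above) ---------------------
# The post-processors above all follow the same pattern: build an ffmpeg
# argument list, run it, and replace or rename the output file.  The helper
# below is a minimal, standalone version of that pattern for audio extraction.
# It assumes only that an `ffmpeg` binary is on PATH and skips the version
# probing, avconv fallback, and filename encoding handled by the module above.
import subprocess


def extract_audio_sketch(src_path, dst_path, codec='libmp3lame', bitrate='192k'):
    cmd = [
        'ffmpeg', '-y',       # overwrite the output without prompting
        '-i', src_path,       # input media file
        '-vn',                # drop the video stream
        '-acodec', codec,     # audio codec ('copy' keeps the original stream)
        '-b:a', bitrate,      # target audio bitrate
        dst_path,
    ]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, stderr = p.communicate()
    if p.returncode != 0:
        # Surface ffmpeg's last stderr line, as FFmpegPostProcessor does.
        raise RuntimeError(stderr.decode('utf-8', 'replace').strip().split('\n')[-1])
    return dst_path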
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ## Cassandra Web Browser import sys try: from mod_python import util # FieldStorage form processing except: pass try: # Choice of json parser import json except: import simplejson as json import pprint # JSON printer import cutejson # My own JSON printer - cute! try: sys.path.append('../../interface/gen-py') except: pass from cassandra import Cassandra # Cassandra thrift API from thrift import Thrift from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol from cgi import escape, FieldStorage from datetime import datetime # Most timestamp handling import time # clock(), time() from copy import copy # Cloning objects import urllib # URL-encoding form parameters ### Action handlers - Cassandra ########################################################################################### def _cassandraConnect(params): # Make socket transport = TSocket.TSocket(params['host'], params['port']) # Buffering is critical. Raw sockets are very slow transport = TTransport.TBufferedTransport(transport) # Wrap in a protocol protocol = TBinaryProtocol.TBinaryProtocol(transport) # Create a client to use the protocol encoder #lient = Calculator.Client(protocol) client = Cassandra.Client(protocol) # Connect! transport.open() return {'client':client, 'transport':transport } def _cassandraGetSliceFrom(params): try: cassandra = _cassandraConnect(params) s = "" range = Cassandra.SliceRange(params['search'],params['searchend'],params['descending'],params['count']+params['offset']) predicate = Cassandra.SlicePredicate(None, range) parent = Cassandra.ColumnParent(params['cf'], None) # No supercolumn, we are not using them ATM result = cassandra['client'].get_slice(params['table'], params['row'], parent, predicate, 1) ## Close! cassandra['transport'].close() n = len(result)-params['offset'] if (n<0): n = 0 s += "Obtained: "+str(n)+" columns (click on column data to open)" s += ' <span style="color:gray">- query predicate: '+str(predicate) + "</span><br>\n" s += "%s<table class=\"main\">" % _navigationButtonsHtml(params) strcol = """\ <tr class=\"%s\"> <td class="rownum">%d</td> <td><tt>%s</tt></td> <td class=\"link\"><a href=\"%s\"><tt>%s</tt></a></td> <td>%s<br/><span class="parsedts">%s</span></td> <td><a title="Delete column" href=\"#\" onclick=\"javascript:if(confirm('Are you sure you want to delete this column?'))window.open('%s')\">&times;</a></td> </tr> """ i=0 deleteParams = {\ "host":params["host"],\ "port":params["port"],\ "table":params["table"],\ "row":params["row"],\ "cf":params["cf"]\ } clazz = "even" for csp in result: col = csp.column # Assuming we are only querying columns - need to change this for supercolumns? 
s+="<!-- a column -- >\n" if i>=params['offset']: targetmap = copy(params) targetmap['open'] = col.name target = "view?%s" % urllib.urlencode(targetmap) deleteParams['columnName'] = col.name svalue = col.value if len(svalue)>120: svalue = escape(svalue[0:120])+_htmlEllipsis() else: svalue = escape(svalue) s += strcol % (clazz, i, col.name, target, svalue, str(col.timestamp), _cassandraTimestampToDatetime(col.timestamp), "delete?"+urllib.urlencode(deleteParams)) if clazz=="odd": clazz = "even" else: clazz = "odd" i=i+1 s += "</table>\n" s += _navigationButtonsHtml(params) return s except Thrift.TException, tx: print '%s' % (tx.message) return 'TException: %s' % (tx.message) except Cassandra.InvalidRequestException, ire: print '%s' % (ire.why) return 'InvalidRequestException: %s' % (ire.why) def _cassandraGetColumn(params): try: cassandra = _cassandraConnect(params) pp = pprint.PrettyPrinter(indent = 2) result = cassandra['client'].get(params['table'], params['row'], Cassandra.ColumnPath(params["cf"], None, params['open']), 1) col = result.column # Close! cassandra['transport'].close() s = "" # Headers s += "Column: %s<br/>\nTimestamp: %s - <span class=\"parsedts\">%s</span>" % \ (col.name, col.timestamp, _cassandraTimestampToDatetime(col.timestamp)) jsonObj = None error = "" try: jsonObj = json.loads(col.value) except ValueError, exc: error = str(exc) # Colorful pretty printed if jsonObj!=None: s += "<h3>Data is a json object!</h3>\n<div class=\"columnData\"><tt>\n"+cutejson.cutejson(jsonObj)+"\n</tt></div>" else: s += "<br/><br/>(Not a valid json string) - "+str(error) # Plain data s += "<h3>Data</h3><div class=\"columnData\">%s</div><br/>" % escape(col.value) if jsonObj!=None: s += "<h3>By Python prettyprinter:</h3><div class=\"columnData\"><pre>%s</pre></div>" % json.dumps(json.loads(col.value), indent=4, sort_keys=True) # s += "Formatted json:<div class=\"columnData\"><pre>%s</pre></div>" % pp.pformat(json.loads(result.value)) return s except Thrift.TException, tx: print '%s' % (tx.message) return 'Thrift Error: %s' % (tx.message) except Cassandra.InvalidRequestException, ire: print '%s' % (ire.message) return 'Invalid Request Error: %s' % (ire.message) def _cassandraDeleteColumn(params): try: cassandra = _cassandraConnect(params) cfcol = Cassandra.ColumnPath(params["cf"],None,params['columnName']) cassandra['client'].remove(params['table'], params['row'], cfcol, _nowAsCassandraTimestamp(), 1) cassandra['transport'].close() s = "Column deleted. 
(You can delete this window, and refresh the launcher window contents)<ul><li>table: %s</li><li>Row: %s</li><li>CF:Col: %s</li>" %\ (params['table'], params['row'], cfcol) return s except Thrift.TException, tx: print '%s' % (tx.message) return 'Thrift Error: %s' % (tx.message) def _cassandraInsert(params, colName, columnValue): try: cassandra = _cassandraConnect(params) cassandra['client'].insert(params['table'], params['row'], Cassandra.ColumnPath(params["cf"],None,colName), columnValue, _nowAsCassandraTimestamp(), 1) cassandra['transport'].close() return "Column '%s' inserted" % escape(colName) except Thrift.TException, tx: print '%s' % (tx.message) return 'Error inserting column - Thrift Error: %s' % (tx.message) except Cassandra.InvalidRequestException, ire: print '%s' % (ire.message) return 'Error inserting column - Invalid Request Error: %s' % (ire.message) ### Request handlers ########################################################################################### def index(req): # Defaults to "list": return list(req) def list(req): params = _processForm(req) return _list(params) def _list(params): t0 = time.clock() if params['action']=="insert": if (params['newname']!=None and params['value']!=None): message="<div class=\"message\">%s</div>" % _cassandraInsert(params,params['newname'],params['value']) ## Change params so that we browse on the inserted column params['search'] = params['newname'] params['searchend'] = '' params['descending'] = 0 params['offset'] = 0 else: message="<div class=\"message\">%s</div>" % "Please specify a column name and a column value! (Press your browser's Back button)" else: message="" paramsTable = _formatParams(params) cassandraData = _cassandraGetSliceFrom(params); t1 = time.clock() return _mainHtml() % (params['host'], css(), paramsTable+message+cassandraData+_htmlFooter(t1-t0)) def view(req): params = _processForm(req) return _view(params) def _view(params): t0 = time.clock() paramsTable = _formatParams(params) if params['open']==None: colstr = "<b>No column specified!</b>" else: colstr = _cassandraGetColumn(params); t1 = time.clock() return _mainHtml() % (params['host'], css(), paramsTable+colstr+_htmlFooter(t1-t0)) def delete(req): params = _processForm(req) return _delete(params) def _delete(params): t0 = time.clock() paramsTable = _formatParams(params) if params['columnName']==None: colstr = "<b>No column specified!</b>" else: colstr = _cassandraDeleteColumn(params) t1 = time.clock() return _mainHtml() % (params['host'], css(), paramsTable+colstr+_htmlFooter(t1-t0)) ### HTML formatting functions ########################################################################################### def _formatParams(params): s = "<form action=\"list\">" s += "<div>" s += "Host: <input size=\"30\" name=\"host\" value=\"%s\" title=\"%s\"></input>:\n" % (params['host'] , "Server address") s += "<input size=\"4\" name=\"port\" value=\"%s\" title=\"%s\"></input>, \n" % (params['port'] , "Thrift port") s += "Keyspace: <input size=\"10\" name=\"table\" value=\"%s\" title=\"%s\"></input>, \n" % (params['table'] , "") s += "Row: <input size=\"60\" name=\"row\" value=\"%s\" title=\"%s\"></input>, \n" % (params['row'] , "") s += "CF: <input size=\"30\" name=\"cf\" value=\"%s\" title=\"%s\"></input>\n" % (params['cf'] , "") s += "</div><div class=\"onNotInserting\">" s += "start value: <input size=\"60\" name=\"search\" value=\"%s\" title=\"%s\"></input>, \n" % (params['search'] , "Start value for the search (optional)") s += "end value: <input size=\"60\" 
name=\"searchend\" value=\"%s\" title=\"%s\"></input>, \n" % (params['searchend'], "End value for the search (optional)") s += "offset: <input size=\"4\" name=\"offset\" value=\"%s\" title=\"%s\"></input>, \n" % \ (params['offset'] , "Offset - This is used to allow paging in the web UI. Note that offset+count elements will be queried to cassandra; a big number impacts performance!") s += "count: <input size=\"4\" name=\"count\" value=\"%s\" title=\"%s\"></input>\n" % (params['count'] , "") if params['descending']==1: checked = "checked=\"true\"" else: checked = "" s += " - <input type=\"checkbox\" name=\"descending\" value=\"1\" %s>Reversed</input><br/>\n" % checked s += "</div>" s += "<input id=\"querybutton\" class=\"submit onNotInserting\" type=\"submit\"/>\n" s += """\ &nbsp;&nbsp;<a class="onNotInserting inserthelp" onclick="javascript:document.getElementsByTagName('body')[0].className = 'inserting';" href="#">Insert column</a> <div class="onInserting"> <div> Name: <input size=\"60\" name=\"newname\" value=\"\" title=\"Name for the new column\"></input> </div> <div> Value: <textarea rows="20" cols="60" name="value" title="Value of the new column"></textarea> </div> <input id="insertbutton" class="submit" type="submit" value="insert" name="action" title="Insert the column on the keyspace, row and columnfamily specified above."/> &nbsp;&nbsp;<a class="" onclick="javascript:document.getElementsByTagName('body')[0].className = '';" href="#">Cancel</a> </div> """ s += "</form>" return s def _htmlEllipsis(): return " <span style=\"color:red\">(&hellip;)</span>" def _htmlFooter(ts=0): return "<center style=\"color:gray\">Rendered in %fms - (c)Copyright Apache Software Foundation, 2009</center>" \ % ts def css(): return cutejson.css()+"""\ body { font-family: Sans-serif; font-size: 8pt; } td { font-size: 8pt; } pre { margin: 0; } table { border: 1px solid gray; } table.main td.link { cursor: pointer; } table.main tr.odd { background: #f8f8f8; } table.main td.link:hover { background: #ffffcc; } table.main td { padding: 1px 5px; } table.main td a, table.maintd a:visited { text-decoration: none; color: black; } a, a:visited { color: #0000df; } div.columnData { overflow: auto; margin: 10px 0px; width: 90%; padding: 5px; border: 1px solid gray; font-family: monospace; } .rownum, .parsedts { color: gray; } .navbar { margin: 6px 0; font-size: 110%; } .navbar a { text-decoration: none; padding: 1px; } .navbar a:hover { color: white; background: #0000df;} input, textarea { background: #f8f8f8; border: 1px solid gray; padding: 2px; } input.submit { font-weight: bold; background: #FFFFBB; cursor: pointer; } /*#querybutton { vertical-align: top; margin-top: 4px; } #insertbutton, .inserthelp { vertical-align: bottom; margin-bottom: 4px; }*/ textarea { vertical-align: baseline; } div.message { padding: 4px; border: 1px solid blue; background: #f8f8f8; width: 90%; color: blue; margin: 8px 0px; } .onInserting { display:none } body.inserting div.onInserting { display: block; } body.inserting span.onInserting { display: inline; } body.inserting .onNotInserting { display: none; } """ def _navigationButtonsHtml(params): s = "<div class=\"navbar\">" paramsFst = copy(params) paramsFst['offset'] = 0 s += "<a href=\"list?%s\">&lArr; &lArr; First</a> " % urllib.urlencode(paramsFst) s += "&nbsp;&nbsp;&bull;&nbsp;&nbsp;" if params['offset']>0: paramsPrev = copy(params) paramsPrev['offset'] = paramsPrev['offset'] - paramsPrev['count'] if paramsPrev['offset']<0: paramsPrev['offset'] = 0 s += "<a href=list?%s>&lArr; 
Previous</a> " % urllib.urlencode(paramsPrev) else: s += "&lArr; Previous" s += "&nbsp;&nbsp;&bull;&nbsp;&nbsp;" s += "<a href=list?%s>Refresh</a> " % urllib.urlencode(params) s += "&nbsp;&nbsp;&bull;&nbsp;&nbsp;" paramsNext = copy(params) paramsNext['offset'] = paramsNext['offset'] + paramsNext['count'] s += "<a href=list?%s>&rArr; Next</a> " % urllib.urlencode(paramsNext) s += "</div>" return s def _mainHtml(): return """\ <html> <title>Cassandra on %s</title> <head><style>%s</style></head> <body> <h2>Cassandra browser</h2> %s </body></html> """ ### Misc functions ########################################################################################### def _processForm(req): form = util.FieldStorage(req) pyopts = req.get_options() defhost = "localhost" defport = 9160 deftable = "Keyspace" defrow = "" defcf = "" # ugly conditionals, 2.5 compatible if "CassandraKeyspace" in pyopts: deftable = pyopts["CassandraKeyspace"] if "CassandraHost" in pyopts: defhost = pyopts["CassandraHost"] if "CassandraPort" in pyopts: defport = pyopts["CassandraPort"] if "CassandraRow" in pyopts: defrow = pyopts["CassandraRow"] if "CassandraColumnFamily" in pyopts: defcf = pyopts["CassandraColumnFamily"] return getRequiredParameters(form, defhost, defport, deftable, defrow, defcf) def getRequiredParameters(form, defhost, defport, deftable, defrow, defcf): params = {} # Generic params, used in most actions params['table'] = str(form.getfirst('table', deftable)) params['host'] = str(form.getfirst('host', defhost)) params['port'] = int(form.getfirst('port', defport)) params['row'] = str(form.getfirst('row', defrow)) params['cf'] = str(form.getfirst('cf', defcf)) params['search'] = str(form.getfirst('search', "")) params['searchend'] = str(form.getfirst('searchend', "")) params['offset'] = int(form.getfirst('offset', 0)) params['count'] = int(form.getfirst('count', 20)) params['descending'] = int(form.getfirst('descending', 0)) # For column inserter params['value'] = form.getfirst('value', None) params['action'] = form.getfirst('action', None) params['newname'] = form.getfirst('newname', None) # For column viewer params['open'] = form.getfirst('open', None) params['columnName'] = form.getfirst('columnName', None) # For deletion -- TODO: unify this with other names! 
    return params


def _cassandraTimestampToDatetime(colTimestamp):
    return datetime.fromtimestamp(colTimestamp/1000)


def _nowAsCassandraTimestamp():
    return long(time.time()*1000)


def _index_wsgi(environ, start_response):
    path = environ.get('PATH_INFO', '')
    found_path = None
    for path_option in ['list', 'view', 'delete']:
        if path_option in path:
            found_path = path_option
            break
    if not found_path:
        start_response('404 NOT FOUND', [('Content-Type', 'text/html')])
        return 'Path must be list, view, or delete'
    start_response('200 OK', [('Content-Type', 'text/html')])
    form = FieldStorage(fp=environ['wsgi.input'], environ=environ)
    params = getRequiredParameters(form, 'localhost', 9160, 'Keyspace1', '', 'Standard1')
    try:
        retval = globals()['_' + found_path](params)
    except KeyError, e:
        return ['Request missing parameter(s):' + str(e)]
    return [retval]


if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-a", "--address", dest="host",
                      help="Host address on which to listen", default="localhost")
    parser.add_option("-p", "--port", dest="port",
                      help="Port on which to listen", default="8111")
    (options, args) = parser.parse_args()
    srv = make_server(options.host, int(options.port), _index_wsgi)
    srv.serve_forever()
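# --- Illustrative sketch (not part of the module above) ---------------------
# How the browser's offset/count paging maps onto the old Thrift get_slice()
# call used in _cassandraGetSliceFrom(): the Thrift API can only return "the
# first N columns from a start name", so the code over-fetches offset+count
# columns and discards the first `offset` locally.  This hypothetical helper
# reuses the module's _cassandraConnect() and the generated Cassandra bindings,
# and takes the same params dict that getRequiredParameters() builds.
def _paged_columns_sketch(params):
    cassandra = _cassandraConnect(params)
    try:
        slice_range = Cassandra.SliceRange(
            params['search'],                    # start column name ('' = from the beginning)
            params['searchend'],                 # end column name ('' = unbounded)
            params['descending'],                # reversed iteration flag
            params['count'] + params['offset'])  # over-fetch to make paging possible
        predicate = Cassandra.SlicePredicate(None, slice_range)
        parent = Cassandra.ColumnParent(params['cf'], None)  # plain columns, no supercolumn
        result = cassandra['client'].get_slice(
            params['table'], params['row'], parent, predicate, 1)
    finally:
        cassandra['transport'].close()
    # Drop the columns that belong to earlier pages.
    return [csp.column for csp in result[params['offset']:]]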
import time import logging from sevenbridges.models.bulk import BulkRecord from sevenbridges.decorators import inplace_reload from sevenbridges.errors import ( SbgError, TaskValidationError ) from sevenbridges.meta.fields import ( HrefField, UuidField, StringField, CompoundField, DateTimeField, BooleanField, DictField ) from sevenbridges.meta.resource import Resource from sevenbridges.meta.transformer import Transform from sevenbridges.models.app import App from sevenbridges.models.file import File from sevenbridges.models.enums import FileApiFormats, TaskStatus from sevenbridges.models.compound.price import Price from sevenbridges.models.compound.tasks.batch_by import BatchBy from sevenbridges.models.compound.tasks.batch_group import BatchGroup from sevenbridges.models.compound.tasks.execution_status import ExecutionStatus from sevenbridges.models.compound.tasks.input import Input from sevenbridges.models.compound.tasks.output import Output from sevenbridges.models.execution_details import ExecutionDetails logger = logging.getLogger(__name__) class Task(Resource): """ Central resource for managing tasks. """ _URL = { 'query': '/tasks', 'get': '/tasks/{id}', 'delete': '/tasks/{id}', 'run': '/tasks/{id}/actions/run', 'clone': '/tasks/{id}/actions/clone', 'abort': '/tasks/{id}/actions/abort', 'execution_details': "/tasks/{id}/execution_details", 'bulk_get': '/bulk/tasks/get', } href = HrefField(read_only=True) id = UuidField(read_only=True) name = StringField(read_only=False) status = StringField(read_only=True) description = StringField(read_only=False) project = StringField(read_only=False) app = StringField(read_only=False) type = StringField(read_only=True) created_by = StringField(read_only=True) executed_by = StringField(read_only=True) start_time = DateTimeField(read_only=True) created_time = DateTimeField(read_only=True) end_time = DateTimeField(read_only=True) batch = BooleanField(read_only=False) batch_by = CompoundField(BatchBy, read_only=False) batch_group = CompoundField(BatchGroup, read_only=True) batch_input = StringField(read_only=False) parent = StringField(read_only=True) execution_status = CompoundField(ExecutionStatus, read_only=True) errors = DictField(read_only=True) warnings = DictField(read_only=True) price = CompoundField(Price, read_only=True) inputs = CompoundField(Input, read_only=False) outputs = CompoundField(Output, read_only=True) execution_settings = DictField(read_only=True) use_interruptible_instances = BooleanField(read_only=False) origin = StringField(read_only=True, name='origin_id') def __str__(self): return f'<Task: id={self.id}>' def __eq__(self, other): if type(other) is not type(self): return False return self is other or self.id == other.id @classmethod def query(cls, project=None, status=None, batch=None, parent=None, created_from=None, created_to=None, started_from=None, started_to=None, ended_from=None, ended_to=None, offset=None, limit=None, order_by=None, order=None, origin=None, api=None): """ Query (List) tasks. Date parameters may be both strings and python date objects. :param project: Target project. optional. :param status: Task status. :param batch: Only batch tasks. :param parent: Parent batch task identifier. :param ended_to: All tasks that ended until this date. :param ended_from: All tasks that ended from this date. :param started_to: All tasks that were started until this date. :param started_from: All tasks that were started from this date. :param created_to: All tasks that were created until this date. 
:param created_from: All tasks that were created from this date. :param offset: Pagination offset. :param limit: Pagination limit. :param order_by: Property to order by. :param order: Ascending or descending ordering. :param origin: Entity that created the task, e.g. automation run, if task was created by an automation run. :param api: Api instance. :return: Collection object. """ api = api or cls._API if parent: parent = Transform.to_task(parent) if project: project = Transform.to_project(project) if created_from: created_from = Transform.to_datestring(created_from) if created_to: created_to = Transform.to_datestring(created_to) if started_from: started_from = Transform.to_datestring(started_from) if started_to: started_to = Transform.to_datestring(started_to) if ended_from: ended_from = Transform.to_datestring(ended_from) if ended_to: ended_to = Transform.to_datestring(ended_to) if origin: origin = Transform.to_automation_run(origin) return super()._query( url=cls._URL['query'], project=project, status=status, batch=batch, parent=parent, created_from=created_from, created_to=created_to, started_from=started_from, started_to=started_to, ended_from=ended_from, ended_to=ended_to, offset=offset, limit=limit, order_by=order_by, order=order, fields='_all', origin_id=origin, api=api ) @classmethod def create(cls, name, project, app, revision=None, batch_input=None, batch_by=None, inputs=None, description=None, run=False, disable_batch=False, interruptible=None, execution_settings=None, api=None): """ Creates a task on server. :param name: Task name. :param project: Project identifier. :param app: CWL app identifier. :param revision: CWL app revision. :param batch_input: Batch input. :param batch_by: Batch criteria. :param inputs: Input map. :param description: Task description. :param run: True if you want to run a task upon creation. :param disable_batch: If True disables batching of a batch task. :param interruptible: If True interruptible instance will be used. :param execution_settings: Execution settings for the task. :param api: Api instance. :return: Task object. :raises: TaskValidationError if validation Fails. :raises: SbgError if any exception occurs during request. """ task_data = {} params = {} project = Transform.to_project(project) app_id = Transform.to_app(app) if revision: app_id = f'{app_id}/{revision}' else: if isinstance(app, App): app_id = f'{app_id}/{app.revision}' task_inputs = { 'inputs': Task._serialize_inputs(inputs) if inputs else {} } if batch_input and batch_by: task_data['batch_input'] = batch_input task_data['batch_by'] = batch_by if disable_batch: params.update({'batch': False}) task_meta = { 'name': name, 'project': project, 'app': app_id, 'description': description, } task_data.update(task_meta) task_data.update(task_inputs) if interruptible is not None: task_data['use_interruptible_instances'] = interruptible if execution_settings: task_data.update({'execution_settings': execution_settings}) if run: params.update({'action': 'run'}) api = api if api else cls._API created_task = api.post(cls._URL['query'], data=task_data, params=params).json() if run and 'errors' in created_task and created_task['errors']: raise TaskValidationError( 'Unable to run task! Task contains errors.', task=Task(api=api, **created_task) ) return Task(api=api, **created_task) @inplace_reload def abort(self, inplace=True): """ Abort task :param inplace Apply action on the current object or return a new one. :return: Task object. 
""" extra = { 'resource': type(self).__name__, 'query': {'id': self.id} } logger.info('Aborting task', extra=extra) task_data = self._api.post( url=self._URL['abort'].format(id=self.id)).json() return Task(api=self._api, **task_data) @inplace_reload def run(self, batch=True, interruptible=None, inplace=True): """ Run task :param batch if False batching will be disabled. :param interruptible: If true interruptible instance will be used. :param inplace Apply action on the current object or return a new one. :return: Task object. """ params = {} if not batch: params['batch'] = False if interruptible is not None: params['use_interruptible_instances'] = interruptible extra = { 'resource': type(self).__name__, 'query': {'id': self.id, 'batch': batch} } logger.info('Running task', extra=extra) task_data = self._api.post( url=self._URL['run'].format(id=self.id), params=params).json() return Task(api=self._api, **task_data) def clone(self, run=True): """ Clone task :param run: run task after cloning :return: Task object. """ params = {} if run: params.update({'action': 'run'}) extra = { 'resource': type(self).__name__, 'query': {'id': self.id, 'run': run} } logger.info('Cloning task', extra=extra) task_data = self._api.post( url=self._URL['clone'].format(id=self.id), params=params).json() return Task(api=self._api, **task_data) @inplace_reload def save(self, inplace=True): """ Saves all modification to the task on the server. :param inplace Apply edits on the current instance or get a new one. :return: Task instance. """ modified_data = self._modified_data() if modified_data: task_request_data = {} inputs = modified_data.pop('inputs', None) execution_settings = modified_data.pop('execution_settings', None) task_request_data.update(modified_data) if inputs: task_request_data['inputs'] = self._serialize_inputs(inputs) if execution_settings: task_request_data['execution_settings'] = ( self._serialize_execution_settings(execution_settings) ) extra = { 'resource': type(self).__name__, 'query': {'id': self.id, 'data': task_request_data} } logger.info('Saving task', extra=extra) data = self._api.patch(url=self._URL['get'].format(id=self.id), data=task_request_data).json() task = Task(api=self._api, **data) return task def _serialize_execution_settings(self, execution_settings): instance_type = execution_settings.get( 'instance_type', self.execution_settings.get('instance_type', None) ) max_parallel_instances = execution_settings.get( 'max_parallel_instances', self.execution_settings.get('max_parallel_instances', None) ) use_memoization = execution_settings.get( 'use_memoization', self.execution_settings.get('use_memoization', None) ) use_elastic_disc = execution_settings.get( 'use_elastic_disc', self.execution_settings.get('use_elastic_disc', None) ) serialized_es_mapping = { 'instance_type': instance_type, 'max_parallel_instances': max_parallel_instances, 'use_memoization': use_memoization, 'use_elastic_disc': use_elastic_disc } serialized_es = dict() for key, value in serialized_es_mapping.items(): if value is not None: serialized_es[key] = value return serialized_es @staticmethod def _serialize_inputs(input_value): """ Recursively serialises input dictionary. 
:param input_value: input dictionary to serialize :return: serialized input dictionary """ if isinstance(input_value, list): return_value = [] for elem in input_value: return_value.append(Task._serialize_inputs(elem)) elif isinstance(input_value, dict): return_value = {} for key in input_value: return_value[key] = Task._serialize_inputs( input_value[key] ) elif isinstance(input_value, File): return_value = Task._to_api_file_format(input_value) else: return_value = input_value return return_value @staticmethod def _to_api_file_format(_file): return { 'class': ( FileApiFormats.FOLDER if _file.is_folder() else FileApiFormats.FILE ), 'path': _file.id } def get_execution_details(self): """ Retrieves execution details for a task. :return: Execution details instance. """ extra = { 'resource': type(self).__name__, 'query': {'id': self.id} } logger.info('Get execution details', extra=extra) data = self._api.get( self._URL['execution_details'].format(id=self.id)).json() return ExecutionDetails(api=self._api, **data) def get_batch_children(self, status=None, created_from=None, created_to=None, started_from=None, started_to=None, ended_from=None, ended_to=None, order_by=None, order=None, offset=None, limit=None, api=None): """ Retrieves batch child tasks for this task if its a batch task. :return: Collection instance. :raises SbError if task is not a batch task. """ api = api or self._api if not self.batch: raise SbgError("This task is not a batch task.") return self.query( parent=self.id, status=status, created_from=created_from, created_to=created_to, started_from=started_from, started_to=started_to, ended_from=ended_from, ended_to=ended_to, order_by=order_by, order=order, offset=offset, limit=limit, api=api, ) @classmethod def bulk_get(cls, tasks, api=None): """ Retrieve tasks with specified ids in bulk :param tasks: Tasks to be retrieved. :param api: Api instance. :return: List of TaskBulkRecord objects. """ api = api or cls._API task_ids = [Transform.to_task(task) for task in tasks] data = {'task_ids': task_ids} logger.debug('Getting tasks in bulk.') response = api.post(url=cls._URL['bulk_get'], data=data) return TaskBulkRecord.parse_records(response=response, api=api) def wait(self=None, period=10, callback=None, *args, **kwargs): """Wait until task is complete :param period: Time in seconds between reloads :param callback: Function to call after the task has finished, arguments and keyword arguments can be provided for it :return: Return value of provided callback function or None if a callback function was not provided """ while self.status not in TaskStatus.terminal_states: self.reload() time.sleep(period) if callback: return callback(*args, **kwargs) class TaskBulkRecord(BulkRecord): resource = CompoundField(cls=Task, read_only=False) def __str__(self): return f'<TaskBulkRecord valid={self.valid}>'
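# --- Illustrative usage sketch (not part of the module above) ---------------
# Typical lifecycle with the Task resource: create a task from an app, submit
# it, and block until it reaches a terminal status via Task.wait().  The API
# endpoint, token, project/app identifiers, and input names below are
# hypothetical placeholders; an authenticated Api instance from the wider
# sevenbridges package is assumed.
import sevenbridges as sbg


def run_and_wait_sketch(token, project_id, app_id, inputs):
    api = sbg.Api(url='https://api.sbgenomics.com/v2', token=token)
    task = Task.create(
        name='example run',
        project=project_id,   # e.g. 'my-division/my-project' (placeholder)
        app=app_id,           # e.g. 'my-division/my-project/my-app' (placeholder)
        inputs=inputs,        # plain values, File objects, or lists of either
        run=True,             # submit immediately instead of leaving a draft
        api=api,
    )
    task.wait(period=30)      # poll every 30 seconds until a terminal state
    return task.status, task.outputs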
# pylint: disable=g-bad-file-header # Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import stat import string import unittest from src.test.py.bazel import test_base # pylint: disable=g-import-not-at-top if os.name == 'nt': import win32api class LauncherTest(test_base.TestBase): def _buildJavaTargets(self, bazel_bin, binary_suffix): exit_code, _, stderr = self.RunBazel(['build', '//foo']) self.AssertExitCode(exit_code, 0, stderr) main_binary = os.path.join(bazel_bin, 'foo/foo%s' % binary_suffix) self.assertTrue(os.path.isfile(main_binary)) self.assertTrue( os.path.isdir( os.path.join(bazel_bin, 'foo/foo%s.runfiles' % binary_suffix))) if self.IsWindows(): self.assertTrue(os.path.isfile(main_binary)) self.AssertRunfilesManifestContains( os.path.join(bazel_bin, 'foo/foo%s.runfiles/MANIFEST' % binary_suffix), '__main__/bar/bar.txt') else: self.assertTrue( os.path.islink( os.path.join(bazel_bin, 'foo/foo.runfiles/__main__/bar/bar.txt'))) exit_code, stdout, stderr = self.RunProgram([main_binary]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(len(stdout), 4) self.assertEqual(stdout[0], 'hello java') if self.IsWindows(): self.assertRegexpMatches( stdout[1], r'java_runfiles=.*foo\\foo%s.runfiles' % binary_suffix) self.assertEqual(stdout[2], 'runfiles_manifest_only=1') self.assertRegexpMatches( stdout[3], r'^runfiles_manifest_file=[a-zA-Z]:[/\\].*MANIFEST$') else: self.assertRegexpMatches(stdout[1], r'java_runfiles=.*/foo/foo.runfiles') self.assertEqual(stdout[2], 'runfiles_manifest_only=') self.assertRegexpMatches(stdout[3], r'^runfiles_manifest_file.*MANIFEST$') def _buildShBinaryTargets(self, bazel_bin, bin1_suffix): exit_code, _, stderr = self.RunBazel(['build', '//foo:bin1.sh']) self.AssertExitCode(exit_code, 0, stderr) bin1 = os.path.join(bazel_bin, 'foo', 'bin1.sh%s' % bin1_suffix) self.assertTrue(os.path.exists(bin1)) self.assertTrue( os.path.isdir( os.path.join(bazel_bin, 'foo/bin1.sh%s.runfiles' % bin1_suffix))) exit_code, _, stderr = self.RunBazel(['build', '//foo:bin2.cmd']) self.AssertExitCode(exit_code, 0, stderr) bin2 = os.path.join(bazel_bin, 'foo/bin2.cmd') self.assertTrue(os.path.exists(bin2)) self.assertTrue( os.path.isdir(os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles'))) exit_code, _, stderr = self.RunBazel(['build', '//foo:bin3.bat']) if self.IsWindows(): self.AssertExitCode(exit_code, 1, stderr) self.assertIn('target name extension should match source file extension', os.linesep.join(stderr)) else: bin3 = os.path.join(bazel_bin, 'foo', 'bin3.bat') self.assertTrue(os.path.exists(bin3)) self.assertTrue( os.path.isdir(os.path.join(bazel_bin, 'foo/bin3.bat.runfiles'))) if self.IsWindows(): self.assertTrue(os.path.isfile(bin1)) self.assertTrue(os.path.isfile(bin2)) else: self.assertTrue(os.path.islink(bin1)) self.assertTrue(os.path.islink(bin2)) self.assertTrue(os.path.islink(bin3)) if self.IsWindows(): self.AssertRunfilesManifestContains( os.path.join(bazel_bin, 'foo/bin1.sh%s.runfiles/MANIFEST' % bin1_suffix), 
'__main__/bar/bar.txt') self.AssertRunfilesManifestContains( os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles/MANIFEST'), '__main__/bar/bar.txt') else: self.assertTrue( os.path.islink( os.path.join(bazel_bin, 'foo/bin1.sh.runfiles/__main__/bar/bar.txt'))) self.assertTrue( os.path.islink( os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles/__main__/bar/bar.txt'))) self.assertTrue( os.path.islink( os.path.join(bazel_bin, 'foo/bin3.bat.runfiles/__main__/bar/bar.txt'))) exit_code, stdout, stderr = self.RunProgram([bin1]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(len(stdout), 3) self.assertEqual(stdout[0], 'hello shell') if self.IsWindows(): self.assertEqual(stdout[1], 'runfiles_manifest_only=1') self.assertRegexpMatches( stdout[2], (r'^runfiles_manifest_file=' r'[a-zA-Z]:/.*/foo/bin1.sh%s.runfiles/MANIFEST$' % bin1_suffix)) else: # TODO(laszlocsomor): Find out whether the runfiles-related envvars should # be set on Linux (e.g. $RUNFILES, $RUNFILES_MANIFEST_FILE). Currently # they aren't, and that may be a bug. If it's indeed a bug, fix that bug # and update this test. self.assertEqual(stdout[1], 'runfiles_manifest_only=') self.assertEqual(stdout[2], 'runfiles_manifest_file=') if self.IsWindows(): exit_code, stdout, stderr = self.RunProgram([bin2]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(stdout[0], 'hello batch') def _buildPyTargets(self, bazel_bin, binary_suffix): # Verify that the build of our py_binary succeeds. exit_code, _, stderr = self.RunBazel(['build', '//foo:foo']) self.AssertExitCode(exit_code, 0, stderr) # Verify that generated files exist. foo_bin = os.path.join(bazel_bin, 'foo', 'foo%s' % binary_suffix) self.assertTrue(os.path.isfile(foo_bin)) self.assertTrue( os.path.isdir( os.path.join(bazel_bin, 'foo/foo%s.runfiles' % binary_suffix))) # Verify contents of runfiles (manifest). if self.IsWindows(): self.AssertRunfilesManifestContains( os.path.join(bazel_bin, 'foo/foo%s.runfiles/MANIFEST' % binary_suffix), '__main__/bar/bar.txt') else: self.assertTrue( os.path.islink( os.path.join(bazel_bin, 'foo/foo.runfiles/__main__/bar/bar.txt'))) # Try to run the built py_binary. exit_code, stdout, stderr = self.RunProgram([foo_bin]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(stdout[0], 'Hello World!') # Try to use the py_binary as an executable in a Starlark rule. exit_code, stdout, stderr = self.RunBazel(['build', '//foo:hello']) self.AssertExitCode(exit_code, 0, stderr) # Verify that the Starlark action generated the right output. hello_path = os.path.join(bazel_bin, 'foo', 'hello.txt') self.assertTrue(os.path.isfile(hello_path)) with open(hello_path, 'r') as f: self.assertEqual(f.read(), 'Hello World!') # Verify that running py_test succeeds. 
exit_code, _, stderr = self.RunBazel(['test', '//foo:test']) self.AssertExitCode(exit_code, 0, stderr) def _buildAndCheckArgumentPassing(self, package, target_name): exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] exit_code, _, stderr = self.RunBazel( ['build', '//%s:%s' % (package, target_name)]) self.AssertExitCode(exit_code, 0, stderr) bin_suffix = '.exe' if self.IsWindows() else '' bin1 = os.path.join(bazel_bin, package, '%s%s' % (target_name, bin_suffix)) self.assertTrue(os.path.exists(bin1)) self.assertTrue( os.path.isdir( os.path.join(bazel_bin, '%s/%s%s.runfiles' % (package, target_name, bin_suffix)))) arguments = ['a', 'a b', '"b"', 'C:\\a\\b\\', '"C:\\a b\\c\\"'] exit_code, stdout, stderr = self.RunProgram([bin1] + arguments) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(stdout, arguments) def testJavaBinaryLauncher(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('foo/BUILD', [ 'java_binary(', ' name = "foo",', ' srcs = ["Main.java"],', ' main_class = "Main",', ' data = ["//bar:bar.txt"],', ')', ]) self.ScratchFile('foo/Main.java', [ 'public class Main {', ' public static void main(String[] args) {' ' System.out.println("hello java");', ' System.out.println("java_runfiles=" + ', ' System.getenv("JAVA_RUNFILES"));', ' System.out.println("runfiles_manifest_only=" + ', ' System.getenv("RUNFILES_MANIFEST_ONLY"));', ' System.out.println("runfiles_manifest_file=" + ', ' System.getenv("RUNFILES_MANIFEST_FILE"));', ' }', '}', ]) self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])']) self.ScratchFile('bar/bar.txt', ['hello']) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] self._buildJavaTargets(bazel_bin, '.exe' if self.IsWindows() else '') def testJavaBinaryArgumentPassing(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('foo/BUILD', [ 'java_binary(', ' name = "bin",', ' srcs = ["Main.java"],', ' main_class = "Main",', ')', ]) self.ScratchFile('foo/Main.java', [ 'public class Main {', ' public static void main(String[] args) {' ' for (String arg : args) {', ' System.out.println(arg);', ' }' ' }', '}', ]) self._buildAndCheckArgumentPassing('foo', 'bin') def testShBinaryLauncher(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile( 'foo/BUILD', [ # On Linux/MacOS, all sh_binary rules generate an output file with # the same name as the rule, and this is a symlink to the file in # `srcs`. (Bazel allows only one file in `sh_binary.srcs`.) # On Windows, if the srcs's extension is one of ".exe", ".cmd", or # ".bat", then Bazel requires the rule's name has the same # extension, and the output file will be a copy of the source file. 
'sh_binary(', ' name = "bin1.sh",', ' srcs = ["foo.sh"],', ' data = ["//bar:bar.txt"],', ')', 'sh_binary(', ' name = "bin2.cmd",', # name's extension matches that of srcs[0] ' srcs = ["foo.cmd"],', ' data = ["//bar:bar.txt"],', ')', 'sh_binary(', ' name = "bin3.bat",', # name's extension doesn't match srcs[0]'s ' srcs = ["foo.cmd"],', ' data = ["//bar:bar.txt"],', ')', ]) foo_sh = self.ScratchFile('foo/foo.sh', [ '#!/bin/bash', 'echo hello shell', 'echo runfiles_manifest_only=${RUNFILES_MANIFEST_ONLY:-}', 'echo runfiles_manifest_file=${RUNFILES_MANIFEST_FILE:-}', ]) foo_cmd = self.ScratchFile('foo/foo.cmd', ['@echo hello batch']) self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])']) self.ScratchFile('bar/bar.txt', ['hello']) os.chmod(foo_sh, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) os.chmod(foo_cmd, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] self._buildShBinaryTargets(bazel_bin, '.exe' if self.IsWindows() else '') def testShBinaryArgumentPassing(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('foo/BUILD', [ 'sh_binary(', ' name = "bin",', ' srcs = ["bin.sh"],', ')', ]) foo_sh = self.ScratchFile('foo/bin.sh', [ '#!/bin/bash', '# Store arguments in a array', 'args=("$@")', '# Get the number of arguments', 'N=${#args[@]}', '# Echo each argument', 'for (( i=0;i<$N;i++)); do', ' echo ${args[${i}]}', 'done', ]) os.chmod(foo_sh, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) self._buildAndCheckArgumentPassing('foo', 'bin') def testPyBinaryLauncher(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('foo/foo.bzl', [ 'def _impl(ctx):', ' ctx.actions.run(', ' arguments=[ctx.outputs.out.path],', ' outputs=[ctx.outputs.out],', ' executable=ctx.executable._hello_world,', ' use_default_shell_env=True)', '', 'helloworld = rule(', ' implementation=_impl,', ' attrs={', ' "srcs": attr.label_list(allow_files=True),', ' "out": attr.output(mandatory=True),', ' "_hello_world": attr.label(executable=True, cfg="host",', ' allow_files=True,', ' default=Label("//foo:foo"))', ' }', ')', ]) self.ScratchFile('foo/BUILD', [ 'load(":foo.bzl", "helloworld")', '', 'py_binary(', ' name = "foo",', ' srcs = ["foo.py"],', ' data = ["//bar:bar.txt"],', ')', '', 'py_test(', ' name = "test",', ' srcs = ["test.py"],', ')', '', 'helloworld(', ' name = "hello",', ' out = "hello.txt",', ')' ]) foo_py = self.ScratchFile('foo/foo.py', [ '#!/usr/bin/env python', 'import sys', 'if len(sys.argv) == 2:', ' with open(sys.argv[1], "w") as f:', ' f.write("Hello World!")', 'else:', ' print("Hello World!")', ]) test_py = self.ScratchFile('foo/test.py', [ '#!/usr/bin/env python', 'import unittest', 'class MyTest(unittest.TestCase):', ' def test_dummy(self):', ' pass', 'if __name__ == \'__main__\':', ' unittest.main()', ]) self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])']) self.ScratchFile('bar/bar.txt', ['hello']) os.chmod(foo_py, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) os.chmod(test_py, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] self._buildPyTargets(bazel_bin, '.exe' if self.IsWindows() else '') def testPyBinaryArgumentPassing(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('foo/BUILD', [ 'py_binary(', ' name = "bin",', ' srcs = ["bin.py"],', ')', ]) self.ScratchFile('foo/bin.py', [ 'import 
sys', 'for arg in sys.argv[1:]:', ' print(arg)', ]) self._buildAndCheckArgumentPassing('foo', 'bin') def testWindowsJavaExeLauncher(self): # Skip this test on non-Windows platforms if not self.IsWindows(): return self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('foo/BUILD', [ 'java_binary(', ' name = "foo",', ' srcs = ["Main.java"],', ' main_class = "Main",', ' jvm_flags = ["--flag1", "--flag2"],', ')', ]) self.ScratchFile('foo/Main.java', [ 'public class Main {', ' public static void main(String[] args) {' ' System.out.println("helloworld");', ' }', '}', ]) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] exit_code, _, stderr = self.RunBazel(['build', '//foo:foo']) self.AssertExitCode(exit_code, 0, stderr) binary = os.path.join(bazel_bin, 'foo', 'foo.exe') self.assertTrue(os.path.exists(binary)) # Add this flag to make launcher print the command it generated instead of # launching the real program. print_cmd = '--print_launcher_command' exit_code, stdout, stderr = self.RunProgram([binary, '--debug', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn( '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--debug', print_cmd], env_add={'DEFAULT_JVM_DEBUG_PORT': '12345'}) self.AssertExitCode(exit_code, 0, stderr) self.assertIn( '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=12345', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--debug=12345', print_cmd], env_add={ 'DEFAULT_JVM_DEBUG_SUSPEND': 'n', 'PERSISTENT_TEST_RUNNER': 'true' }) self.AssertExitCode(exit_code, 0, stderr) self.assertIn( '-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=12345' ',quiet=y', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--main_advice=MyMain', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('MyMain', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--main_advice_classpath=foo/bar', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('-classpath', stdout) classpath = stdout[stdout.index('-classpath') + 1] self.assertIn('foo/bar', classpath) exit_code, stdout, stderr = self.RunProgram( [binary, '--jvm_flag="--some_path="./a b/c""', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('"--some_path=\\"./a b/c\\""', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--jvm_flags="--path1=a --path2=b"', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('--path1=a', stdout) self.assertIn('--path2=b', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, print_cmd], env_add={'JVM_FLAGS': '--foo --bar'}) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('--flag1', stdout) self.assertIn('--flag2', stdout) self.assertIn('--foo', stdout) self.assertIn('--bar', stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--singlejar', print_cmd]) self.AssertExitCode(exit_code, 1, stderr) self.assertIn('foo_deploy.jar does not exist', ''.join(stderr)) exit_code, _, stderr = self.RunBazel(['build', '//foo:foo_deploy.jar']) self.AssertExitCode(exit_code, 0, stderr) exit_code, stdout, stderr = self.RunProgram( [binary, '--singlejar', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('-classpath', stdout) classpath = stdout[stdout.index('-classpath') + 1] self.assertIn('foo_deploy.jar', classpath) exit_code, stdout, stderr = self.RunProgram([binary, 
'--print_javabin']) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('local_jdk/bin/java.exe', ''.join(stdout)) my_tmp_dir = self.ScratchDir('my/temp/dir') exit_code, stdout, stderr = self.RunProgram( [binary, print_cmd], env_add={'TEST_TMPDIR': my_tmp_dir}) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('-Djava.io.tmpdir=%s' % my_tmp_dir, stdout) exit_code, stdout, stderr = self.RunProgram( [binary, '--classpath_limit=0', print_cmd]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('-classpath', stdout) classpath = stdout[stdout.index('-classpath') + 1] self.assertRegexpMatches(classpath, r'foo-[A-Za-z0-9]+-classpath.jar$') def testWindowsNativeLauncherInNonEnglishPath(self): if not self.IsWindows(): return self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('bin/BUILD', [ 'java_binary(', ' name = "bin_java",', ' srcs = ["Main.java"],', ' main_class = "Main",', ')', 'sh_binary(', ' name = "bin_sh",', ' srcs = ["main.sh"],', ')', ]) self.ScratchFile('bin/Main.java', [ 'public class Main {', ' public static void main(String[] args) {' ' System.out.println("helloworld");', ' }', '}', ]) self.ScratchFile('bin/main.sh', [ 'echo "helloworld"', ]) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] exit_code, _, stderr = self.RunBazel(['build', '//bin/...']) self.AssertExitCode(exit_code, 0, stderr) for f in [ 'bin_java.exe', 'bin_java.exe.runfiles_manifest', 'bin_sh.exe', 'bin_sh', 'bin_sh.exe.runfiles_manifest', ]: self.CopyFile(os.path.join(bazel_bin, 'bin', f), os.path.join(u'./\u6d4b\u8bd5', f)) unicode_binary_path = u'./\u6d4b\u8bd5/bin_java.exe' exit_code, stdout, stderr = self.RunProgram([unicode_binary_path]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) unicode_binary_path = u'./\u6d4b\u8bd5/bin_sh.exe' exit_code, stdout, stderr = self.RunProgram([unicode_binary_path]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) def testWindowsNativeLauncherInLongPath(self): if not self.IsWindows(): return self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('bin/BUILD', [ 'java_binary(', ' name = "bin_java",', ' srcs = ["Main.java"],', ' main_class = "Main",', ')', 'sh_binary(', ' name = "bin_sh",', ' srcs = ["main.sh"],', ')', 'py_binary(', ' name = "bin_py",', ' srcs = ["bin_py.py"],', ')', ]) self.ScratchFile('bin/Main.java', [ 'public class Main {', ' public static void main(String[] args) {' ' System.out.println("helloworld");', ' }', '}', ]) self.ScratchFile('bin/main.sh', [ 'echo "helloworld"', ]) self.ScratchFile('bin/bin_py.py', [ 'print("helloworld")', ]) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin']) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = stdout[0] exit_code, _, stderr = self.RunBazel(['build', '//bin/...']) self.AssertExitCode(exit_code, 0, stderr) # Create a directory with a path longer than 260 long_dir_path = './' + '/'.join( [(c * 8 + '.' 
+ c * 3) for c in string.ascii_lowercase]) for f in [ 'bin_java.exe', 'bin_java.exe.runfiles_manifest', 'bin_sh.exe', 'bin_sh', 'bin_sh.exe.runfiles_manifest', 'bin_py.exe', 'bin_py.zip', 'bin_py.exe.runfiles_manifest', ]: self.CopyFile( os.path.join(bazel_bin, 'bin', f), os.path.join(long_dir_path, f)) long_binary_path = os.path.abspath(long_dir_path + '/bin_java.exe') # subprocess doesn't support long path without shell=True exit_code, stdout, stderr = self.RunProgram([long_binary_path], shell=True) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) # Make sure we can launch the binary with a shortened Windows 8dot3 path short_binary_path = win32api.GetShortPathName(long_binary_path) exit_code, stdout, stderr = self.RunProgram([short_binary_path], shell=True) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) long_binary_path = os.path.abspath(long_dir_path + '/bin_sh.exe') # subprocess doesn't support long path without shell=True exit_code, stdout, stderr = self.RunProgram([long_binary_path], shell=True) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) # Make sure we can launch the binary with a shortened Windows 8dot3 path short_binary_path = win32api.GetShortPathName(long_binary_path) exit_code, stdout, stderr = self.RunProgram([short_binary_path], shell=True) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) long_binary_path = os.path.abspath(long_dir_path + '/bin_py.exe') # subprocess doesn't support long path without shell=True exit_code, stdout, stderr = self.RunProgram([long_binary_path], shell=True) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) # Make sure we can launch the binary with a shortened Windows 8dot3 path short_binary_path = win32api.GetShortPathName(long_binary_path) exit_code, stdout, stderr = self.RunProgram([short_binary_path], shell=True) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual('helloworld', ''.join(stdout)) def AssertRunfilesManifestContains(self, manifest, entry): with open(manifest, 'r') as f: for l in f: tokens = l.strip().split(' ', 1) if len(tokens) == 2 and tokens[0] == entry: return self.fail('Runfiles manifest "%s" did not contain "%s"' % (manifest, entry)) if __name__ == '__main__': unittest.main()
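

# --- Illustrative sketch, not part of the test suite above ---
# AssertRunfilesManifestContains() reads the manifest line by line, where each
# line maps a runfiles entry to its on-disk path, separated by a single space.
# The helper below is a hypothetical standalone version of that lookup, using
# the same "<entry> <path>" split as the assertion above; it is only a sketch
# and is not used by the tests.


def read_runfiles_manifest(manifest_path):
  """Return a dict mapping runfiles entries to their on-disk paths."""
  entries = {}
  with open(manifest_path, 'r') as f:
    for line in f:
      tokens = line.strip().split(' ', 1)
      if len(tokens) == 2:
        entries[tokens[0]] = tokens[1]
  return entries


# Example (hypothetical manifest path):
#   manifest = read_runfiles_manifest(
#       'bazel-bin/bin/bin_java.exe.runfiles_manifest')
#   manifest.get('<workspace>/bin/Main.java')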
# Copyright 2020 Google LLC # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Fold batchnormalization with previous QDepthwiseConv2D layers.""" import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras.models import Model from .qconvolutional import QDepthwiseConv2D from .quantizers import * from tensorflow.python.framework import smart_cond as tf_utils from tensorflow.python.ops import math_ops from tensorflow.python.ops import array_ops tf.compat.v2.enable_v2_behavior() class QDepthwiseConv2DBatchnorm(QDepthwiseConv2D): """Fold batchnormalization with a previous QDepthwiseConv2d layer.""" def __init__( self, # QDepthwiseConv2d params kernel_size, strides=(1, 1), padding="VALID", depth_multiplier=1, data_format=None, activation=None, use_bias=True, depthwise_initializer="he_normal", bias_initializer="zeros", depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None, dilation_rate=(1, 1), depthwise_quantizer=None, bias_quantizer=None, depthwise_range=None, bias_range=None, # batchnorm params axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer="zeros", gamma_initializer="ones", moving_mean_initializer="zeros", moving_variance_initializer="ones", beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99, fused=None, trainable=True, virtual_batch_size=None, adjustment=None, # other params ema_freeze_delay=None, folding_mode="ema_stats_folding", **kwargs): """A composite layer that folds depthwiseconv2d and batch normalization. The first group of parameters correponds to the initialization parameters of a QDepthwiseConv2d layer. check qkeras.qconvolutional.QDepthwiseConv2D for details. The 2nd group of parameters corresponds to the initialization parameters of a BatchNormalization layer. Check keras.layers.normalization.BatchNorma lizationBase for details. The 3rd group of parameters corresponds to the initialization parameters specific to this class. ema_freeze_delay: int or None. number of steps before batch normalization mv_mean and mv_variance will be frozen and used in the folded layer. folding_mode: string "ema_stats_folding": mimic tflite which uses the ema statistics to fold the kernel to suppress quantization induced jitter then performs the correction to have a similar effect of using the current batch statistics. "batch_stats_folding": use batch mean and variance to fold kernel first; after enough training steps switch to moving_mean and moving_variance for kernel folding. 
""" # intialization the QDepthwiseConv2d part of the composite layer super(QDepthwiseConv2DBatchnorm, self).__init__( kernel_size=kernel_size, strides=strides, padding=padding, depth_multiplier=depth_multiplier, data_format=data_format, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, bias_constraint=bias_constraint, dilation_rate=dilation_rate, depthwise_quantizer=depthwise_quantizer, bias_quantizer=bias_quantizer, depthwise_range=depthwise_range, bias_range=bias_range, **kwargs) # initialization of batchnorm part of the composite layer self.batchnorm = layers.BatchNormalization( axis=axis, momentum=momentum, epsilon=epsilon, center=center, scale=scale, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer, beta_regularizer=beta_regularizer, gamma_regularizer=gamma_regularizer, beta_constraint=beta_constraint, gamma_constraint=gamma_constraint, renorm=renorm, renorm_clipping=renorm_clipping, renorm_momentum=renorm_momentum, fused=fused, trainable=trainable, virtual_batch_size=virtual_batch_size, adjustment=adjustment) self.ema_freeze_delay = ema_freeze_delay assert folding_mode in ["ema_stats_folding", "batch_stats_folding"] self.folding_mode = folding_mode def build(self, input_shape): super(QDepthwiseConv2DBatchnorm, self).build(input_shape) # If start training from scratch, self._iteration (i.e., training_steps) # is initialized with -1. When loading ckpt, it can load the number of # training steps that have been previously trainied. 
    # TODO(lishanok): develop a way to count iterations outside layer
    self._iteration = tf.Variable(-1, trainable=False, name="iteration",
                                  dtype=tf.int64)

  def call(self, inputs, training=None):
    # resolve the effective training flag for this call
    training = self.batchnorm._get_training_value(training)  # pylint: disable=protected-access

    # decide whether the batchnorm statistics should still be updated
    if (self.ema_freeze_delay is None) or (self.ema_freeze_delay < 0):
      # if ema_freeze_delay is None or a negative value, do not freeze bn stats
      bn_training = tf.cast(training, dtype=bool)
    else:
      bn_training = tf.math.logical_and(training, tf.math.less_equal(
          self._iteration, self.ema_freeze_delay))

    depthwise_kernel = self.depthwise_kernel

    # run depthwise_conv2d to produce output for the following batchnorm
    conv_outputs = tf.keras.backend.depthwise_conv2d(
        inputs,
        depthwise_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format)

    if self.use_bias:
      bias = self.bias
      conv_outputs = tf.keras.backend.bias_add(
          conv_outputs, bias, data_format=self.data_format)
    else:
      bias = 0

    _ = self.batchnorm(conv_outputs, training=bn_training)

    self._iteration.assign_add(tf_utils.smart_cond(
        training,
        lambda: tf.constant(1, tf.int64),
        lambda: tf.constant(0, tf.int64)))

    # calculate mean and variance from the current batch
    bn_shape = conv_outputs.shape
    ndims = len(bn_shape)
    reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis]
    keep_dims = len(self.batchnorm.axis) > 1
    mean, variance = self.batchnorm._moments(  # pylint: disable=protected-access
        math_ops.cast(conv_outputs, self.batchnorm._param_dtype),  # pylint: disable=protected-access
        reduction_axes,
        keep_dims=keep_dims)

    gamma = self.batchnorm.gamma
    beta = self.batchnorm.beta
    moving_mean = self.batchnorm.moving_mean
    moving_variance = self.batchnorm.moving_variance

    if self.folding_mode not in ["batch_stats_folding", "ema_stats_folding"]:
      raise ValueError("mode {} not supported!".format(self.folding_mode))

    mv_inv = math_ops.rsqrt(moving_variance + self.batchnorm.epsilon)
    batch_inv = math_ops.rsqrt(variance + self.batchnorm.epsilon)
    if gamma is not None:
      mv_inv *= gamma
      batch_inv *= gamma

    folded_bias = tf_utils.smart_cond(
        bn_training,
        lambda: batch_inv * (bias - mean) + beta,
        lambda: mv_inv * (bias - moving_mean) + beta)

    if self.folding_mode == "batch_stats_folding":
      # using batch mean and variance in the initial training stage
      # after sufficient training, switch to moving mean and variance
      inv = tf_utils.smart_cond(bn_training, lambda: batch_inv, lambda: mv_inv)
    elif self.folding_mode == "ema_stats_folding":
      # We always scale the weights with a correction factor to the long term
      # statistics prior to quantization. This ensures that there is no jitter
      # in the quantized weights due to batch to batch variation. During the
      # initial phase of training, we undo the scaling of the weights so that
      # outputs are identical to regular batch normalization. We also modify
      # the bias terms correspondingly. After sufficient training, switch from
      # using batch statistics to long term moving averages for batch
      # normalization.
# use batch stats for calcuating bias before bn freeze, and use moving # stats after bn freeze # moving stats is always used to fold kernel in tflite; before bn freeze # an additional correction factor will be applied to the depthwiseconv2d # output inv = mv_inv # for DepthwiseConv2D inv needs to be broadcasted to the last 2 dimensions # of the kernels depthwise_weights_shape = [ depthwise_kernel.get_shape().as_list()[2], depthwise_kernel.get_shape().as_list()[3] ] inv = array_ops.reshape(inv, depthwise_weights_shape) # wrap conv kernel with bn parameters folded_depthwise_kernel = inv * depthwise_kernel # quantize the folded kernel if self.depthwise_quantizer is not None: q_folded_depthwise_kernel = self.depthwise_quantizer_internal( folded_depthwise_kernel) else: q_folded_depthwise_kernel = folded_depthwise_kernel # If loaded from a ckpt, bias_quantizer is the ckpt value # Else if bias_quantizer not specified, bias # quantizer is None and we need to calculate bias quantizer # type according to accumulator type. User can call # bn_folding_utils.populate_bias_quantizer_for_folded_layers( # model, input_quantizer_list]) to populate such bias quantizer. if self.bias_quantizer is not None: q_folded_bias = self.bias_quantizer_internal(folded_bias) else: q_folded_bias = folded_bias applied_kernel = q_folded_depthwise_kernel applied_bias = q_folded_bias # calculate depthwise_conv2d output using the quantized folded kernel folded_outputs = tf.keras.backend.depthwise_conv2d( inputs, applied_kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format) if training is True and self.folding_mode == "ema_stats_folding": batch_inv = math_ops.rsqrt(variance + self.batchnorm.epsilon) y_corr = tf_utils.smart_cond( bn_training, lambda: (math_ops.sqrt(moving_variance + self.batchnorm.epsilon) * math_ops.rsqrt(variance + self.batchnorm.epsilon)), lambda: tf.constant(1.0, shape=moving_variance.shape)) folded_outputs = math_ops.mul(folded_outputs, y_corr) folded_outputs = tf.keras.backend.bias_add( folded_outputs, applied_bias, data_format=self.data_format) if self.activation is not None: return self.activation(folded_outputs) return folded_outputs def get_config(self): base_config = super().get_config() bn_config = self.batchnorm.get_config() config = {"ema_freeze_delay": self.ema_freeze_delay, "folding_mode": self.folding_mode} name = base_config["name"] out_config = dict( list(base_config.items()) + list(bn_config.items()) + list(config.items())) # names from different config override each other; use the base layer name # as the this layer's config name out_config["name"] = name return out_config def get_quantization_config(self): return { "depthwise_quantizer": str(self.depthwise_quantizer_internal), "bias_quantizer": str(self.bias_quantizer_internal), "activation": str(self.activation), "filters": str(self.filters) } def get_quantizers(self): return self.quantizers def get_folded_weights(self): """Function to get the batchnorm folded weights. This function converts the weights by folding batchnorm parameters into the weight of QDepthwiseConv2d. 
The high-level equation: W_fold = gamma * W / sqrt(variance + epsilon) bias_fold = gamma * (bias - moving_mean) / sqrt(variance + epsilon) + beta """ depthwise_kernel = self.depthwise_kernel if self.use_bias: bias = self.bias else: bias = 0 # get Batchnorm stats gamma = self.batchnorm.gamma beta = self.batchnorm.beta moving_mean = self.batchnorm.moving_mean moving_variance = self.batchnorm.moving_variance # get the inversion factor so that we replace division by multiplication inv = math_ops.rsqrt(moving_variance + self.batchnorm.epsilon) if gamma is not None: inv *= gamma # fold bias with bn stats folded_bias = inv * (bias - moving_mean) + beta # for DepthwiseConv2D inv needs to be broadcasted to the last 2 dimensions # of the kernels depthwise_weights_shape = [ depthwise_kernel.get_shape().as_list()[2], depthwise_kernel.get_shape().as_list()[3] ] inv = array_ops.reshape(inv, depthwise_weights_shape) # wrap conv kernel with bn parameters folded_depthwise_kernel = inv * depthwise_kernel return [folded_depthwise_kernel, folded_bias]
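

# --- Illustrative sketch, not part of the layer above ---
# Minimal NumPy version of the fold performed by get_folded_weights(), for a
# kernel whose trailing axis indexes channels (the layer itself additionally
# reshapes `inv` to match the depthwise kernel layout). The function and
# variable names here are hypothetical; the equations are the ones from the
# docstring above:
#   W_fold    = gamma * W / sqrt(moving_variance + epsilon)
#   bias_fold = gamma * (bias - moving_mean) / sqrt(moving_variance + epsilon) + beta

import numpy as np


def fold_batchnorm(kernel, bias, gamma, beta, moving_mean, moving_variance,
                   epsilon=1e-3):
  """Fold per-channel batchnorm statistics into a kernel and bias."""
  inv = gamma / np.sqrt(moving_variance + epsilon)
  folded_kernel = kernel * inv  # broadcasts over the trailing channel axis
  folded_bias = inv * (bias - moving_mean) + beta
  return folded_kernel, folded_bias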
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import os import re from abc import ABC, abstractmethod from textwrap import dedent from typing import Callable, ClassVar, Iterator, Optional, cast from typing_extensions import final from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES from pants.base.build_environment import get_buildroot from pants.core.goals.run import RestartableField from pants.engine.addresses import Address from pants.engine.fs import GlobMatchErrorBehavior from pants.engine.target import ( COMMON_TARGET_FIELDS, AsyncFieldMixin, BoolField, Dependencies, DictStringToStringField, InvalidFieldException, OptionalSingleSourceField, StringField, StringSequenceField, Target, ) from pants.util.docutil import bin_name, doc_url # Common help text to be applied to each field that supports value interpolation. _interpolation_help = ( "{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived " "from various sources, such as the Dockerfile instructions and build args.\n\n" ) class DockerImageBuildArgsField(StringSequenceField): alias = "extra_build_args" default = () help = ( "Build arguments (`--build-arg`) to use when building this image. " "Entries are either strings in the form `ARG_NAME=value` to set an explicit value; " "or just `ARG_NAME` to copy the value from Pants's own environment.\n\n" "Use `[docker].build_args` to set default build args for all images." ) class DockerImageContextRootField(StringField): alias = "context_root" help = ( "Specify which directory to use as the Docker build context root. This affects the file " "paths to use for the `COPY` and `ADD` instructions. For example, whether " "`COPY files/f.txt` should look for the file relative to the build root: " "`<build root>/files/f.txt` vs relative to the BUILD file: " "`<build root>/path_to_build_file/files/f.txt`.\n\n" "Specify the `context_root` path as `files` for relative to build root, or as `./files` " "for relative to the BUILD file.\n\n" "If `context_root` is not specified, it defaults to `[docker].default_context_root`." ) @classmethod def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]: value_or_default = super().compute_value(raw_value, address=address) if isinstance(value_or_default, str) and value_or_default.startswith("/"): val = value_or_default.strip("/") raise InvalidFieldException( f"The `{cls.alias}` field in target {address} must be a relative path, but was " f"{value_or_default!r}. Use {val!r} for a path relative to the build root, or " f"{'./' + val!r} for a path relative to the BUILD file (i.e. {os.path.join(address.spec_path, val)!r})." ) return value_or_default class DockerImageSourceField(OptionalSingleSourceField): default = "Dockerfile" # When the default glob value is in effect, we don't want the normal glob match error behavior # to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case # we generate the Dockerfile instead. If there are no `instructions`, or there are both # `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message # to the user. default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore help = ( "The Dockerfile to use when building the Docker image.\n\n" "Use the `instructions` field instead if you prefer not having the Dockerfile in your " "source tree." 
) class DockerImageInstructionsField(StringSequenceField): alias = "instructions" required = False help = ( "The `Dockerfile` content, typically one instruction per list item.\n\n" "Use the `source` field instead if you prefer having the Dockerfile in your source tree." "\n\n" + dedent( """\ Example: # example/BUILD docker_image( instructions=[ "FROM base/image:1.0", "RUN echo example", ], ) """ ) ) class DockerImageTagsField(StringSequenceField): alias = "image_tags" default = ("latest",) help = ( "Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n" + _interpolation_help.format(kind="tag") + f"See {doc_url('tagging-docker-images')}." ) class DockerImageTargetStageField(StringField): alias = "target_stage" help = ( "Specify target build stage, rather than building the entire `Dockerfile`.\n\n" "When using multi-stage build, you may name your stages, and can target them when building " "to only selectively build a certain stage. See also the `--docker-build-target-stage` " "option.\n\n" "Read more about [multi-stage Docker builds]" "(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)" ) class DockerImageDependenciesField(Dependencies): supports_transitive_excludes = True class DockerImageRegistriesField(StringSequenceField): alias = "registries" default = (ALL_DEFAULT_REGISTRIES,) help = ( "List of addresses or configured aliases to any Docker registries to use for the " "built image.\n\n" "The address is a domain name with optional port for your registry, and any registry " "aliases are prefixed with `@` for addresses in the [docker].registries configuration " "section.\n\n" "By default, all configured registries with `default = true` are used.\n\n" + dedent( """\ Example: # pants.toml [docker.registries.my-registry-alias] address = "myregistrydomain:port" default = false # optional # example/BUILD docker_image( registries = [ "@my-registry-alias", "myregistrydomain:port", ], ) """ ) + ( "The above example shows two valid `registry` options: using an alias to a configured " "registry and the address to a registry verbatim in the BUILD file." ) ) class DockerImageRepositoryField(StringField): alias = "repository" help = ( 'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n' "It uses the `[docker].default_repository` by default.\n\n" + _interpolation_help.format(kind="repository") + "Additional placeholders for the repository field are: `name`, `directory` and " "`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more " "information." ) class DockerImageSkipPushField(BoolField): alias = "skip_push" default = False help = ( f"If set to true, do not push this image to registries when running `{bin_name()} publish`." 
) OptionValueFormatter = Callable[[str], str] class DockerBuildOptionFieldMixin(ABC): """Inherit this mixin class to provide options to `docker build`.""" docker_build_option: ClassVar[str] @abstractmethod def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]: """Subclasses must implement this, to turn their `self.value` into none, one or more option values.""" @final def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]: for value in self.option_values(value_formatter=value_formatter): yield from (self.docker_build_option, value) class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField): alias = "image_labels" help = ( "Provide image metadata.\n\n" + _interpolation_help.format(kind="label value") + "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/" "#manage-labels-on-objects) for more information." ) docker_build_option = "--label" def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]: for label, value in (self.value or {}).items(): yield f"{label}={value_formatter(value)}" class DockerImageBuildSecretsOptionField( AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField ): alias = "secrets" help = ( "Secret files to expose to the build (only if BuildKit enabled).\n\n" "Secrets may use absolute paths, or paths relative to your build root, or the BUILD file " "if prefixed with `./`. The id should be valid as used by the Docker build `--secret` " "option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more " "information.\n\n" + dedent( """\ Example: docker_image( secrets={ "mysecret": "/var/secrets/some-secret", "repo-secret": "src/proj/secrets/some-secret", "target-secret": "./secrets/some-secret", } ) """ ) ) docker_build_option = "--secret" def option_values(self, **kwargs) -> Iterator[str]: # os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret # `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also, # an empty path part is ignored. for secret, path in (self.value or {}).items(): full_path = os.path.join( get_buildroot(), self.address.spec_path if re.match(r"\.{1,2}/", path) else "", path, ) yield f"id={secret},src={os.path.normpath(full_path)}" class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField): alias = "ssh" default = () help = ( "SSH agent socket or keys to expose to the build (only if BuildKit enabled) " "(format: default|<id>[=<socket>|<key>[,<key>]])\n\n" "The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in " "your `RUN` instructions:\n\n" " RUN --mount=type=ssh ...\n\n" "See [Docker documentation](https://docs.docker.com/develop/develop-images" "/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information." 
) docker_build_option = "--ssh" def option_values(self, **kwargs) -> Iterator[str]: yield from cast("tuple[str]", self.value) class DockerImageTarget(Target): alias = "docker_image" core_fields = ( *COMMON_TARGET_FIELDS, DockerImageBuildArgsField, DockerImageDependenciesField, DockerImageSourceField, DockerImageInstructionsField, DockerImageContextRootField, DockerImageTagsField, DockerImageRegistriesField, DockerImageRepositoryField, DockerImageBuildImageLabelsOptionField, DockerImageBuildSecretsOptionField, DockerImageBuildSSHOptionField, DockerImageSkipPushField, DockerImageTargetStageField, RestartableField, ) help = ( "The `docker_image` target describes how to build and tag a Docker image.\n\n" "Any dependencies, as inferred or explicitly specified, will be included in the Docker " "build context, after being packaged if applicable.\n\n" "By default, will use a Dockerfile from the same directory as the BUILD file this target " "is defined in. Point at another file with the `source` field, or use the `instructions` " "field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n" "Dependencies on upstream/base images defined by another `docker_image` are inferred if " "referenced by a build argument with a default value of the target address.\n\n" + dedent( """\ Example: # src/docker/downstream/Dockerfile ARG BASE=src/docker/upstream:image FROM $BASE ... """ ) )
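

# --- Illustrative sketch, not part of the Pants plugin above ---
# DockerBuildOptionFieldMixin turns a field value into repeated `docker build`
# flags: option_values() yields the raw values and options() prefixes each one
# with the class-level docker_build_option. The minimal class below imitates
# that contract without depending on Pants target machinery; the name
# _LabelsOptionSketch is a hypothetical stand-in for the real field class.

from typing import Dict, Iterator


class _LabelsOptionSketch:
    docker_build_option = "--label"

    def __init__(self, value: Dict[str, str]) -> None:
        self.value = value

    def option_values(self, *, value_formatter) -> Iterator[str]:
        # Mirrors DockerImageBuildImageLabelsOptionField.option_values().
        for label, value in (self.value or {}).items():
            yield f"{label}={value_formatter(value)}"

    def options(self, value_formatter) -> Iterator[str]:
        for value in self.option_values(value_formatter=value_formatter):
            yield from (self.docker_build_option, value)


# Example: list(_LabelsOptionSketch({"env": "prod", "team": "data"}).options(str))
# produces ["--label", "env=prod", "--label", "team=data"].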
# engine/default.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Default implementations of per-dialect sqlalchemy.engine classes. These are semi-private implementation classes which are only of importance to database dialect authors; dialects will usually use the classes here as the base class for their own corresponding classes. """ import re import random from . import reflection, interfaces, result from ..sql import compiler, expression from .. import types as sqltypes from .. import exc, util, pool, processors import codecs import weakref from .. import event AUTOCOMMIT_REGEXP = re.compile( r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)', re.I | re.UNICODE) class DefaultDialect(interfaces.Dialect): """Default implementation of Dialect""" statement_compiler = compiler.SQLCompiler ddl_compiler = compiler.DDLCompiler type_compiler = compiler.GenericTypeCompiler preparer = compiler.IdentifierPreparer supports_alter = True # the first value we'd get for an autoincrement # column. default_sequence_base = 1 # most DBAPIs happy with this for execute(). # not cx_oracle. execute_sequence_format = tuple supports_views = True supports_sequences = False sequences_optional = False preexecute_autoincrement_sequences = False postfetch_lastrowid = True implicit_returning = False supports_right_nested_joins = True supports_native_enum = False supports_native_boolean = False supports_simple_order_by_label = True # if the NUMERIC type # returns decimal.Decimal. # *not* the FLOAT type however. supports_native_decimal = False if util.py3k: supports_unicode_statements = True supports_unicode_binds = True returns_unicode_strings = True description_encoding = None else: supports_unicode_statements = False supports_unicode_binds = False returns_unicode_strings = False description_encoding = 'use_encoding' name = 'default' # length at which to truncate # any identifier. max_identifier_length = 9999 # length at which to truncate # the name of an index. # Usually None to indicate # 'use max_identifier_length'. # thanks to MySQL, sigh max_index_name_length = None supports_sane_rowcount = True supports_sane_multi_rowcount = True dbapi_type_map = {} colspecs = {} default_paramstyle = 'named' supports_default_values = False supports_empty_insert = True supports_multivalues_insert = False server_version_info = None # indicates symbol names are # UPPERCASEd if they are case insensitive # within the database. # if this is True, the methods normalize_name() # and denormalize_name() must be provided. 
requires_name_normalize = False reflection_options = () def __init__(self, convert_unicode=False, encoding='utf-8', paramstyle=None, dbapi=None, implicit_returning=None, supports_right_nested_joins=None, case_sensitive=True, label_length=None, **kwargs): if not getattr(self, 'ported_sqla_06', True): util.warn( "The %s dialect is not yet ported to the 0.6 format" % self.name) self.convert_unicode = convert_unicode self.encoding = encoding self.positional = False self._ischema = None self.dbapi = dbapi if paramstyle is not None: self.paramstyle = paramstyle elif self.dbapi is not None: self.paramstyle = self.dbapi.paramstyle else: self.paramstyle = self.default_paramstyle if implicit_returning is not None: self.implicit_returning = implicit_returning self.positional = self.paramstyle in ('qmark', 'format', 'numeric') self.identifier_preparer = self.preparer(self) self.type_compiler = self.type_compiler(self) if supports_right_nested_joins is not None: self.supports_right_nested_joins = supports_right_nested_joins self.case_sensitive = case_sensitive if label_length and label_length > self.max_identifier_length: raise exc.ArgumentError( "Label length of %d is greater than this dialect's" " maximum identifier length of %d" % (label_length, self.max_identifier_length)) self.label_length = label_length if self.description_encoding == 'use_encoding': self._description_decoder = \ processors.to_unicode_processor_factory( encoding ) elif self.description_encoding is not None: self._description_decoder = \ processors.to_unicode_processor_factory( self.description_encoding ) self._encoder = codecs.getencoder(self.encoding) self._decoder = processors.to_unicode_processor_factory(self.encoding) @util.memoized_property def _type_memos(self): return weakref.WeakKeyDictionary() @property def dialect_description(self): return self.name + "+" + self.driver @classmethod def get_pool_class(cls, url): return getattr(cls, 'poolclass', pool.QueuePool) def initialize(self, connection): try: self.server_version_info = \ self._get_server_version_info(connection) except NotImplementedError: self.server_version_info = None try: self.default_schema_name = \ self._get_default_schema_name(connection) except NotImplementedError: self.default_schema_name = None try: self.default_isolation_level = \ self.get_isolation_level(connection.connection) except NotImplementedError: self.default_isolation_level = None self.returns_unicode_strings = self._check_unicode_returns(connection) if self.description_encoding is not None and \ self._check_unicode_description(connection): self._description_decoder = self.description_encoding = None self.do_rollback(connection.connection) def on_connect(self): """return a callable which sets up a newly created DBAPI connection. This is used to set dialect-wide per-connection options such as isolation modes, unicode modes, etc. If a callable is returned, it will be assembled into a pool listener that receives the direct DBAPI connection, with all wrappers removed. If None is returned, no listener will be generated. 
""" return None def _check_unicode_returns(self, connection): if util.py2k and not self.supports_unicode_statements: cast_to = util.binary_type else: cast_to = util.text_type def check_unicode(formatstr, type_): cursor = connection.connection.cursor() try: try: cursor.execute( cast_to( expression.select( [expression.cast( expression.literal_column( "'test %s returns'" % formatstr), type_) ]).compile(dialect=self) ) ) row = cursor.fetchone() return isinstance(row[0], util.text_type) except self.dbapi.Error as de: util.warn("Exception attempting to " "detect unicode returns: %r" % de) return False finally: cursor.close() # detect plain VARCHAR unicode_for_varchar = check_unicode("plain", sqltypes.VARCHAR(60)) # detect if there's an NVARCHAR type with different behavior available unicode_for_unicode = check_unicode("unicode", sqltypes.Unicode(60)) if unicode_for_unicode and not unicode_for_varchar: return "conditional" else: return unicode_for_varchar def _check_unicode_description(self, connection): # all DBAPIs on Py2K return cursor.description as encoded, # until pypy2.1beta2 with sqlite, so let's just check it - # it's likely others will start doing this too in Py2k. if util.py2k and not self.supports_unicode_statements: cast_to = util.binary_type else: cast_to = util.text_type cursor = connection.connection.cursor() try: cursor.execute( cast_to( expression.select([ expression.literal_column("'x'").label("some_label") ]).compile(dialect=self) ) ) return isinstance(cursor.description[0][0], util.text_type) finally: cursor.close() def type_descriptor(self, typeobj): """Provide a database-specific :class:`.TypeEngine` object, given the generic object which comes from the types module. This method looks for a dictionary called ``colspecs`` as a class or instance-level variable, and passes on to :func:`.types.adapt_type`. """ return sqltypes.adapt_type(typeobj, self.colspecs) def reflecttable(self, connection, table, include_columns, exclude_columns=None): insp = reflection.Inspector.from_engine(connection) return insp.reflecttable(table, include_columns, exclude_columns) def get_pk_constraint(self, conn, table_name, schema=None, **kw): """Compatibility method, adapts the result of get_primary_keys() for those dialects which don't implement get_pk_constraint(). 
""" return { 'constrained_columns': self.get_primary_keys(conn, table_name, schema=schema, **kw) } def validate_identifier(self, ident): if len(ident) > self.max_identifier_length: raise exc.IdentifierError( "Identifier '%s' exceeds maximum length of %d characters" % (ident, self.max_identifier_length) ) def connect(self, *cargs, **cparams): return self.dbapi.connect(*cargs, **cparams) def create_connect_args(self, url): opts = url.translate_connect_args() opts.update(url.query) return [[], opts] def set_engine_execution_options(self, engine, opts): if 'isolation_level' in opts: isolation_level = opts['isolation_level'] @event.listens_for(engine, "engine_connect") def set_isolation(connection, branch): if not branch: self._set_connection_isolation(connection, isolation_level) def set_connection_execution_options(self, connection, opts): if 'isolation_level' in opts: self._set_connection_isolation(connection, opts['isolation_level']) def _set_connection_isolation(self, connection, level): self.set_isolation_level(connection.connection, level) connection.connection._connection_record.\ finalize_callback.append(self.reset_isolation_level) def do_begin(self, dbapi_connection): pass def do_rollback(self, dbapi_connection): dbapi_connection.rollback() def do_commit(self, dbapi_connection): dbapi_connection.commit() def do_close(self, dbapi_connection): dbapi_connection.close() def create_xid(self): """Create a random two-phase transaction ID. This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified. """ return "_sa_%032x" % random.randint(0, 2 ** 128) def do_savepoint(self, connection, name): connection.execute(expression.SavepointClause(name)) def do_rollback_to_savepoint(self, connection, name): connection.execute(expression.RollbackToSavepointClause(name)) def do_release_savepoint(self, connection, name): connection.execute(expression.ReleaseSavepointClause(name)) def do_executemany(self, cursor, statement, parameters, context=None): cursor.executemany(statement, parameters) def do_execute(self, cursor, statement, parameters, context=None): cursor.execute(statement, parameters) def do_execute_no_params(self, cursor, statement, context=None): cursor.execute(statement) def is_disconnect(self, e, connection, cursor): return False def reset_isolation_level(self, dbapi_conn): # default_isolation_level is read from the first connection # after the initial set of 'isolation_level', if any, so is # the configured default of this dialect. 
self.set_isolation_level(dbapi_conn, self.default_isolation_level) class DefaultExecutionContext(interfaces.ExecutionContext): isinsert = False isupdate = False isdelete = False isddl = False executemany = False result_map = None compiled = None statement = None postfetch_cols = None prefetch_cols = None returning_cols = None _is_implicit_returning = False _is_explicit_returning = False # a hook for SQLite's translation of # result column names _translate_colname = None @classmethod def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl): """Initialize execution context for a DDLElement construct.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine self.compiled = compiled = compiled_ddl self.isddl = True self.execution_options = compiled.statement._execution_options if connection._execution_options: self.execution_options = dict(self.execution_options) self.execution_options.update(connection._execution_options) if not dialect.supports_unicode_statements: self.unicode_statement = util.text_type(compiled) self.statement = dialect._encoder(self.unicode_statement)[0] else: self.statement = self.unicode_statement = util.text_type(compiled) self.cursor = self.create_cursor() self.compiled_parameters = [] if dialect.positional: self.parameters = [dialect.execute_sequence_format()] else: self.parameters = [{}] return self @classmethod def _init_compiled(cls, dialect, connection, dbapi_connection, compiled, parameters): """Initialize execution context for a Compiled construct.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine self.compiled = compiled if not compiled.can_execute: raise exc.ArgumentError("Not an executable clause") self.execution_options = compiled.statement._execution_options if connection._execution_options: self.execution_options = dict(self.execution_options) self.execution_options.update(connection._execution_options) # compiled clauseelement. process bind params, process table defaults, # track collections used by ResultProxy to target and process results self.result_map = compiled.result_map self.unicode_statement = util.text_type(compiled) if not dialect.supports_unicode_statements: self.statement = self.unicode_statement.encode( self.dialect.encoding) else: self.statement = self.unicode_statement self.isinsert = compiled.isinsert self.isupdate = compiled.isupdate self.isdelete = compiled.isdelete if self.isinsert or self.isupdate or self.isdelete: self._is_explicit_returning = bool(compiled.statement._returning) self._is_implicit_returning = bool(compiled.returning and \ not compiled.statement._returning) if not parameters: self.compiled_parameters = [compiled.construct_params()] else: self.compiled_parameters = \ [compiled.construct_params(m, _group_number=grp) for grp, m in enumerate(parameters)] self.executemany = len(parameters) > 1 self.cursor = self.create_cursor() if self.isinsert or self.isupdate: self.postfetch_cols = self.compiled.postfetch self.prefetch_cols = self.compiled.prefetch self.returning_cols = self.compiled.returning self.__process_defaults() processors = compiled._bind_processors # Convert the dictionary of bind parameter values # into a dict or list to be sent to the DBAPI's # execute() or executemany() method. 
parameters = [] if dialect.positional: for compiled_params in self.compiled_parameters: param = [] for key in self.compiled.positiontup: if key in processors: param.append(processors[key](compiled_params[key])) else: param.append(compiled_params[key]) parameters.append(dialect.execute_sequence_format(param)) else: encode = not dialect.supports_unicode_statements for compiled_params in self.compiled_parameters: param = {} if encode: for key in compiled_params: if key in processors: param[dialect._encoder(key)[0]] = \ processors[key](compiled_params[key]) else: param[dialect._encoder(key)[0]] = \ compiled_params[key] else: for key in compiled_params: if key in processors: param[key] = processors[key](compiled_params[key]) else: param[key] = compiled_params[key] parameters.append(param) self.parameters = dialect.execute_sequence_format(parameters) return self @classmethod def _init_statement(cls, dialect, connection, dbapi_connection, statement, parameters): """Initialize execution context for a string SQL statement.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine # plain text statement self.execution_options = connection._execution_options if not parameters: if self.dialect.positional: self.parameters = [dialect.execute_sequence_format()] else: self.parameters = [{}] elif isinstance(parameters[0], dialect.execute_sequence_format): self.parameters = parameters elif isinstance(parameters[0], dict): if dialect.supports_unicode_statements: self.parameters = parameters else: self.parameters = [ dict((dialect._encoder(k)[0], d[k]) for k in d) for d in parameters ] or [{}] else: self.parameters = [dialect.execute_sequence_format(p) for p in parameters] self.executemany = len(parameters) > 1 if not dialect.supports_unicode_statements and \ isinstance(statement, util.text_type): self.unicode_statement = statement self.statement = dialect._encoder(statement)[0] else: self.statement = self.unicode_statement = statement self.cursor = self.create_cursor() return self @classmethod def _init_default(cls, dialect, connection, dbapi_connection): """Initialize execution context for a ColumnDefault construct.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine self.execution_options = connection._execution_options self.cursor = self.create_cursor() return self @util.memoized_property def no_parameters(self): return self.execution_options.get("no_parameters", False) @util.memoized_property def is_crud(self): return self.isinsert or self.isupdate or self.isdelete @util.memoized_property def should_autocommit(self): autocommit = self.execution_options.get('autocommit', not self.compiled and self.statement and expression.PARSE_AUTOCOMMIT or False) if autocommit is expression.PARSE_AUTOCOMMIT: return self.should_autocommit_text(self.unicode_statement) else: return autocommit def _execute_scalar(self, stmt, type_): """Execute a string statement on the current cursor, returning a scalar result. Used to fire off sequences, default phrases, and "select lastrowid" types of statements individually or in the context of a parent INSERT or UPDATE statement. 
""" conn = self.root_connection if isinstance(stmt, util.text_type) and \ not self.dialect.supports_unicode_statements: stmt = self.dialect._encoder(stmt)[0] if self.dialect.positional: default_params = self.dialect.execute_sequence_format() else: default_params = {} conn._cursor_execute(self.cursor, stmt, default_params, context=self) r = self.cursor.fetchone()[0] if type_ is not None: # apply type post processors to the result proc = type_._cached_result_processor( self.dialect, self.cursor.description[0][1] ) if proc: return proc(r) return r @property def connection(self): return self.root_connection._branch() def should_autocommit_text(self, statement): return AUTOCOMMIT_REGEXP.match(statement) def create_cursor(self): return self._dbapi_connection.cursor() def pre_exec(self): pass def post_exec(self): pass def get_result_processor(self, type_, colname, coltype): """Return a 'result processor' for a given type as present in cursor.description. This has a default implementation that dialects can override for context-sensitive result type handling. """ return type_._cached_result_processor(self.dialect, coltype) def get_lastrowid(self): """return self.cursor.lastrowid, or equivalent, after an INSERT. This may involve calling special cursor functions, issuing a new SELECT on the cursor (or a new one), or returning a stored value that was calculated within post_exec(). This function will only be called for dialects which support "implicit" primary key generation, keep preexecute_autoincrement_sequences set to False, and when no explicit id value was bound to the statement. The function is called once, directly after post_exec() and before the transaction is committed or ResultProxy is generated. If the post_exec() method assigns a value to `self._lastrowid`, the value is used in place of calling get_lastrowid(). Note that this method is *not* equivalent to the ``lastrowid`` method on ``ResultProxy``, which is a direct proxy to the DBAPI ``lastrowid`` accessor in all cases. 
""" return self.cursor.lastrowid def handle_dbapi_exception(self, e): pass def get_result_proxy(self): return result.ResultProxy(self) @property def rowcount(self): return self.cursor.rowcount def supports_sane_rowcount(self): return self.dialect.supports_sane_rowcount def supports_sane_multi_rowcount(self): return self.dialect.supports_sane_multi_rowcount def post_insert(self): if not self._is_implicit_returning and \ not self._is_explicit_returning and \ not self.compiled.inline and \ self.dialect.postfetch_lastrowid and \ (not self.inserted_primary_key or \ None in self.inserted_primary_key): table = self.compiled.statement.table lastrowid = self.get_lastrowid() autoinc_col = table._autoincrement_column if autoinc_col is not None: # apply type post processors to the lastrowid proc = autoinc_col.type._cached_result_processor( self.dialect, None) if proc is not None: lastrowid = proc(lastrowid) self.inserted_primary_key = [ lastrowid if c is autoinc_col else v for c, v in zip( table.primary_key, self.inserted_primary_key) ] def _fetch_implicit_returning(self, resultproxy): table = self.compiled.statement.table row = resultproxy.fetchone() ipk = [] for c, v in zip(table.primary_key, self.inserted_primary_key): if v is not None: ipk.append(v) else: ipk.append(row[c]) self.inserted_primary_key = ipk self.returned_defaults = row def _fetch_implicit_update_returning(self, resultproxy): row = resultproxy.fetchone() self.returned_defaults = row def lastrow_has_defaults(self): return (self.isinsert or self.isupdate) and \ bool(self.postfetch_cols) def set_input_sizes(self, translate=None, exclude_types=None): """Given a cursor and ClauseParameters, call the appropriate style of ``setinputsizes()`` on the cursor, using DB-API types from the bind parameter's ``TypeEngine`` objects. This method only called by those dialects which require it, currently cx_oracle. 
""" if not hasattr(self.compiled, 'bind_names'): return types = dict( (self.compiled.bind_names[bindparam], bindparam.type) for bindparam in self.compiled.bind_names) if self.dialect.positional: inputsizes = [] for key in self.compiled.positiontup: typeengine = types[key] dbtype = typeengine.dialect_impl(self.dialect).\ get_dbapi_type(self.dialect.dbapi) if dbtype is not None and \ (not exclude_types or dbtype not in exclude_types): inputsizes.append(dbtype) try: self.cursor.setinputsizes(*inputsizes) except Exception as e: self.root_connection._handle_dbapi_exception( e, None, None, None, self) else: inputsizes = {} for key in self.compiled.bind_names.values(): typeengine = types[key] dbtype = typeengine.dialect_impl(self.dialect).\ get_dbapi_type(self.dialect.dbapi) if dbtype is not None and \ (not exclude_types or dbtype not in exclude_types): if translate: key = translate.get(key, key) if not self.dialect.supports_unicode_binds: key = self.dialect._encoder(key)[0] inputsizes[key] = dbtype try: self.cursor.setinputsizes(**inputsizes) except Exception as e: self.root_connection._handle_dbapi_exception( e, None, None, None, self) def _exec_default(self, default, type_): if default.is_sequence: return self.fire_sequence(default, type_) elif default.is_callable: return default.arg(self) elif default.is_clause_element: # TODO: expensive branching here should be # pulled into _exec_scalar() conn = self.connection c = expression.select([default.arg]).compile(bind=conn) return conn._execute_compiled(c, (), {}).scalar() else: return default.arg def get_insert_default(self, column): if column.default is None: return None else: return self._exec_default(column.default, column.type) def get_update_default(self, column): if column.onupdate is None: return None else: return self._exec_default(column.onupdate, column.type) def __process_defaults(self): """Generate default values for compiled insert/update statements, and generate inserted_primary_key collection. """ if self.executemany: if len(self.compiled.prefetch): scalar_defaults = {} # pre-determine scalar Python-side defaults # to avoid many calls of get_insert_default()/ # get_update_default() for c in self.prefetch_cols: if self.isinsert and c.default and c.default.is_scalar: scalar_defaults[c] = c.default.arg elif self.isupdate and c.onupdate and c.onupdate.is_scalar: scalar_defaults[c] = c.onupdate.arg for param in self.compiled_parameters: self.current_parameters = param for c in self.prefetch_cols: if c in scalar_defaults: val = scalar_defaults[c] elif self.isinsert: val = self.get_insert_default(c) else: val = self.get_update_default(c) if val is not None: param[c.key] = val del self.current_parameters else: self.current_parameters = compiled_parameters = \ self.compiled_parameters[0] for c in self.compiled.prefetch: if self.isinsert: val = self.get_insert_default(c) else: val = self.get_update_default(c) if val is not None: compiled_parameters[c.key] = val del self.current_parameters if self.isinsert: self.inserted_primary_key = [ self.compiled_parameters[0].get(c.key, None) for c in self.compiled.\ statement.table.primary_key ] DefaultDialect.execution_ctx_cls = DefaultExecutionContext
# -*- coding: utf-8 -*- __all__ = ['Distribution'] import io import sys import re import os import warnings import numbers import distutils.log import distutils.core import distutils.cmd import distutils.dist from distutils.util import strtobool from distutils.debug import DEBUG from distutils.fancy_getopt import translate_longopt import itertools from collections import defaultdict from email import message_from_file from distutils.errors import ( DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError, ) from distutils.util import rfc822_escape from distutils.version import StrictVersion if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.extern import six # vendor:skip else: from pex.third_party.setuptools.extern import six if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.extern import packaging # vendor:skip else: from pex.third_party.setuptools.extern import packaging if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.extern import ordered_set # vendor:skip else: from pex.third_party.setuptools.extern import ordered_set if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.extern.six.moves import map, filter, filterfalse # vendor:skip else: from pex.third_party.setuptools.extern.six.moves import map, filter, filterfalse from . import SetuptoolsDeprecationWarning if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.depends import Require # vendor:skip else: from pex.third_party.setuptools.depends import Require if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools import windows_support # vendor:skip else: from pex.third_party.setuptools import windows_support if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.monkey import get_unpatched # vendor:skip else: from pex.third_party.setuptools.monkey import get_unpatched if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.config import parse_configuration # vendor:skip else: from pex.third_party.setuptools.config import parse_configuration if "__PEX_UNVENDORED__" in __import__("os").environ: import pkg_resources # vendor:skip else: import pex.third_party.pkg_resources as pkg_resources if "__PEX_UNVENDORED__" in __import__("os").environ: __import__('setuptools.extern.packaging.specifiers') # vendor:skip else: __import__('pex.third_party.setuptools.extern.packaging.specifiers') if "__PEX_UNVENDORED__" in __import__("os").environ: __import__('setuptools.extern.packaging.version') # vendor:skip else: __import__('pex.third_party.setuptools.extern.packaging.version') def _get_unpatched(cls): warnings.warn("Do not call this function", DistDeprecationWarning) return get_unpatched(cls) def get_metadata_version(self): mv = getattr(self, 'metadata_version', None) if mv is None: if self.long_description_content_type or self.provides_extras: mv = StrictVersion('2.1') elif (self.maintainer is not None or self.maintainer_email is not None or getattr(self, 'python_requires', None) is not None or self.project_urls): mv = StrictVersion('1.2') elif (self.provides or self.requires or self.obsoletes or self.classifiers or self.download_url): mv = StrictVersion('1.1') else: mv = StrictVersion('1.0') self.metadata_version = mv return mv def read_pkg_file(self, file): """Reads the metadata values from a file object.""" msg = message_from_file(file) def _read_field(name): value = msg[name] if value == 'UNKNOWN': return None return value def _read_list(name): values = msg.get_all(name, None) if values == []: return None 
return values self.metadata_version = StrictVersion(msg['metadata-version']) self.name = _read_field('name') self.version = _read_field('version') self.description = _read_field('summary') # we are filling author only. self.author = _read_field('author') self.maintainer = None self.author_email = _read_field('author-email') self.maintainer_email = None self.url = _read_field('home-page') self.license = _read_field('license') if 'download-url' in msg: self.download_url = _read_field('download-url') else: self.download_url = None self.long_description = _read_field('description') self.description = _read_field('summary') if 'keywords' in msg: self.keywords = _read_field('keywords').split(',') self.platforms = _read_list('platform') self.classifiers = _read_list('classifier') # PEP 314 - these fields only exist in 1.1 if self.metadata_version == StrictVersion('1.1'): self.requires = _read_list('requires') self.provides = _read_list('provides') self.obsoletes = _read_list('obsoletes') else: self.requires = None self.provides = None self.obsoletes = None # Based on Python 3.5 version def write_pkg_file(self, file): """Write the PKG-INFO format data to a file object. """ version = self.get_metadata_version() if six.PY2: def write_field(key, value): file.write("%s: %s\n" % (key, self._encode_field(value))) else: def write_field(key, value): file.write("%s: %s\n" % (key, value)) write_field('Metadata-Version', str(version)) write_field('Name', self.get_name()) write_field('Version', self.get_version()) write_field('Summary', self.get_description()) write_field('Home-page', self.get_url()) if version < StrictVersion('1.2'): write_field('Author', self.get_contact()) write_field('Author-email', self.get_contact_email()) else: optional_fields = ( ('Author', 'author'), ('Author-email', 'author_email'), ('Maintainer', 'maintainer'), ('Maintainer-email', 'maintainer_email'), ) for field, attr in optional_fields: attr_val = getattr(self, attr) if attr_val is not None: write_field(field, attr_val) write_field('License', self.get_license()) if self.download_url: write_field('Download-URL', self.download_url) for project_url in self.project_urls.items(): write_field('Project-URL', '%s, %s' % project_url) long_desc = rfc822_escape(self.get_long_description()) write_field('Description', long_desc) keywords = ','.join(self.get_keywords()) if keywords: write_field('Keywords', keywords) if version >= StrictVersion('1.2'): for platform in self.get_platforms(): write_field('Platform', platform) else: self._write_list(file, 'Platform', self.get_platforms()) self._write_list(file, 'Classifier', self.get_classifiers()) # PEP 314 self._write_list(file, 'Requires', self.get_requires()) self._write_list(file, 'Provides', self.get_provides()) self._write_list(file, 'Obsoletes', self.get_obsoletes()) # Setuptools specific for PEP 345 if hasattr(self, 'python_requires'): write_field('Requires-Python', self.python_requires) # PEP 566 if self.long_description_content_type: write_field( 'Description-Content-Type', self.long_description_content_type ) if self.provides_extras: for extra in self.provides_extras: write_field('Provides-Extra', extra) sequence = tuple, list def check_importable(dist, attr, value): try: ep = pkg_resources.EntryPoint.parse('x=' + value) assert not ep.extras except (TypeError, ValueError, AttributeError, AssertionError): raise DistutilsSetupError( "%r must be importable 'module:attrs' string (got %r)" % (attr, value) ) def assert_string_list(dist, attr, value): """Verify that value is a string list""" 
try: # verify that value is a list or tuple to exclude unordered # or single-use iterables assert isinstance(value, (list, tuple)) # verify that elements of value are strings assert ''.join(value) != value except (TypeError, ValueError, AttributeError, AssertionError): raise DistutilsSetupError( "%r must be a list of strings (got %r)" % (attr, value) ) def check_nsp(dist, attr, value): """Verify that namespace packages are valid""" ns_packages = value assert_string_list(dist, attr, ns_packages) for nsp in ns_packages: if not dist.has_contents_for(nsp): raise DistutilsSetupError( "Distribution contains no modules or packages for " + "namespace package %r" % nsp ) parent, sep, child = nsp.rpartition('.') if parent and parent not in ns_packages: distutils.log.warn( "WARNING: %r is declared as a package namespace, but %r" " is not: please correct this in setup.py", nsp, parent ) def check_extras(dist, attr, value): """Verify that extras_require mapping is valid""" try: list(itertools.starmap(_check_extra, value.items())) except (TypeError, ValueError, AttributeError): raise DistutilsSetupError( "'extras_require' must be a dictionary whose values are " "strings or lists of strings containing valid project/version " "requirement specifiers." ) def _check_extra(extra, reqs): name, sep, marker = extra.partition(':') if marker and pkg_resources.invalid_marker(marker): raise DistutilsSetupError("Invalid environment marker: " + marker) list(pkg_resources.parse_requirements(reqs)) def assert_bool(dist, attr, value): """Verify that value is True, False, 0, or 1""" if bool(value) != value: tmpl = "{attr!r} must be a boolean value (got {value!r})" raise DistutilsSetupError(tmpl.format(attr=attr, value=value)) def check_requirements(dist, attr, value): """Verify that install_requires is a valid requirements list""" try: list(pkg_resources.parse_requirements(value)) if isinstance(value, (dict, set)): raise TypeError("Unordered types are not allowed") except (TypeError, ValueError) as error: tmpl = ( "{attr!r} must be a string or list of strings " "containing valid project/version requirement specifiers; {error}" ) raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) def check_specifier(dist, attr, value): """Verify that value is a valid version specifier""" try: packaging.specifiers.SpecifierSet(value) except packaging.specifiers.InvalidSpecifier as error: tmpl = ( "{attr!r} must be a string " "containing valid version specifiers; {error}" ) raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) def check_entry_points(dist, attr, value): """Verify that entry_points map is parseable""" try: pkg_resources.EntryPoint.parse_map(value) except ValueError as e: raise DistutilsSetupError(e) def check_test_suite(dist, attr, value): if not isinstance(value, six.string_types): raise DistutilsSetupError("test_suite must be a string") def check_package_data(dist, attr, value): """Verify that value is a dictionary of package names to glob lists""" if not isinstance(value, dict): raise DistutilsSetupError( "{!r} must be a dictionary mapping package names to lists of " "string wildcard patterns".format(attr)) for k, v in value.items(): if not isinstance(k, six.string_types): raise DistutilsSetupError( "keys of {!r} dict must be strings (got {!r})" .format(attr, k) ) assert_string_list(dist, 'values of {!r} dict'.format(attr), v) def check_packages(dist, attr, value): for pkgname in value: if not re.match(r'\w+(\.\w+)*', pkgname): distutils.log.warn( "WARNING: %r not a valid package name; please use only 
" ".-separated package names in setup.py", pkgname ) _Distribution = get_unpatched(distutils.core.Distribution) class Distribution(_Distribution): """Distribution with support for features, tests, and package data This is an enhanced version of 'distutils.dist.Distribution' that effectively adds the following new optional keyword arguments to 'setup()': 'install_requires' -- a string or sequence of strings specifying project versions that the distribution requires when installed, in the format used by 'pkg_resources.require()'. They will be installed automatically when the package is installed. If you wish to use packages that are not available in PyPI, or want to give your users an alternate download location, you can add a 'find_links' option to the '[easy_install]' section of your project's 'setup.cfg' file, and then setuptools will scan the listed web pages for links that satisfy the requirements. 'extras_require' -- a dictionary mapping names of optional "extras" to the additional requirement(s) that using those extras incurs. For example, this:: extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) indicates that the distribution can optionally provide an extra capability called "reST", but it can only be used if docutils and reSTedit are installed. If the user installs your package using EasyInstall and requests one of your extras, the corresponding additional requirements will be installed if needed. 'features' **deprecated** -- a dictionary mapping option names to 'setuptools.Feature' objects. Features are a portion of the distribution that can be included or excluded based on user options, inter-feature dependencies, and availability on the current system. Excluded features are omitted from all setup commands, including source and binary distributions, so you can create multiple distributions from the same source tree. Feature names should be valid Python identifiers, except that they may contain the '-' (minus) sign. Features can be included or excluded via the command line options '--with-X' and '--without-X', where 'X' is the name of the feature. Whether a feature is included by default, and whether you are allowed to control this from the command line, is determined by the Feature object. See the 'Feature' class for more information. 'test_suite' -- the name of a test suite to run for the 'test' command. If the user runs 'python setup.py test', the package will be installed, and the named test suite will be run. The format is the same as would be used on a 'unittest.py' command line. That is, it is the dotted name of an object to import and call to generate a test suite. 'package_data' -- a dictionary mapping package names to lists of filenames or globs to use to find data files contained in the named packages. If the dictionary has filenames or globs listed under '""' (the empty string), those names will be searched for in every package, in addition to any names for the specific package. Data files found using these names/globs will be installed along with the package, in the same location as the package. Note that globs are allowed to reference the contents of non-package subdirectories, as long as you use '/' as a path separator. (Globs are automatically converted to platform-specific paths at runtime.) In addition to these new keywords, this class also has several new methods for manipulating the distribution's contents. 
For example, the 'include()' and 'exclude()' methods can be thought of as in-place add and subtract commands that add or remove packages, modules, extensions, and so on from the distribution. They are used by the feature subsystem to configure the distribution for the included and excluded features. """ _DISTUTILS_UNSUPPORTED_METADATA = { 'long_description_content_type': None, 'project_urls': dict, 'provides_extras': ordered_set.OrderedSet, 'license_files': ordered_set.OrderedSet, } _patched_dist = None def patch_missing_pkg_info(self, attrs): # Fake up a replacement for the data that would normally come from # PKG-INFO, but which might not yet be built if this is a fresh # checkout. # if not attrs or 'name' not in attrs or 'version' not in attrs: return key = pkg_resources.safe_name(str(attrs['name'])).lower() dist = pkg_resources.working_set.by_key.get(key) if dist is not None and not dist.has_metadata('PKG-INFO'): dist._version = pkg_resources.safe_version(str(attrs['version'])) self._patched_dist = dist def __init__(self, attrs=None): have_package_data = hasattr(self, "package_data") if not have_package_data: self.package_data = {} attrs = attrs or {} if 'features' in attrs or 'require_features' in attrs: Feature.warn_deprecated() self.require_features = [] self.features = {} self.dist_files = [] # Filter-out setuptools' specific options. self.src_root = attrs.pop("src_root", None) self.patch_missing_pkg_info(attrs) self.dependency_links = attrs.pop('dependency_links', []) self.setup_requires = attrs.pop('setup_requires', []) for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): vars(self).setdefault(ep.name, None) _Distribution.__init__(self, { k: v for k, v in attrs.items() if k not in self._DISTUTILS_UNSUPPORTED_METADATA }) # Fill-in missing metadata fields not supported by distutils. # Note some fields may have been set by other tools (e.g. pbr) # above; they are taken preferrentially to setup() arguments for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items(): for source in self.metadata.__dict__, attrs: if option in source: value = source[option] break else: value = default() if default else None setattr(self.metadata, option, value) if isinstance(self.metadata.version, numbers.Number): # Some people apparently take "version number" too literally :) self.metadata.version = str(self.metadata.version) if self.metadata.version is not None: try: ver = packaging.version.Version(self.metadata.version) normalized_version = str(ver) if self.metadata.version != normalized_version: warnings.warn( "Normalizing '%s' to '%s'" % ( self.metadata.version, normalized_version, ) ) self.metadata.version = normalized_version except (packaging.version.InvalidVersion, TypeError): warnings.warn( "The version specified (%r) is an invalid version, this " "may not work as expected with newer versions of " "setuptools, pip, and PyPI. Please see PEP 440 for more " "details." % self.metadata.version ) self._finalize_requires() def _finalize_requires(self): """ Set `metadata.python_requires` and fix environment markers in `install_requires` and `extras_require`. """ if getattr(self, 'python_requires', None): self.metadata.python_requires = self.python_requires if getattr(self, 'extras_require', None): for extra in self.extras_require.keys(): # Since this gets called multiple times at points where the # keys have become 'converted' extras, ensure that we are only # truly adding extras we haven't seen before here. 
extra = extra.split(':')[0] if extra: self.metadata.provides_extras.add(extra) self._convert_extras_requirements() self._move_install_requirements_markers() def _convert_extras_requirements(self): """ Convert requirements in `extras_require` of the form `"extra": ["barbazquux; {marker}"]` to `"extra:{marker}": ["barbazquux"]`. """ spec_ext_reqs = getattr(self, 'extras_require', None) or {} self._tmp_extras_require = defaultdict(list) for section, v in spec_ext_reqs.items(): # Do not strip empty sections. self._tmp_extras_require[section] for r in pkg_resources.parse_requirements(v): suffix = self._suffix_for(r) self._tmp_extras_require[section + suffix].append(r) @staticmethod def _suffix_for(req): """ For a requirement, return the 'extras_require' suffix for that requirement. """ return ':' + str(req.marker) if req.marker else '' def _move_install_requirements_markers(self): """ Move requirements in `install_requires` that are using environment markers `extras_require`. """ # divide the install_requires into two sets, simple ones still # handled by install_requires and more complex ones handled # by extras_require. def is_simple_req(req): return not req.marker spec_inst_reqs = getattr(self, 'install_requires', None) or () inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs)) simple_reqs = filter(is_simple_req, inst_reqs) complex_reqs = filterfalse(is_simple_req, inst_reqs) self.install_requires = list(map(str, simple_reqs)) for r in complex_reqs: self._tmp_extras_require[':' + str(r.marker)].append(r) self.extras_require = dict( (k, [str(r) for r in map(self._clean_req, v)]) for k, v in self._tmp_extras_require.items() ) def _clean_req(self, req): """ Given a Requirement, remove environment markers and return it. """ req.marker = None return req def _parse_config_files(self, filenames=None): """ Adapted from distutils.dist.Distribution.parse_config_files, this method provides the same functionality in subtly-improved ways. """ if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.extern.six.moves.configparser import ConfigParser # vendor:skip else: from pex.third_party.setuptools.extern.six.moves.configparser import ConfigParser # Ignore install directory options if we have a venv if six.PY3 and sys.prefix != sys.base_prefix: ignore_options = [ 'install-base', 'install-platbase', 'install-lib', 'install-platlib', 'install-purelib', 'install-headers', 'install-scripts', 'install-data', 'prefix', 'exec-prefix', 'home', 'user', 'root'] else: ignore_options = [] ignore_options = frozenset(ignore_options) if filenames is None: filenames = self.find_config_files() if DEBUG: self.announce("Distribution.parse_config_files():") parser = ConfigParser() for filename in filenames: with io.open(filename, encoding='utf-8') as reader: if DEBUG: self.announce(" reading {filename}".format(**locals())) (parser.read_file if six.PY3 else parser.readfp)(reader) for section in parser.sections(): options = parser.options(section) opt_dict = self.get_option_dict(section) for opt in options: if opt != '__name__' and opt not in ignore_options: val = self._try_str(parser.get(section, opt)) opt = opt.replace('-', '_') opt_dict[opt] = (filename, val) # Make the ConfigParser forget everything (so we retain # the original filenames that options come from) parser.__init__() # If there was a "global" section in the config file, use it # to set Distribution options. 
if 'global' in self.command_options: for (opt, (src, val)) in self.command_options['global'].items(): alias = self.negative_opt.get(opt) try: if alias: setattr(self, alias, not strtobool(val)) elif opt in ('verbose', 'dry_run'): # ugh! setattr(self, opt, strtobool(val)) else: setattr(self, opt, val) except ValueError as msg: raise DistutilsOptionError(msg) @staticmethod def _try_str(val): """ On Python 2, much of distutils relies on string values being of type 'str' (bytes) and not unicode text. If the value can be safely encoded to bytes using the default encoding, prefer that. Why the default encoding? Because that value can be implicitly decoded back to text if needed. Ref #1653 """ if six.PY3: return val try: return val.encode() except UnicodeEncodeError: pass return val def _set_command_options(self, command_obj, option_dict=None): """ Set the options for 'command_obj' from 'option_dict'. Basically this means copying elements of a dictionary ('option_dict') to attributes of an instance ('command'). 'command_obj' must be a Command instance. If 'option_dict' is not supplied, uses the standard option dictionary for this command (from 'self.command_options'). (Adopted from distutils.dist.Distribution._set_command_options) """ command_name = command_obj.get_command_name() if option_dict is None: option_dict = self.get_option_dict(command_name) if DEBUG: self.announce(" setting options for '%s' command:" % command_name) for (option, (source, value)) in option_dict.items(): if DEBUG: self.announce(" %s = %s (from %s)" % (option, value, source)) try: bool_opts = [translate_longopt(o) for o in command_obj.boolean_options] except AttributeError: bool_opts = [] try: neg_opt = command_obj.negative_opt except AttributeError: neg_opt = {} try: is_string = isinstance(value, six.string_types) if option in neg_opt and is_string: setattr(command_obj, neg_opt[option], not strtobool(value)) elif option in bool_opts and is_string: setattr(command_obj, option, strtobool(value)) elif hasattr(command_obj, option): setattr(command_obj, option, value) else: raise DistutilsOptionError( "error in %s: command '%s' has no such option '%s'" % (source, command_name, option)) except ValueError as msg: raise DistutilsOptionError(msg) def parse_config_files(self, filenames=None, ignore_option_errors=False): """Parses configuration files from various levels and loads configuration. """ self._parse_config_files(filenames=filenames) parse_configuration(self, self.command_options, ignore_option_errors=ignore_option_errors) self._finalize_requires() def parse_command_line(self): """Process features after parsing command line options""" result = _Distribution.parse_command_line(self) if self.features: self._finalize_features() return result def _feature_attrname(self, name): """Convert feature name to corresponding option attribute name""" return 'with_' + name.replace('-', '_') def fetch_build_eggs(self, requires): """Resolve pre-setup requirements""" resolved_dists = pkg_resources.working_set.resolve( pkg_resources.parse_requirements(requires), installer=self.fetch_build_egg, replace_conflicting=True, ) for dist in resolved_dists: pkg_resources.working_set.add(dist, replace=True) return resolved_dists def finalize_options(self): """ Allow plugins to apply arbitrary operations to the distribution. Each hook may optionally define a 'order' to influence the order of execution. Smaller numbers go first and the default is 0. 
""" hook_key = 'setuptools.finalize_distribution_options' def by_order(hook): return getattr(hook, 'order', 0) eps = pkg_resources.iter_entry_points(hook_key) for ep in sorted(eps, key=by_order): ep.load()(self) def _finalize_setup_keywords(self): for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): value = getattr(self, ep.name, None) if value is not None: ep.require(installer=self.fetch_build_egg) ep.load()(self, ep.name, value) def _finalize_2to3_doctests(self): if getattr(self, 'convert_2to3_doctests', None): # XXX may convert to set here when we can rely on set being builtin self.convert_2to3_doctests = [ os.path.abspath(p) for p in self.convert_2to3_doctests ] else: self.convert_2to3_doctests = [] def get_egg_cache_dir(self): egg_cache_dir = os.path.join(os.curdir, '.eggs') if not os.path.exists(egg_cache_dir): os.mkdir(egg_cache_dir) windows_support.hide_file(egg_cache_dir) readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt') with open(readme_txt_filename, 'w') as f: f.write('This directory contains eggs that were downloaded ' 'by setuptools to build, test, and run plug-ins.\n\n') f.write('This directory caches those eggs to prevent ' 'repeated downloads.\n\n') f.write('However, it is safe to delete this directory.\n\n') return egg_cache_dir def fetch_build_egg(self, req): """Fetch an egg needed for building""" if "__PEX_UNVENDORED__" in __import__("os").environ: from setuptools.installer import fetch_build_egg # vendor:skip else: from pex.third_party.setuptools.installer import fetch_build_egg return fetch_build_egg(self, req) def _finalize_feature_opts(self): """Add --with-X/--without-X options based on optional features""" if not self.features: return go = [] no = self.negative_opt.copy() for name, feature in self.features.items(): self._set_feature(name, None) feature.validate(self) if feature.optional: descr = feature.description incdef = ' (default)' excdef = '' if not feature.include_by_default(): excdef, incdef = incdef, excdef new = ( ('with-' + name, None, 'include ' + descr + incdef), ('without-' + name, None, 'exclude ' + descr + excdef), ) go.extend(new) no['without-' + name] = 'with-' + name self.global_options = self.feature_options = go + self.global_options self.negative_opt = self.feature_negopt = no def _finalize_features(self): """Add/remove features and resolve dependencies between them""" # First, flag all the enabled items (and thus their dependencies) for name, feature in self.features.items(): enabled = self.feature_is_included(name) if enabled or (enabled is None and feature.include_by_default()): feature.include_in(self) self._set_feature(name, 1) # Then disable the rest, so that off-by-default features don't # get flagged as errors when they're required by an enabled feature for name, feature in self.features.items(): if not self.feature_is_included(name): feature.exclude_from(self) self._set_feature(name, 0) def get_command_class(self, command): """Pluggable version of get_command_class()""" if command in self.cmdclass: return self.cmdclass[command] eps = pkg_resources.iter_entry_points('distutils.commands', command) for ep in eps: ep.require(installer=self.fetch_build_egg) self.cmdclass[command] = cmdclass = ep.load() return cmdclass else: return _Distribution.get_command_class(self, command) def print_commands(self): for ep in pkg_resources.iter_entry_points('distutils.commands'): if ep.name not in self.cmdclass: # don't require extras as the commands won't be invoked cmdclass = ep.resolve() self.cmdclass[ep.name] = 
cmdclass return _Distribution.print_commands(self) def get_command_list(self): for ep in pkg_resources.iter_entry_points('distutils.commands'): if ep.name not in self.cmdclass: # don't require extras as the commands won't be invoked cmdclass = ep.resolve() self.cmdclass[ep.name] = cmdclass return _Distribution.get_command_list(self) def _set_feature(self, name, status): """Set feature's inclusion status""" setattr(self, self._feature_attrname(name), status) def feature_is_included(self, name): """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" return getattr(self, self._feature_attrname(name)) def include_feature(self, name): """Request inclusion of feature named 'name'""" if self.feature_is_included(name) == 0: descr = self.features[name].description raise DistutilsOptionError( descr + " is required, but was excluded or is not available" ) self.features[name].include_in(self) self._set_feature(name, 1) def include(self, **attrs): """Add items to distribution that are named in keyword arguments For example, 'dist.include(py_modules=["x"])' would add 'x' to the distribution's 'py_modules' attribute, if it was not already there. Currently, this method only supports inclusion for attributes that are lists or tuples. If you need to add support for adding to other attributes in this or a subclass, you can add an '_include_X' method, where 'X' is the name of the attribute. The method will be called with the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' will try to call 'dist._include_foo({"bar":"baz"})', which can then handle whatever special inclusion logic is needed. """ for k, v in attrs.items(): include = getattr(self, '_include_' + k, None) if include: include(v) else: self._include_misc(k, v) def exclude_package(self, package): """Remove packages, modules, and extensions in named package""" pfx = package + '.' if self.packages: self.packages = [ p for p in self.packages if p != package and not p.startswith(pfx) ] if self.py_modules: self.py_modules = [ p for p in self.py_modules if p != package and not p.startswith(pfx) ] if self.ext_modules: self.ext_modules = [ p for p in self.ext_modules if p.name != package and not p.name.startswith(pfx) ] def has_contents_for(self, package): """Return true if 'exclude_package(package)' would do something""" pfx = package + '.' 
for p in self.iter_distribution_names(): if p == package or p.startswith(pfx): return True def _exclude_misc(self, name, value): """Handle 'exclude()' for list/tuple attrs without a special handler""" if not isinstance(value, sequence): raise DistutilsSetupError( "%s: setting must be a list or tuple (%r)" % (name, value) ) try: old = getattr(self, name) except AttributeError: raise DistutilsSetupError( "%s: No such distribution setting" % name ) if old is not None and not isinstance(old, sequence): raise DistutilsSetupError( name + ": this setting cannot be changed via include/exclude" ) elif old: setattr(self, name, [item for item in old if item not in value]) def _include_misc(self, name, value): """Handle 'include()' for list/tuple attrs without a special handler""" if not isinstance(value, sequence): raise DistutilsSetupError( "%s: setting must be a list (%r)" % (name, value) ) try: old = getattr(self, name) except AttributeError: raise DistutilsSetupError( "%s: No such distribution setting" % name ) if old is None: setattr(self, name, value) elif not isinstance(old, sequence): raise DistutilsSetupError( name + ": this setting cannot be changed via include/exclude" ) else: new = [item for item in value if item not in old] setattr(self, name, old + new) def exclude(self, **attrs): """Remove items from distribution that are named in keyword arguments For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from the distribution's 'py_modules' attribute. Excluding packages uses the 'exclude_package()' method, so all of the package's contained packages, modules, and extensions are also excluded. Currently, this method only supports exclusion from attributes that are lists or tuples. If you need to add support for excluding from other attributes in this or a subclass, you can add an '_exclude_X' method, where 'X' is the name of the attribute. The method will be called with the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})' will try to call 'dist._exclude_foo({"bar":"baz"})', which can then handle whatever special exclusion logic is needed. """ for k, v in attrs.items(): exclude = getattr(self, '_exclude_' + k, None) if exclude: exclude(v) else: self._exclude_misc(k, v) def _exclude_packages(self, packages): if not isinstance(packages, sequence): raise DistutilsSetupError( "packages: setting must be a list or tuple (%r)" % (packages,) ) list(map(self.exclude_package, packages)) def _parse_command_opts(self, parser, args): # Remove --with-X/--without-X options when processing command args self.global_options = self.__class__.global_options self.negative_opt = self.__class__.negative_opt # First, expand any aliases command = args[0] aliases = self.get_option_dict('aliases') while command in aliases: src, alias = aliases[command] del aliases[command] # ensure each alias can expand only once! import shlex args[:1] = shlex.split(alias, True) command = args[0] nargs = _Distribution._parse_command_opts(self, parser, args) # Handle commands that want to consume all remaining arguments cmd_class = self.get_command_class(command) if getattr(cmd_class, 'command_consumes_arguments', None): self.get_option_dict(command)['args'] = ("command line", nargs) if nargs is not None: return [] return nargs def get_cmdline_options(self): """Return a '{cmd: {opt:val}}' map of all command-line options Option names are all long, but do not include the leading '--', and contain dashes rather than underscores. If the option doesn't take an argument (e.g. '--quiet'), the 'val' is 'None'. 
Note that options provided by config files are intentionally excluded. """ d = {} for cmd, opts in self.command_options.items(): for opt, (src, val) in opts.items(): if src != "command line": continue opt = opt.replace('_', '-') if val == 0: cmdobj = self.get_command_obj(cmd) neg_opt = self.negative_opt.copy() neg_opt.update(getattr(cmdobj, 'negative_opt', {})) for neg, pos in neg_opt.items(): if pos == opt: opt = neg val = None break else: raise AssertionError("Shouldn't be able to get here") elif val == 1: val = None d.setdefault(cmd, {})[opt] = val return d def iter_distribution_names(self): """Yield all packages, modules, and extension names in distribution""" for pkg in self.packages or (): yield pkg for module in self.py_modules or (): yield module for ext in self.ext_modules or (): if isinstance(ext, tuple): name, buildinfo = ext else: name = ext.name if name.endswith('module'): name = name[:-6] yield name def handle_display_options(self, option_order): """If there were any non-global "display-only" options (--help-commands or the metadata display options) on the command line, display the requested info and return true; else return false. """ import sys if six.PY2 or self.help_commands: return _Distribution.handle_display_options(self, option_order) # Stdout may be StringIO (e.g. in tests) if not isinstance(sys.stdout, io.TextIOWrapper): return _Distribution.handle_display_options(self, option_order) # Don't wrap stdout if utf-8 is already the encoding. Provides # workaround for #334. if sys.stdout.encoding.lower() in ('utf-8', 'utf8'): return _Distribution.handle_display_options(self, option_order) # Print metadata in UTF-8 no matter the platform encoding = sys.stdout.encoding errors = sys.stdout.errors newline = sys.platform != 'win32' and '\n' or None line_buffering = sys.stdout.line_buffering sys.stdout = io.TextIOWrapper( sys.stdout.detach(), 'utf-8', errors, newline, line_buffering) try: return _Distribution.handle_display_options(self, option_order) finally: sys.stdout = io.TextIOWrapper( sys.stdout.detach(), encoding, errors, newline, line_buffering) class Feature: """ **deprecated** -- The `Feature` facility was never completely implemented or supported, `has reported issues <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in a future version. A subset of the distribution that can be excluded if unneeded/wanted Features are created using these keyword arguments: 'description' -- a short, human readable description of the feature, to be used in error messages, and option help messages. 'standard' -- if true, the feature is included by default if it is available on the current system. Otherwise, the feature is only included if requested via a command line '--with-X' option, or if another included feature requires it. The default setting is 'False'. 'available' -- if true, the feature is available for installation on the current system. The default setting is 'True'. 'optional' -- if true, the feature's inclusion can be controlled from the command line, using the '--with-X' or '--without-X' options. If false, the feature's inclusion status is determined automatically, based on 'availabile', 'standard', and whether any other feature requires it. The default setting is 'True'. 'require_features' -- a string or sequence of strings naming features that should also be included if this feature is included. Defaults to empty list. May also contain 'Require' objects that should be added/removed from the distribution. 
'remove' -- a string or list of strings naming packages to be removed from the distribution if this feature is *not* included. If the feature *is* included, this argument is ignored. This argument exists to support removing features that "crosscut" a distribution, such as defining a 'tests' feature that removes all the 'tests' subpackages provided by other features. The default for this argument is an empty list. (Note: the named package(s) or modules must exist in the base distribution when the 'setup()' function is initially called.) other keywords -- any other keyword arguments are saved, and passed to the distribution's 'include()' and 'exclude()' methods when the feature is included or excluded, respectively. So, for example, you could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be added or removed from the distribution as appropriate. A feature must include at least one 'requires', 'remove', or other keyword argument. Otherwise, it can't affect the distribution in any way. Note also that you can subclass 'Feature' to create your own specialized feature types that modify the distribution in other ways when included or excluded. See the docstrings for the various methods here for more detail. Aside from the methods, the only feature attributes that distributions look at are 'description' and 'optional'. """ @staticmethod def warn_deprecated(): msg = ( "Features are deprecated and will be removed in a future " "version. See https://github.com/pypa/setuptools/issues/65." ) warnings.warn(msg, DistDeprecationWarning, stacklevel=3) def __init__( self, description, standard=False, available=True, optional=True, require_features=(), remove=(), **extras): self.warn_deprecated() self.description = description self.standard = standard self.available = available self.optional = optional if isinstance(require_features, (str, Require)): require_features = require_features, self.require_features = [ r for r in require_features if isinstance(r, str) ] er = [r for r in require_features if not isinstance(r, str)] if er: extras['require_features'] = er if isinstance(remove, str): remove = remove, self.remove = remove self.extras = extras if not remove and not require_features and not extras: raise DistutilsSetupError( "Feature %s: must define 'require_features', 'remove', or " "at least one of 'packages', 'py_modules', etc." ) def include_by_default(self): """Should this feature be included by default?""" return self.available and self.standard def include_in(self, dist): """Ensure feature and its requirements are included in distribution You may override this in a subclass to perform additional operations on the distribution. Note that this method may be called more than once per feature, and so should be idempotent. """ if not self.available: raise DistutilsPlatformError( self.description + " is required, " "but is not available on this platform" ) dist.include(**self.extras) for f in self.require_features: dist.include_feature(f) def exclude_from(self, dist): """Ensure feature is excluded from distribution You may override this in a subclass to perform additional operations on the distribution. This method will be called at most once per feature, and only after all included features have been asked to include themselves. """ dist.exclude(**self.extras) if self.remove: for item in self.remove: dist.exclude_package(item) def validate(self, dist): """Verify that feature makes sense in context of distribution This method is called by the distribution just before it parses its command line. 
It checks to ensure that the 'remove' attribute, if any, contains only valid package/module names that are present in the base distribution when 'setup()' is called. You may override it in a subclass to perform any other required validation of the feature against a target distribution. """ for item in self.remove: if not dist.has_contents_for(item): raise DistutilsSetupError( "%s wants to be able to remove %s, but the distribution" " doesn't contain any packages or modules under %s" % (self.description, item, item) ) class DistDeprecationWarning(SetuptoolsDeprecationWarning): """Class for warning about deprecations in dist in setuptools. Not ignored by default, unlike DeprecationWarning."""
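# ---------------------------------------------------------------------------
# Illustrative sketch, not setuptools code: the _convert_extras_requirements()
# and _move_install_requirements_markers() helpers above rewrite
#     {"extra": ["pkg; marker"]}   ->   {"extra:marker": ["pkg"]}
# and move marker-carrying install_requires entries into extras_require under
# a bare ":marker" key.  The toy function below mimics only that bookkeeping,
# using naive string splitting instead of pkg_resources requirement objects;
# the function name and the sample requirements are invented for illustration.
from collections import defaultdict


def sketch_split_markers(extras_require, install_requires):
    converted = defaultdict(list)
    for extra, reqs in extras_require.items():
        converted[extra]  # keep the plain section even if every req has a marker
        for req in reqs:
            name, sep, marker = (part.strip() for part in req.partition(';'))
            suffix = ':' + marker if marker else ''
            converted[extra + suffix].append(name)
    plain = []
    for req in install_requires:
        name, sep, marker = (part.strip() for part in req.partition(';'))
        if marker:
            converted[':' + marker].append(name)
        else:
            plain.append(name)
    return plain, dict(converted)


# Example (hypothetical requirements):
#   sketch_split_markers({'reST': ['docutils>=0.3; python_version >= "3"']},
#                        ['six', 'pywin32; sys_platform == "win32"'])
#   -> (['six'],
#       {'reST': [],
#        'reST:python_version >= "3"': ['docutils>=0.3'],
#        ':sys_platform == "win32"': ['pywin32']})
# ---------------------------------------------------------------------------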
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Translates a source file using a translation model. ''' import sys import numpy import json import os import logging from multiprocessing import Process, Queue from collections import defaultdict from Queue import Empty from util import load_dict, load_config, seqs2words from compat import fill_options from hypgraph import HypGraphRenderer from settings import TranslationSettings class Translation(object): #TODO move to separate file? """ Models a translated segment. """ def __init__(self, source_words, target_words, sentence_id=None, score=0, alignment=None, target_probs=None, hyp_graph=None, hypothesis_id=None): self.source_words = source_words self.target_words = target_words self.sentence_id = sentence_id self.score = score self.alignment = alignment #TODO: assertion of length? self.target_probs = target_probs #TODO: assertion of length? self.hyp_graph = hyp_graph self.hypothesis_id = hypothesis_id def get_alignment(self): return self.alignment def get_alignment_text(self): """ Returns this translation's alignment rendered as a string. Columns in header: sentence id ||| target words ||| score ||| source words ||| number of source words ||| number of target words """ columns = [ self.sentence_id, " ".join(self.target_words), self.score, " ".join(self.source_words), len(self.source_words) + 1, len(self.target_words) + 1 ] header = "{0} ||| {1} ||| {2} ||| {3} ||| {4} {5}\n".format(*columns) matrix = [] for target_word_alignment in self.alignment: current_weights = [] for weight in target_word_alignment: current_weights.append(str(weight)) matrix.append(" ".join(current_weights)) return header + "\n".join(matrix) def get_alignment_json(self, as_string=True): """ Returns this translation's alignment as a JSON serializable object (@param as_string False) or a JSON formatted string (@param as_string True). """ source_tokens = self.source_words + ["</s>"] target_tokens = self.target_words + ["</s>"] if self.hypothesis_id is not None: tid = self.sentence_id + self.hypothesis_id else: tid = self.sentence_id links = [] for target_index, target_word_alignment in enumerate(self.alignment): for source_index, weight in enumerate(target_word_alignment): links.append( (target_tokens[target_index], source_tokens[source_index], str(weight), self.sentence_id, tid) ) return json.dumps(links, ensure_ascii=False, indent=2) if as_string else links def get_target_probs(self): """ Returns this translation's word probabilities as a string. """ return " ".join("{0}".format(prob) for prob in self.target_probs) def save_hyp_graph(self, filename, word_idict_trg, detailed=True, highlight_best=True): """ Writes this translation's search graph to disk. """ if self.hyp_graph: renderer = HypGraphRenderer(self.hyp_graph) renderer.wordify(word_idict_trg) renderer.save(filename, detailed, highlight_best) else: pass #TODO: Warning if no search graph has been constructed during decoding? class QueueItem(object): """ Models items in a queue. """ def __init__(self, **kwargs): self.__dict__.update(kwargs) class Translator(object): def __init__(self, settings): """ Loads translation models. 
""" self._models = settings.models self._num_processes = settings.num_processes self._device_list = settings.device_list self._verbose = settings.verbose self._retrieved_translations = defaultdict(dict) # load model options self._load_model_options() # load and invert dictionaries self._build_dictionaries() # set up queues self._init_queues() # init worker processes self._init_processes() def _load_model_options(self): """ Loads config options for each model. """ options = [] for model in self._models: options.append(load_config(model)) # backward compatibility fill_options(options[-1]) self._options = options def _build_dictionaries(self): """ Builds and inverts source and target dictionaries, taken from the first model since all of them must have the same vocabulary. """ dictionaries = self._options[0]['dictionaries'] dictionaries_source = dictionaries[:-1] dictionary_target = dictionaries[-1] # load and invert source dictionaries word_dicts = [] word_idicts = [] for dictionary in dictionaries_source: word_dict = load_dict(dictionary) if self._options[0]['n_words_src']: for key, idx in word_dict.items(): if idx >= self._options[0]['n_words_src']: del word_dict[key] word_idict = dict() for kk, vv in word_dict.iteritems(): word_idict[vv] = kk word_idict[0] = '<eos>' word_idict[1] = 'UNK' word_dicts.append(word_dict) word_idicts.append(word_idict) self._word_dicts = word_dicts self._word_idicts = word_idicts # load and invert target dictionary word_dict_trg = load_dict(dictionary_target) word_idict_trg = dict() for kk, vv in word_dict_trg.iteritems(): word_idict_trg[vv] = kk word_idict_trg[0] = '<eos>' word_idict_trg[1] = 'UNK' self._word_idict_trg = word_idict_trg def _init_queues(self): """ Sets up shared queues for inter-process communication. """ self._input_queue = Queue() self._output_queue = Queue() def shutdown(self): """ Executed from parent process to terminate workers, method: "poison pill". """ for process in self._processes: self._input_queue.put(None) def _init_processes(self): """ Starts child (worker) processes. """ processes = [None] * self._num_processes for process_id in xrange(self._num_processes): deviceid = '' if self._device_list is not None and len(self._device_list) != 0: deviceid = self._device_list[process_id % len(self._device_list)].strip() processes[process_id] = Process( target=self._start_worker, args=(process_id, deviceid) ) processes[process_id].start() self._processes = processes ### MODEL LOADING AND TRANSLATION IN CHILD PROCESS ### def _load_theano(self): """ Loads models, sets theano shared variables and builds samplers. This entails irrevocable binding to a specific GPU. """ from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from theano import shared from nmt import (build_sampler, gen_sample) from theano_util import (numpy_floatX, load_params, init_theano_params) trng = RandomStreams(1234) use_noise = shared(numpy_floatX(0.)) fs_init = [] fs_next = [] for model, option in zip(self._models, self._options): param_list = numpy.load(model).files param_list = dict.fromkeys( [key for key in param_list if not key.startswith('adam_')], 0) params = load_params(model, param_list) tparams = init_theano_params(params) # always return alignment at this point f_init, f_next = build_sampler( tparams, option, use_noise, trng, return_alignment=True) fs_init.append(f_init) fs_next.append(f_next) return trng, fs_init, fs_next, gen_sample def _set_device(self, device_id): """ Modifies environment variable to change the THEANO device. 
""" if device_id != '': try: theano_flags = os.environ['THEANO_FLAGS'].split(',') exist = False for i in xrange(len(theano_flags)): if theano_flags[i].strip().startswith('device'): exist = True theano_flags[i] = '%s=%s' % ('device', device_id) break if exist is False: theano_flags.append('%s=%s' % ('device', device_id)) os.environ['THEANO_FLAGS'] = ','.join(theano_flags) except KeyError: # environment variable does not exist at all os.environ['THEANO_FLAGS'] = 'device=%s' % device_id def _load_models(self, process_id, device_id): """ Modifies environment variable to change the THEANO device, then loads models and returns them. """ logging.debug("Process '%s' - Loading models on device %s\n" % (process_id, device_id)) # modify environment flag 'device' self._set_device(device_id) # build and return models return self._load_theano() def _start_worker(self, process_id, device_id): """ Function executed by each worker once started. Do not execute in the parent process. """ # load theano functionality trng, fs_init, fs_next, gen_sample = self._load_models(process_id, device_id) # listen to queue in while loop, translate items while True: input_item = self._input_queue.get() if input_item is None: break idx = input_item.idx request_id = input_item.request_id output_item = self._translate(process_id, input_item, trng, fs_init, fs_next, gen_sample) self._output_queue.put((request_id, idx, output_item)) return def _translate(self, process_id, input_item, trng, fs_init, fs_next, gen_sample): """ Actual translation (model sampling). """ # unpack input item attributes normalization_alpha = input_item.normalization_alpha nbest = input_item.nbest idx = input_item.idx # logging logging.debug('{0} - {1}\n'.format(process_id, idx)) # sample given an input sequence and obtain scores sample, score, word_probs, alignment, hyp_graph = self._sample(input_item, trng, fs_init, fs_next, gen_sample) # normalize scores according to sequence lengths if normalization_alpha: adjusted_lengths = numpy.array([len(s) ** normalization_alpha for s in sample]) score = score / adjusted_lengths if nbest is True: output_item = sample, score, word_probs, alignment, hyp_graph else: # return translation with lowest score only sidx = numpy.argmin(score) output_item = sample[sidx], score[sidx], word_probs[ sidx], alignment[sidx], hyp_graph return output_item def _sample(self, input_item, trng, fs_init, fs_next, gen_sample): """ Sample from model. 
""" # unpack input item attributes return_hyp_graph = input_item.return_hyp_graph return_alignment = input_item.return_alignment suppress_unk = input_item.suppress_unk k = input_item.k seq = input_item.seq max_ratio = input_item.max_ratio maxlen = 200 #TODO: should be configurable if max_ratio: maxlen = int(max_ratio * len(seq)) return gen_sample(fs_init, fs_next, numpy.array(seq).T.reshape( [len(seq[0]), len(seq), 1]), trng=trng, k=k, maxlen=maxlen, stochastic=False, argmax=False, return_alignment=return_alignment, suppress_unk=suppress_unk, return_hyp_graph=return_hyp_graph) ### WRITING TO AND READING FROM QUEUES ### def _send_jobs(self, input_, translation_settings): """ """ source_sentences = [] for idx, line in enumerate(input_): if translation_settings.char_level: words = list(line.decode('utf-8').strip()) else: words = line.strip().split() x = [] for w in words: w = [self._word_dicts[i][f] if f in self._word_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))] if len(w) != self._options[0]['factors']: logging.warning('Expected {0} factors, but input word has {1}\n'.format(self._options[0]['factors'], len(w))) for midx in xrange(self._num_processes): self._processes[midx].terminate() sys.exit(1) x.append(w) x += [[0]*self._options[0]['factors']] input_item = QueueItem(verbose=self._verbose, return_hyp_graph=translation_settings.get_search_graph, return_alignment=translation_settings.get_alignment, k=translation_settings.beam_width, suppress_unk=translation_settings.suppress_unk, normalization_alpha=translation_settings.normalization_alpha, nbest=translation_settings.n_best, max_ratio=translation_settings.max_ratio, seq=x, idx=idx, request_id=translation_settings.request_id) self._input_queue.put(input_item) source_sentences.append(words) return idx+1, source_sentences def _retrieve_jobs(self, num_samples, request_id, timeout=5): """ """ while len(self._retrieved_translations[request_id]) < num_samples: resp = None while resp is None: try: resp = self._output_queue.get(True, timeout) # if queue is empty after 5s, check if processes are still alive except Empty: for midx in xrange(self._num_processes): if not self._processes[midx].is_alive() and self._processes[midx].exitcode != 0: # kill all other processes and raise exception if one dies self._input_queue.cancel_join_thread() self._output_queue.cancel_join_thread() for idx in xrange(self._num_processes): self._processes[idx].terminate() logging.error("Translate worker process {0} crashed with exitcode {1}".format(self._processes[midx].pid, self._processes[midx].exitcode)) sys.exit(1) request_id, idx, output_item = resp self._retrieved_translations[request_id][idx] = output_item #print self._retrieved_translations for idx in xrange(num_samples): yield self._retrieved_translations[request_id][idx] # then remove all entries with this request ID from the dictionary del self._retrieved_translations[request_id] ### EXPOSED TRANSLATION FUNCTIONS ### def translate(self, source_segments, translation_settings): """ Returns the translation of @param source_segments. 
""" logging.info('Translating {0} segments...\n'.format(len(source_segments))) n_samples, source_sentences = self._send_jobs(source_segments, translation_settings) translations = [] for i, trans in enumerate(self._retrieve_jobs(n_samples, translation_settings.request_id)): samples, scores, word_probs, alignment, hyp_graph = trans # n-best list if translation_settings.n_best is True: order = numpy.argsort(scores) n_best_list = [] for j in order: current_alignment = None if not translation_settings.get_alignment else alignment[j] translation = Translation(sentence_id=i, source_words=source_sentences[i], target_words=seqs2words(samples[j], self._word_idict_trg, join=False), score=scores[j], alignment=current_alignment, target_probs=word_probs[j], hyp_graph=hyp_graph, hypothesis_id=j) n_best_list.append(translation) translations.append(n_best_list) # single-best translation else: current_alignment = None if not translation_settings.get_alignment else alignment translation = Translation(sentence_id=i, source_words=source_sentences[i], target_words=seqs2words(samples, self._word_idict_trg, join=False), score=scores, alignment=current_alignment, target_probs=word_probs, hyp_graph=hyp_graph) translations.append(translation) return translations def translate_file(self, input_object, translation_settings): """ """ source_segments = input_object.readlines() return self.translate(source_segments, translation_settings) def translate_string(self, segment, translation_settings): """ Translates a single segment """ if not segment.endswith('\n'): segment += '\n' source_segments = [segment] return self.translate(source_segments, translation_settings) def translate_list(self, segments, translation_settings): """ Translates a list of segments """ source_segments = [s + '\n' if not s.endswith('\n') else s for s in segments] return self.translate(source_segments, translation_settings) ### FUNCTIONS FOR WRITING THE RESULTS ### def write_alignment(self, translation, translation_settings): """ Writes alignments to a file. """ output_file = translation_settings.output_alignment if translation_settings.json_alignment: output_file.write(translation.get_alignment_json() + "\n") else: output_file.write(translation.get_alignment_text() + "\n\n") def write_translation(self, output_file, translation, translation_settings): """ Writes a single translation to a file or STDOUT. """ output_items = [] # sentence ID only for nbest if translation_settings.n_best is True: output_items.append(str(translation.sentence_id)) # translations themselves output_items.append(" ".join(translation.target_words)) # write scores for nbest? if translation_settings.n_best is True: output_items.append(str(translation.score)) # write probabilities? if translation_settings.get_word_probs: output_items.append(translation.get_target_probs()) if translation_settings.n_best is True: output_file.write(" ||| ".join(output_items) + "\n") else: output_file.write("\n".join(output_items) + "\n") # write alignments to file? if translation_settings.get_alignment: self.write_alignment(translation, translation_settings) # construct hypgraph? if translation_settings.get_search_graph: translation.save_hyp_graph( translation_settings.search_graph_filename, self._word_idict_trg, detailed=True, highlight_best=True ) def write_translations(self, output_file, translations, translation_settings): """ Writes translations to a file or STDOUT. 
""" if translation_settings.n_best is True: for nbest_list in translations: for translation in nbest_list: self.write_translation(output_file, translation, translation_settings) else: for translation in translations: self.write_translation(output_file, translation, translation_settings) def main(input_file, output_file, translation_settings): """ Translates a source language file (or STDIN) into a target language file (or STDOUT). """ translator = Translator(translation_settings) translations = translator.translate_file(input_file, translation_settings) translator.write_translations(output_file, translations, translation_settings) logging.info('Done') translator.shutdown() if __name__ == "__main__": # parse console arguments translation_settings = TranslationSettings(from_console_arguments=True) input_file = translation_settings.input output_file = translation_settings.output # start logging level = logging.DEBUG if translation_settings.verbose else logging.WARNING logging.basicConfig(level=level, format='%(levelname)s: %(message)s') main(input_file, output_file, translation_settings)
# Copyright (c) 2013 Dell Inc. # Copyright 2013 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Dell EqualLogic Storage.""" import functools import random import eventlet from eventlet import greenthread import greenlet from oslo.config import cfg from cinder import exception from cinder.i18n import _ from cinder.openstack.common import excutils from cinder.openstack.common import log as logging from cinder.openstack.common import processutils from cinder import ssh_utils from cinder import utils from cinder.volume.drivers.san import SanISCSIDriver LOG = logging.getLogger(__name__) eqlx_opts = [ cfg.StrOpt('eqlx_group_name', default='group-0', help='Group name to use for creating volumes'), cfg.IntOpt('eqlx_cli_timeout', default=30, help='Timeout for the Group Manager cli command execution'), cfg.IntOpt('eqlx_cli_max_retries', default=5, help='Maximum retry count for reconnection'), cfg.BoolOpt('eqlx_use_chap', default=False, help='Use CHAP authentication for targets?'), cfg.StrOpt('eqlx_chap_login', default='admin', help='Existing CHAP account name'), cfg.StrOpt('eqlx_chap_password', default='password', help='Password for specified CHAP account name', secret=True), cfg.StrOpt('eqlx_pool', default='default', help='Pool in which volumes will be created') ] CONF = cfg.CONF CONF.register_opts(eqlx_opts) def with_timeout(f): @functools.wraps(f) def __inner(self, *args, **kwargs): timeout = kwargs.pop('timeout', None) gt = eventlet.spawn(f, self, *args, **kwargs) if timeout is None: return gt.wait() else: kill_thread = eventlet.spawn_after(timeout, gt.kill) try: res = gt.wait() except greenlet.GreenletExit: raise exception.VolumeBackendAPIException( data="Command timed out") else: kill_thread.cancel() return res return __inner class DellEQLSanISCSIDriver(SanISCSIDriver): """Implements commands for Dell EqualLogic SAN ISCSI management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver Driver's prerequisites are: - a separate volume group set up and running on the SAN - SSH access to the SAN - a special user must be created which must be able to - create/delete volumes and snapshots; - clone snapshots into volumes; - modify volume access records; The access credentials to the SAN are provided by means of the following flags san_ip=<ip_address> san_login=<user name> san_password=<user password> san_private_key=<file containing SSH private key> Thin provision of volumes is enabled by default, to disable it use: san_thin_provision=false In order to use target CHAP authentication (which is disabled by default) SAN administrator must create a local CHAP user and specify the following flags for the driver: eqlx_use_chap=true eqlx_chap_login=<chap_login> eqlx_chap_password=<chap_password> eqlx_group_name parameter actually represents the CLI prompt message without '>' ending. E.g. 
if prompt looks like 'group-0>', then the parameter must be set to 'group-0' Also, the default CLI command execution timeout is 30 secs. Adjustable by eqlx_cli_timeout=<seconds> """ VERSION = "1.0.0" def __init__(self, *args, **kwargs): super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(eqlx_opts) self._group_ip = None self.sshpool = None def _get_output(self, chan): out = '' ending = '%s> ' % self.configuration.eqlx_group_name while out.find(ending) == -1: ret = chan.recv(102400) if len(ret) == 0: # According to paramiko.channel.Channel documentation, which # says "If a string of length zero is returned, the channel # stream has closed". So we can confirm that the EQL server # has closed the connection. msg = _("The EQL array has closed the connection.") LOG.error(msg) raise processutils.ProcessExecutionError(description=msg) out += ret LOG.debug("CLI output\n%s", out) return out.splitlines() def _get_prefixed_value(self, lines, prefix): for line in lines: if line.startswith(prefix): return line[len(prefix):] return @with_timeout def _ssh_execute(self, ssh, command, *arg, **kwargs): transport = ssh.get_transport() chan = transport.open_session() completed = False try: chan.invoke_shell() LOG.debug("Reading CLI MOTD") self._get_output(chan) cmd = 'stty columns 255' LOG.debug("Setting CLI terminal width: '%s'", cmd) chan.send(cmd + '\r') out = self._get_output(chan) LOG.debug("Sending CLI command: '%s'", command) chan.send(command + '\r') out = self._get_output(chan) completed = True if any(ln.startswith(('% Error', 'Error:')) for ln in out): desc = _("Error executing EQL command") cmdout = '\n'.join(out) LOG.error(cmdout) raise processutils.ProcessExecutionError( stdout=cmdout, cmd=command, description=desc) return out finally: if not completed: LOG.debug("Timed out executing command: '%s'", command) chan.close() def _run_ssh(self, cmd_list, attempts=1): utils.check_ssh_injection(cmd_list) command = ' '. 
join(cmd_list) if not self.sshpool: password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn self.sshpool = ssh_utils.SSHPool( self.configuration.san_ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) try: total_attempts = attempts with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: LOG.info(_('EQL-driver: executing "%s".'), command) return self._ssh_execute( ssh, command, timeout=self.configuration.eqlx_cli_timeout) except processutils.ProcessExecutionError: raise except Exception as e: LOG.exception(e) greenthread.sleep(random.randint(20, 500) / 100.0) msg = (_("SSH Command failed after '%(total_attempts)r' " "attempts : '%(command)s'") % {'total_attempts': total_attempts, 'command': command}) raise exception.VolumeBackendAPIException(data=msg) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Error running SSH command: "%s".'), command) def _eql_execute(self, *args, **kwargs): return self._run_ssh( args, attempts=self.configuration.eqlx_cli_max_retries) def _get_volume_data(self, lines): prefix = 'iSCSI target name is ' target_name = self._get_prefixed_value(lines, prefix)[:-1] lun_id = "%s:%s,1 %s 0" % (self._group_ip, '3260', target_name) model_update = {} model_update['provider_location'] = lun_id if self.configuration.eqlx_use_chap: model_update['provider_auth'] = 'CHAP %s %s' % \ (self.configuration.eqlx_chap_login, self.configuration.eqlx_chap_password) return model_update def _get_space_in_gb(self, val): scale = 1.0 part = 'GB' if val.endswith('MB'): scale = 1.0 / 1024 part = 'MB' elif val.endswith('TB'): scale = 1.0 * 1024 part = 'TB' return scale * float(val.partition(part)[0]) def _update_volume_stats(self): """Retrieve stats info from eqlx group.""" LOG.debug('Updating volume stats.') data = {} backend_name = "eqlx" if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'eqlx' data["vendor_name"] = 'Dell' data["driver_version"] = self.VERSION data["storage_protocol"] = 'iSCSI' data['reserved_percentage'] = 0 data['QoS_support'] = False data['total_capacity_gb'] = 'infinite' data['free_capacity_gb'] = 'infinite' for line in self._eql_execute('pool', 'select', self.configuration.eqlx_pool, 'show'): if line.startswith('TotalCapacity:'): out_tup = line.rstrip().partition(' ') data['total_capacity_gb'] = self._get_space_in_gb(out_tup[-1]) if line.startswith('FreeSpace:'): out_tup = line.rstrip().partition(' ') data['free_capacity_gb'] = self._get_space_in_gb(out_tup[-1]) self._stats = data def _check_volume(self, volume): """Check if the volume exists on the Array.""" command = ['volume', 'select', volume['name'], 'show'] try: self._eql_execute(*command) except processutils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(): if err.stdout.find('does not exist.\n') > -1: LOG.debug('Volume %s does not exist, ' 'it may have already been deleted', volume['name']) raise exception.VolumeNotFound(volume_id=volume['id']) def _parse_connection(self, connector, out): """Returns the correct connection id for the initiator. This parses the cli output from the command 'volume select <volumename> access show' and returns the correct connection id. 
""" lines = [line for line in out if line != ''] # Every record has 2 lines for i in xrange(0, len(lines), 2): try: int(lines[i][0]) # sanity check if len(lines[i + 1].split()) == 1: check = lines[i].split()[1] + lines[i + 1].strip() if connector['initiator'] == check: return lines[i].split()[0] except (IndexError, ValueError): pass # skip the line that is not a valid access record return None def do_setup(self, context): """Disable cli confirmation and tune output format.""" try: disabled_cli_features = ('confirmation', 'paging', 'events', 'formatoutput') for feature in disabled_cli_features: self._eql_execute('cli-settings', feature, 'off') for line in self._eql_execute('grpparams', 'show'): if line.startswith('Group-Ipaddress:'): out_tup = line.rstrip().partition(' ') self._group_ip = out_tup[-1] LOG.info(_('EQL-driver: Setup is complete, group IP is "%s".'), self._group_ip) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to setup the Dell EqualLogic driver.')) def create_volume(self, volume): """Create a volume.""" try: cmd = ['volume', 'create', volume['name'], "%sG" % (volume['size'])] if self.configuration.eqlx_pool != 'default': cmd.append('pool') cmd.append(self.configuration.eqlx_pool) if self.configuration.san_thin_provision: cmd.append('thin-provision') out = self._eql_execute(*cmd) self.add_multihost_access(volume) return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to create volume "%s".'), volume['name']) def add_multihost_access(self, volume): """Add multihost-access to a volume. Needed for live migration.""" try: cmd = ['volume', 'select', volume['name'], 'multihost-access', 'enable'] self._eql_execute(*cmd) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to add multihost-access' ' for volume "%s".'), volume['name']) def delete_volume(self, volume): """Delete a volume.""" try: self._check_volume(volume) self._eql_execute('volume', 'select', volume['name'], 'offline') self._eql_execute('volume', 'delete', volume['name']) except exception.VolumeNotFound: LOG.warn(_('Volume %s was not found while trying to delete it.'), volume['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to delete volume "%s".'), volume['name']) def create_snapshot(self, snapshot): """"Create snapshot of existing volume on appliance.""" try: out = self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'create-now') prefix = 'Snapshot name is ' snap_name = self._get_prefixed_value(out, prefix) self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'rename', snap_name, snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to create snapshot of volume "%s".'), snapshot['volume_name']) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other volume's snapshot on appliance.""" try: out = self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'select', snapshot['name'], 'clone', volume['name']) self.add_multihost_access(volume) return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to create volume from snapshot "%s".'), snapshot['name']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" try: src_volume_name = self.configuration.\ volume_name_template % src_vref['id'] out = 
self._eql_execute('volume', 'select', src_volume_name, 'clone', volume['name']) self.add_multihost_access(volume) return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to create clone of volume "%s".'), volume['name']) def delete_snapshot(self, snapshot): """Delete volume's snapshot.""" try: self._eql_execute('volume', 'select', snapshot['volume_name'], 'snapshot', 'delete', snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to delete snapshot %(snap)s of ' 'volume %(vol)s.'), {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) def initialize_connection(self, volume, connector): """Restrict access to a volume.""" try: cmd = ['volume', 'select', volume['name'], 'access', 'create', 'initiator', connector['initiator']] if self.configuration.eqlx_use_chap: cmd.extend(['authmethod', 'chap', 'username', self.configuration.eqlx_chap_login]) self._eql_execute(*cmd) iscsi_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties } except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to initialize connection' ' to volume "%s".'), volume['name']) def terminate_connection(self, volume, connector, force=False, **kwargs): """Remove access restrictions from a volume.""" try: out = self._eql_execute('volume', 'select', volume['name'], 'access', 'show') connection_id = self._parse_connection(connector, out) if connection_id is not None: self._eql_execute('volume', 'select', volume['name'], 'access', 'delete', connection_id) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to terminate connection' ' to volume "%s".'), volume['name']) def create_export(self, context, volume): """Create an export of a volume. Driver has nothing to do here for the volume has been exported already by the SAN, right after it's creation. """ pass def ensure_export(self, context, volume): """Ensure an export of a volume. Driver has nothing to do here for the volume has been exported already by the SAN, right after it's creation. We will just make sure that the volume exists on the array and issue a warning. """ try: self._check_volume(volume) except exception.VolumeNotFound: LOG.warn(_('Volume %s is not found!, it may have been deleted.'), volume['name']) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to ensure export of volume "%s".'), volume['name']) def remove_export(self, context, volume): """Remove an export of a volume. Driver has nothing to do here for the volume has been exported already by the SAN, right after it's creation. Nothing to remove since there's nothing exported. """ pass def extend_volume(self, volume, new_size): """Extend the size of the volume.""" try: self._eql_execute('volume', 'select', volume['name'], 'size', "%sG" % new_size) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to extend_volume %(name)s from ' '%(current_size)sGB to %(new_size)sGB.'), {'name': volume['name'], 'current_size': volume['size'], 'new_size': new_size}) def local_path(self, volume): raise NotImplementedError()
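# --- Illustrative sketch (not part of the driver above) ---
# _update_volume_stats() converts the CLI capacity strings reported by
# 'pool select <pool> show' (e.g. '512MB', '120GB', '2TB') into GB floats via
# _get_space_in_gb(). The standalone helper below mirrors that conversion so
# the scaling rules can be checked in isolation; the name gb_from_cli_size is
# hypothetical and not part of the driver.


def gb_from_cli_size(val):
    """Convert an EQL CLI size string with an MB/GB/TB suffix to GB."""
    scale = 1.0
    part = 'GB'
    if val.endswith('MB'):
        scale = 1.0 / 1024
        part = 'MB'
    elif val.endswith('TB'):
        scale = 1.0 * 1024
        part = 'TB'
    return scale * float(val.partition(part)[0])


if __name__ == '__main__':
    assert gb_from_cli_size('512MB') == 0.5
    assert gb_from_cli_size('120GB') == 120.0
    assert gb_from_cli_size('2TB') == 2048.0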
import gc import gzip import json from collections import Counter, OrderedDict from pathlib import Path import sys import pymysql from joblib import Memory import settings memory = Memory('cache/dataset', verbose=0) class Database: def __init__(self): self.connection = pymysql.connect(host=settings.DB_HOST, user=settings.DB_USER, password=settings.DB_PASSWORD, db=settings.DB_NAME, charset='utf8') @property def cursor(self): try: return self._cursor except AttributeError: self._cursor = self.connection.cursor() return self._cursor def commit(self): self.connection.commit() self._cursor.close() del self._cursor def close(self): self.connection.close() @property def count_users(self): self.cursor.execute('SELECT COUNT(*) FROM users') return self.cursor.fetchone()[0] def count(self): counter = Counter() counter['users'] = self.count_users self.cursor.execute('SELECT COUNT(DISTINCT company) FROM users') counter['companies'] = self.cursor.fetchone()[0] self.cursor.execute('SELECT COUNT(DISTINCT location_country) FROM users') counter['countries'] = self.cursor.fetchone()[0] self.commit() return counter def has_user(self, user_login): sql = 'SELECT COUNT(*) FROM users WHERE login = %s' self.cursor.execute(sql, (user_login,)) return self.cursor.fetchone()[0] > 0 def insert_user(self, login): sql = 'INSERT IGNORE INTO users (login) VALUES (%s)' self.cursor.execute(sql, (login,)) def insert_many_users(self, logins): sql = 'INSERT IGNORE INTO users (login) VALUES (%s)' self.cursor.executemany(sql, [(v,) for v in logins]) def insert_many_repositories(self, repos): sql = 'INSERT IGNORE INTO repositories (owner, name) VALUES (%s, %s)' self.cursor.executemany(sql, repos) def update_user(self, login, fields): for key in list(fields.keys()): if fields[key] is None: del fields[key] keys = list(fields.keys()) values = list(fields.values()) fields_str = ', '.join(keys) values_str = ', '.join(['%s'] * len(fields)) update_str = ', '.join('{} = %s'.format(k) for k in keys) sql = 'UPDATE users SET {} WHERE login = %s' \ .format(update_str) self.cursor.execute(sql, values + [login]) def update_project(self, owner, name, fields): for key in list(fields.keys()): if fields[key] is None: del fields[key] keys = list(fields.keys()) values = list(fields.values()) fields_str = ', '.join(keys) values_str = ', '.join(['%s'] * len(fields)) update_str = ', '.join('{} = %s'.format(k) for k in keys) sql = 'UPDATE repositories SET {} WHERE owner = %s AND name = %s' \ .format(update_str) self.cursor.execute(sql, values + [owner, name]) def update_user_activity(self, first_active, last_active): sql1 = """ UPDATE users SET first_active = %s WHERE login = %s AND first_active IS NULL """ sql2 = """ UPDATE users SET last_active = %s WHERE login = %s """ print('Updating first active...') args = [(v, k) for k, v in first_active.items()] self.cursor.executemany(sql1, args) print('Updating last active...') args = [(v, k) for k, v in last_active.items()] self.cursor.executemany(sql2, args) print('Committing...') self.commit() def get_company_distribution(self): self.cursor.execute(""" SELECT company, COUNT(*) FROM users WHERE company IS NOT NULL GROUP BY company """) return OrderedDict(self.cursor) def get_country_distribution(self): self.cursor.execute(""" SELECT location_country, COUNT(id) FROM users WHERE location_country IS NOT NULL GROUP BY location_country """) return OrderedDict(self.cursor) def get_gender_distribution(self): self.cursor.execute(""" SELECT gender, COUNT(id) FROM users WHERE gender IS NOT NULL GROUP BY
gender """) return OrderedDict(self.cursor) def get_location_points(self): self.cursor.execute(""" SELECT location_latitude, location_longitude FROM users WHERE location_latitude IS NOT NULL AND location_longitude IS NOT NULL """) for row in self.cursor: yield row def get_users_without_location(self): self.cursor.execute(""" SELECT login, location FROM users WHERE location IS NOT NULL AND location_country IS NULL AND location_latitude IS NULL AND location_longitude IS NULL """) return self.cursor.fetchall() def get_users_without_gender(self): self.cursor.execute(""" SELECT login, name FROM users WHERE name IS NOT NULL AND name != '' AND (gender IS NULL OR gender != '?') AND gender_probability IS NULL """) return self.cursor.fetchall() def update_user_gender(self, genders): sql = """ UPDATE users SET gender = %s, gender_probability = %s WHERE login = %s """ args = [(v[0], v[1], k) for k, v in genders.items()] self.cursor.executemany(sql, args) self.commit() def update_user_location(self, locations): sql = """ UPDATE users SET location_country = %s, location_latitude = %s, location_longitude = %s WHERE login = %s """ args = [(v[2], v[0], v[1], k) for k, v in locations.items()] self.cursor.executemany(sql, args) self.commit() def add_user_event(self, login, event): sql = """ UPDATE users SET count_{0} = IFNULL(count_{0}, 0) + 1 WHERE login = %s """.format(event) self.cursor.execute(sql, (login,)) class Events: def __init__(self): self.path = Path('../data') self.count = memory.cache(self.count) self.count_types = memory.cache(self.count_types) def iterate(self, glob='*.json.gz', func=None, start_from=None): gc.disable() started = start_from is None for path in self.path.glob(glob): if not started: if path.name.startswith(start_from): started = True else: print('Skipping events:', path) continue print('Loading events:', path) with gzip.open(str(path), 'rt', errors='ignore') as file: for line in file: try: record = json.loads(line) except ValueError: continue if record['type'] == 'Event': continue if func is not None: record = func(record) yield record def count(self): iterator = self.iterate(func=lambda event: event['type']) counter = Counter(iterator) return counter @property def types(self): return list(self.count().keys()) def count_types(self, year, month): glob = '{}-{:02d}-*.json.gz'.format(year, month) iterator = self.iterate(glob, func=lambda e: e['type']) return Counter(iterator) def count(): db = Database() events = Events() print(db.count()) print(events.count()) db.close() def iterate_events(): events = Events() for event in events.iterate(): print(event) input() if __name__ == '__main__': if sys.argv[1] == 'count': count() elif sys.argv[1] == 'events': iterate_events()
""" Module to provide Cisco UCS compatibility to Salt :codeauthor: ``Spencer Ervin <[email protected]>`` :maturity: new :depends: none :platform: unix Configuration ============= This module accepts connection configuration details either as parameters, or as configuration settings in pillar as a Salt proxy. Options passed into opts will be ignored if options are passed into pillar. .. seealso:: :py:mod:`Cisco UCS Proxy Module <salt.proxy.cimc>` About ===== This execution module was designed to handle connections to a Cisco UCS server. This module adds support to send connections directly to the device through the rest API. """ import logging import salt.proxy.cimc import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = "cimc" def __virtual__(): """ Will load for the cimc proxy minions. """ try: if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "cimc": return __virtualname__ except KeyError: pass return False, "The cimc execution module can only be loaded for cimc proxy minions." def activate_backup_image(reset=False): """ Activates the firmware backup image. CLI Example: Args: reset(bool): Reset the CIMC device on activate. .. code-block:: bash salt '*' cimc.activate_backup_image salt '*' cimc.activate_backup_image reset=True """ dn = "sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined" r = "no" if reset is True: r = "yes" inconfig = """<firmwareBootUnit dn='sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined' adminState='trigger' image='backup' resetOnActivate='{}' />""".format( r ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def create_user(uid=None, username=None, password=None, priv=None): """ Create a CIMC user with username and password. Args: uid(int): The user ID slot to create the user account in. username(str): The name of the user. password(str): The clear text password of the user. priv(str): The privilege level of the user. CLI Example: .. code-block:: bash salt '*' cimc.create_user 11 username=admin password=foobar priv=admin """ if not uid: raise salt.exceptions.CommandExecutionError("The user ID must be specified.") if not username: raise salt.exceptions.CommandExecutionError("The username must be specified.") if not password: raise salt.exceptions.CommandExecutionError("The password must be specified.") if not priv: raise salt.exceptions.CommandExecutionError( "The privilege level must be specified." ) dn = "sys/user-ext/user-{}".format(uid) inconfig = """<aaaUser id="{0}" accountStatus="active" name="{1}" priv="{2}" pwd="{3}" dn="sys/user-ext/user-{0}"/>""".format( uid, username, priv, password ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def get_bios_defaults(): """ Get the default values of BIOS tokens. CLI Example: .. code-block:: bash salt '*' cimc.get_bios_defaults """ ret = __proxy__["cimc.get_config_resolver_class"]("biosPlatformDefaults", True) return ret def get_bios_settings(): """ Get the C240 server BIOS token values. CLI Example: .. code-block:: bash salt '*' cimc.get_bios_settings """ ret = __proxy__["cimc.get_config_resolver_class"]("biosSettings", True) return ret def get_boot_order(): """ Retrieves the configured boot order table. CLI Example: .. code-block:: bash salt '*' cimc.get_boot_order """ ret = __proxy__["cimc.get_config_resolver_class"]("lsbootDef", True) return ret def get_cpu_details(): """ Get the CPU product ID details. CLI Example: .. 
code-block:: bash salt '*' cimc.get_cpu_details """ ret = __proxy__["cimc.get_config_resolver_class"]("pidCatalogCpu", True) return ret def get_disks(): """ Get the HDD product ID details. CLI Example: .. code-block:: bash salt '*' cimc.get_disks """ ret = __proxy__["cimc.get_config_resolver_class"]("pidCatalogHdd", True) return ret def get_ethernet_interfaces(): """ Get the adapter Ethernet interface details. CLI Example: .. code-block:: bash salt '*' cimc.get_ethernet_interfaces """ ret = __proxy__["cimc.get_config_resolver_class"]("adaptorHostEthIf", True) return ret def get_fibre_channel_interfaces(): """ Get the adapter fibre channel interface details. CLI Example: .. code-block:: bash salt '*' cimc.get_fibre_channel_interfaces """ ret = __proxy__["cimc.get_config_resolver_class"]("adaptorHostFcIf", True) return ret def get_firmware(): """ Retrieves the current running firmware versions of server components. CLI Example: .. code-block:: bash salt '*' cimc.get_firmware """ ret = __proxy__["cimc.get_config_resolver_class"]("firmwareRunning", False) return ret def get_hostname(): """ Retrieves the hostname from the device. .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' cimc.get_hostname """ ret = __proxy__["cimc.get_config_resolver_class"]("mgmtIf", True) try: return ret["outConfigs"]["mgmtIf"][0]["hostname"] except Exception as err: # pylint: disable=broad-except return "Unable to retrieve hostname" def get_ldap(): """ Retrieves LDAP server details. CLI Example: .. code-block:: bash salt '*' cimc.get_ldap """ ret = __proxy__["cimc.get_config_resolver_class"]("aaaLdap", True) return ret def get_management_interface(): """ Retrieve the management interface details. CLI Example: .. code-block:: bash salt '*' cimc.get_management_interface """ ret = __proxy__["cimc.get_config_resolver_class"]("mgmtIf", False) return ret def get_memory_token(): """ Get the memory RAS BIOS token. CLI Example: .. code-block:: bash salt '*' cimc.get_memory_token """ ret = __proxy__["cimc.get_config_resolver_class"]( "biosVfSelectMemoryRASConfiguration", False ) return ret def get_memory_unit(): """ Get the IMM/Memory unit product ID details. CLI Example: .. code-block:: bash salt '*' cimc.get_memory_unit """ ret = __proxy__["cimc.get_config_resolver_class"]("pidCatalogDimm", True) return ret def get_network_adapters(): """ Get the list of network adapters and configuration details. CLI Example: .. code-block:: bash salt '*' cimc.get_network_adapters """ ret = __proxy__["cimc.get_config_resolver_class"]("networkAdapterEthIf", True) return ret def get_ntp(): """ Retrieves the current running NTP configuration. CLI Example: .. code-block:: bash salt '*' cimc.get_ntp """ ret = __proxy__["cimc.get_config_resolver_class"]("commNtpProvider", False) return ret def get_pci_adapters(): """ Get the PCI adapter product ID details. CLI Example: .. code-block:: bash salt '*' cimc.get_disks """ ret = __proxy__["cimc.get_config_resolver_class"]("pidCatalogPCIAdapter", True) return ret def get_power_configuration(): """ Get the configuration of the power settings from the device. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' cimc.get_power_configuration """ ret = __proxy__["cimc.get_config_resolver_class"]("biosVfResumeOnACPowerLoss", True) return ret def get_power_supplies(): """ Retrieves the power supply unit details. CLI Example: .. 
code-block:: bash salt '*' cimc.get_power_supplies """ ret = __proxy__["cimc.get_config_resolver_class"]("equipmentPsu", False) return ret def get_snmp_config(): """ Get the snmp configuration details. CLI Example: .. code-block:: bash salt '*' cimc.get_snmp_config """ ret = __proxy__["cimc.get_config_resolver_class"]("commSnmp", False) return ret def get_syslog(): """ Get the Syslog client-server details. CLI Example: .. code-block:: bash salt '*' cimc.get_syslog """ ret = __proxy__["cimc.get_config_resolver_class"]("commSyslogClient", False) return ret def get_syslog_settings(): """ Get the Syslog configuration settings from the system. .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' cimc.get_syslog_settings """ ret = __proxy__["cimc.get_config_resolver_class"]("commSyslog", False) return ret def get_system_info(): """ Get the system information. CLI Example: .. code-block:: bash salt '*' cimc.get_system_info """ ret = __proxy__["cimc.get_config_resolver_class"]("computeRackUnit", False) return ret def get_users(): """ Get the CIMC users. CLI Example: .. code-block:: bash salt '*' cimc.get_users """ ret = __proxy__["cimc.get_config_resolver_class"]("aaaUser", False) return ret def get_vic_adapters(): """ Get the VIC adapter general profile details. CLI Example: .. code-block:: bash salt '*' cimc.get_vic_adapters """ ret = __proxy__["cimc.get_config_resolver_class"]("adaptorGenProfile", True) return ret def get_vic_uplinks(): """ Get the VIC adapter uplink port details. CLI Example: .. code-block:: bash salt '*' cimc.get_vic_uplinks """ ret = __proxy__["cimc.get_config_resolver_class"]("adaptorExtEthIf", True) return ret def mount_share( name=None, remote_share=None, remote_file=None, mount_type="nfs", username=None, password=None, ): """ Mounts a remote file through a remote share. Currently, this feature is supported in version 1.5 or greater. The remote share can be either NFS, CIFS, or WWW. Some of the advantages of CIMC Mounted vMedia include: Communication between mounted media and target stays local (inside datacenter) Media mounts can be scripted/automated No vKVM requirements for media connection Multiple share types supported Connections supported through all CIMC interfaces Note: CIMC Mounted vMedia is enabled through BIOS configuration. Args: name(str): The name of the volume on the CIMC device. remote_share(str): The file share link that will be used to mount the share. This can be NFS, CIFS, or WWW. This must be the directory path and not the full path to the remote file. remote_file(str): The name of the remote file to mount. It must reside within remote_share. mount_type(str): The type of share to mount. Valid options are nfs, cifs, and www. username(str): An optional requirement to pass credentials to the remote share. If not provided, an unauthenticated connection attempt will be made. password(str): An optional requirement to pass a password to the remote share. If not provided, an unauthenticated connection attempt will be made. CLI Example: .. code-block:: bash salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso username=bob password=badpassword """ if not name: raise salt.exceptions.CommandExecutionError("The share name must be specified.") if not remote_share: raise salt.exceptions.CommandExecutionError( "The remote share path must be specified." 
) if not remote_file: raise salt.exceptions.CommandExecutionError( "The remote file name must be specified." ) if username and password: mount_options = " mountOptions='username={},password={}'".format( username, password ) else: mount_options = "" dn = "sys/svc-ext/vmedia-svc/vmmap-{}".format(name) inconfig = """<commVMediaMap dn='sys/svc-ext/vmedia-svc/vmmap-{}' map='{}'{} remoteFile='{}' remoteShare='{}' status='created' volumeName='Win12' />""".format( name, mount_type, mount_options, remote_file, remote_share ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def reboot(): """ Power cycling the server. CLI Example: .. code-block:: bash salt '*' cimc.reboot """ dn = "sys/rack-unit-1" inconfig = """<computeRackUnit adminPower="cycle-immediate" dn="sys/rack-unit-1"></computeRackUnit>""" ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def set_hostname(hostname=None): """ Sets the hostname on the server. .. versionadded:: 2019.2.0 Args: hostname(str): The new hostname to set. CLI Example: .. code-block:: bash salt '*' cimc.set_hostname foobar """ if not hostname: raise salt.exceptions.CommandExecutionError("Hostname option must be provided.") dn = "sys/rack-unit-1/mgmt/if-1" inconfig = ( """<mgmtIf dn="sys/rack-unit-1/mgmt/if-1" hostname="{}" ></mgmtIf>""".format( hostname ) ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) try: if ret["outConfig"]["mgmtIf"][0]["status"] == "modified": return True else: return False except Exception as err: # pylint: disable=broad-except return False def set_logging_levels(remote=None, local=None): """ Sets the logging levels of the CIMC devices. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. versionadded:: 2019.2.0 Args: remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. CLI Example: .. code-block:: bash salt '*' cimc.set_logging_levels remote=error local=notice """ logging_options = [ "emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug", ] query = "" if remote: if remote in logging_options: query += ' remoteSeverity="{}"'.format(remote) else: raise salt.exceptions.CommandExecutionError( "Remote Severity option is not valid." ) if local: if local in logging_options: query += ' localSeverity="{}"'.format(local) else: raise salt.exceptions.CommandExecutionError( "Local Severity option is not valid." ) dn = "sys/svc-ext/syslog" inconfig = """<commSyslog dn="sys/svc-ext/syslog"{} ></commSyslog>""".format(query) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def set_ntp_server(server1="", server2="", server3="", server4=""): """ Sets the NTP servers configuration. This will also enable the client NTP service. Args: server1(str): The first IP address or FQDN of the NTP servers. server2(str): The second IP address or FQDN of the NTP servers. server3(str): The third IP address or FQDN of the NTP servers. server4(str): The fourth IP address or FQDN of the NTP servers. CLI Example: .. 
code-block:: bash salt '*' cimc.set_ntp_server 10.10.10.1 salt '*' cimc.set_ntp_server 10.10.10.1 foo.bar.com """ dn = "sys/svc-ext/ntp-svc" inconfig = """<commNtpProvider dn="sys/svc-ext/ntp-svc" ntpEnable="yes" ntpServer1="{}" ntpServer2="{}" ntpServer3="{}" ntpServer4="{}"/>""".format( server1, server2, server3, server4 ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def set_power_configuration(policy=None, delayType=None, delayValue=None): """ Sets the power configuration on the device. This is only available for some C-Series servers. .. versionadded:: 2019.2.0 Args: policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. CLI Example: .. code-block:: bash salt '*' cimc.set_power_configuration stay-off salt '*' cimc.set_power_configuration reset fixed 0 """ query = "" if policy == "reset": query = ' vpResumeOnACPowerLoss="reset"' if delayType: if delayType == "fixed": query += ' delayType="fixed"' if delayValue: query += ' delay="{}"'.format(delayValue) elif delayType == "random": query += ' delayType="random"' else: raise salt.exceptions.CommandExecutionError( "Invalid delay type entered." ) elif policy == "stay-off": query = ' vpResumeOnACPowerLoss="reset"' elif policy == "last-state": query = ' vpResumeOnACPowerLoss="last-state"' else: raise salt.exceptions.CommandExecutionError( "The power state must be specified." ) dn = "sys/rack-unit-1/board/Resume-on-AC-power-loss" inconfig = """<biosVfResumeOnACPowerLoss dn="sys/rack-unit-1/board/Resume-on-AC-power-loss"{}> </biosVfResumeOnACPowerLoss>""".format( query ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def set_syslog_server(server=None, type="primary"): """ Set the SYSLOG server on the host. Args: server(str): The hostname or IP address of the SYSLOG server. type(str): Specifies the type of SYSLOG server. This can either be primary (default) or secondary. CLI Example: .. code-block:: bash salt '*' cimc.set_syslog_server foo.bar.com salt '*' cimc.set_syslog_server foo.bar.com primary salt '*' cimc.set_syslog_server foo.bar.com secondary """ if not server: raise salt.exceptions.CommandExecutionError( "The SYSLOG server must be specified." 
) if type == "primary": dn = "sys/svc-ext/syslog/client-primary" inconfig = """<commSyslogClient name='primary' adminState='enabled' hostname='{}' dn='sys/svc-ext/syslog/client-primary'> </commSyslogClient>""".format( server ) elif type == "secondary": dn = "sys/svc-ext/syslog/client-secondary" inconfig = """<commSyslogClient name='secondary' adminState='enabled' hostname='{}' dn='sys/svc-ext/syslog/client-secondary'> </commSyslogClient>""".format( server ) else: raise salt.exceptions.CommandExecutionError( "The SYSLOG type must be either primary or secondary." ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def set_user(uid=None, username=None, password=None, priv=None, status=None): """ Sets a CIMC user with specified configurations. .. versionadded:: 2019.2.0 Args: uid(int): The user ID slot to create the user account in. username(str): The name of the user. password(str): The clear text password of the user. priv(str): The privilege level of the user. status(str): The account status of the user. CLI Example: .. code-block:: bash salt '*' cimc.set_user 11 username=admin password=foobar priv=admin active """ conf = "" if not uid: raise salt.exceptions.CommandExecutionError("The user ID must be specified.") if status: conf += ' accountStatus="{}"'.format(status) if username: conf += ' name="{}"'.format(username) if priv: conf += ' priv="{}"'.format(priv) if password: conf += ' pwd="{}"'.format(password) dn = "sys/user-ext/user-{}".format(uid) inconfig = """<aaaUser id="{0}"{1} dn="sys/user-ext/user-{0}"/>""".format(uid, conf) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def tftp_update_bios(server=None, path=None): """ Update the BIOS firmware through TFTP. Args: server(str): The IP address or hostname of the TFTP server. path(str): The TFTP path and filename for the BIOS image. CLI Example: .. code-block:: bash salt '*' cimc.tftp_update_bios foo.bar.com HP-SL2.cap """ if not server: raise salt.exceptions.CommandExecutionError( "The server name must be specified." ) if not path: raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.") dn = "sys/rack-unit-1/bios/fw-updatable" inconfig = """<firmwareUpdatable adminState='trigger' dn='sys/rack-unit-1/bios/fw-updatable' protocol='tftp' remoteServer='{}' remotePath='{}' type='blade-bios' />""".format( server, path ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret def tftp_update_cimc(server=None, path=None): """ Update the CIMC firmware through TFTP. Args: server(str): The IP address or hostname of the TFTP server. path(str): The TFTP path and filename for the CIMC image. CLI Example: .. code-block:: bash salt '*' cimc.tftp_update_cimc foo.bar.com HP-SL2.bin """ if not server: raise salt.exceptions.CommandExecutionError( "The server name must be specified." ) if not path: raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.") dn = "sys/rack-unit-1/mgmt/fw-updatable" inconfig = """<firmwareUpdatable adminState='trigger' dn='sys/rack-unit-1/mgmt/fw-updatable' protocol='tftp' remoteServer='{}' remotePath='{}' type='blade-controller' />""".format( server, path ) ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False) return ret
from __future__ import unicode_literals import copy from typing import Any from typing import Dict from typing import Generator from typing import List from typing import Optional from typing import Tuple from typing import Union from ._compat import decode from ._utils import merge_dicts from .exceptions import KeyAlreadyPresent from .exceptions import NonExistentKey from .exceptions import ParseError from .exceptions import TOMLKitError from .items import AoT from .items import Comment from .items import Item from .items import Key from .items import Null from .items import Table from .items import Whitespace from .items import item as _item _NOT_SET = object() class Container(dict): """ A container for items within a TOMLDocument. """ def __init__(self, parsed=False): # type: (bool) -> None self._map = {} # type: Dict[Key, int] self._body = [] # type: List[Tuple[Optional[Key], Item]] self._parsed = parsed self._table_keys = [] @property def body(self): # type: () -> List[Tuple[Optional[Key], Item]] return self._body @property def value(self): # type: () -> Dict[Any, Any] d = {} for k, v in self._body: if k is None: continue k = k.key v = v.value if isinstance(v, Container): v = v.value if k in d: merge_dicts(d[k], v) else: d[k] = v return d def parsing(self, parsing): # type: (bool) -> None self._parsed = parsing for k, v in self._body: if isinstance(v, Table): v.value.parsing(parsing) elif isinstance(v, AoT): for t in v.body: t.value.parsing(parsing) def add( self, key, item=None ): # type: (Union[Key, Item, str], Optional[Item]) -> Container """ Adds an item to the current Container. """ if item is None: if not isinstance(key, (Comment, Whitespace)): raise ValueError( "Non comment/whitespace items must have an associated key" ) key, item = None, key return self.append(key, item) def append(self, key, item): # type: (Union[Key, str, None], Item) -> Container if not isinstance(key, Key) and key is not None: key = Key(key) if not isinstance(item, Item): item = _item(item) if isinstance(item, (AoT, Table)) and item.name is None: item.name = key.key if ( isinstance(item, Table) and self._body and not self._parsed and not item.trivia.indent ): item.trivia.indent = "\n" if isinstance(item, AoT) and self._body and not self._parsed: if item and "\n" not in item[0].trivia.indent: item[0].trivia.indent = "\n" + item[0].trivia.indent else: self.append(None, Whitespace("\n")) if key is not None and key in self: current_idx = self._map[key] if isinstance(current_idx, tuple): current_body_element = self._body[current_idx[-1]] else: current_body_element = self._body[current_idx] current = current_body_element[1] if isinstance(item, Table): if not isinstance(current, (Table, AoT)): raise KeyAlreadyPresent(key) if item.is_aot_element(): # New AoT element found later on # Adding it to the current AoT if not isinstance(current, AoT): current = AoT([current, item], parsed=self._parsed) self._replace(key, key, current) else: current.append(item) return self elif current.is_aot(): if not item.is_aot_element(): # Tried to define a table after an AoT with the same name. 
raise KeyAlreadyPresent(key) current.append(item) return self elif current.is_super_table(): if item.is_super_table(): # We need to merge both super tables if ( self._table_keys[-1] != current_body_element[0] or key.is_dotted() or current_body_element[0].is_dotted() ): if not isinstance(current_idx, tuple): current_idx = (current_idx,) self._map[key] = current_idx + (len(self._body),) self._body.append((key, item)) self._table_keys.append(key) # Building a temporary proxy to check for errors OutOfOrderTableProxy(self, self._map[key]) return self for k, v in item.value.body: current.append(k, v) return self elif current_body_element[0].is_dotted(): raise TOMLKitError("Redefinition of an existing table") elif not item.is_super_table(): raise KeyAlreadyPresent(key) elif isinstance(item, AoT): if not isinstance(current, AoT): # Tried to define an AoT after a table with the same name. raise KeyAlreadyPresent(key) for table in item.body: current.append(table) return self else: raise KeyAlreadyPresent(key) is_table = isinstance(item, (Table, AoT)) if key is not None and self._body and not self._parsed: # If there is already at least one table in the current container # and the given item is not a table, we need to find the last # item that is not a table and insert after it # If no such item exists, insert at the top of the table key_after = None idx = 0 for k, v in self._body: if isinstance(v, Null): # This happens only after deletion continue if isinstance(v, Whitespace) and not v.is_fixed(): continue if not is_table and isinstance(v, (Table, AoT)): break key_after = k or idx idx += 1 if key_after is not None: if isinstance(key_after, int): if key_after + 1 < len(self._body) - 1: return self._insert_at(key_after + 1, key, item) else: previous_item = self._body[-1][1] if ( not isinstance(previous_item, Whitespace) and not is_table and "\n" not in previous_item.trivia.trail ): previous_item.trivia.trail += "\n" else: return self._insert_after(key_after, key, item) else: return self._insert_at(0, key, item) if key in self._map: current_idx = self._map[key] if isinstance(current_idx, tuple): current_idx = current_idx[-1] current = self._body[current_idx][1] if key is not None and not isinstance(current, Table): raise KeyAlreadyPresent(key) # Adding sub tables to a currently existing table if not isinstance(current_idx, tuple): current_idx = (current_idx,) self._map[key] = current_idx + (len(self._body),) else: self._map[key] = len(self._body) self._body.append((key, item)) if item.is_table(): self._table_keys.append(key) if key is not None: super(Container, self).__setitem__(key.key, item.value) return self def remove(self, key): # type: (Union[Key, str]) -> Container if not isinstance(key, Key): key = Key(key) idx = self._map.pop(key, None) if idx is None: raise NonExistentKey(key) if isinstance(idx, tuple): for i in idx: self._body[i] = (None, Null()) else: self._body[idx] = (None, Null()) super(Container, self).__delitem__(key.key) return self def _insert_after( self, key, other_key, item ): # type: (Union[str, Key], Union[str, Key], Union[Item, Any]) -> Container if key is None: raise ValueError("Key cannot be null in insert_after()") if key not in self: raise NonExistentKey(key) if not isinstance(key, Key): key = Key(key) if not isinstance(other_key, Key): other_key = Key(other_key) item = _item(item) idx = self._map[key] # Insert after the max index if there are many. 
if isinstance(idx, tuple): idx = max(idx) current_item = self._body[idx][1] if "\n" not in current_item.trivia.trail: current_item.trivia.trail += "\n" # Increment indices after the current index for k, v in self._map.items(): if isinstance(v, tuple): new_indices = [] for v_ in v: if v_ > idx: v_ = v_ + 1 new_indices.append(v_) self._map[k] = tuple(new_indices) elif v > idx: self._map[k] = v + 1 self._map[other_key] = idx + 1 self._body.insert(idx + 1, (other_key, item)) if key is not None: super(Container, self).__setitem__(other_key.key, item.value) return self def _insert_at( self, idx, key, item ): # type: (int, Union[str, Key], Union[Item, Any]) -> Container if idx > len(self._body) - 1: raise ValueError("Unable to insert at position {}".format(idx)) if not isinstance(key, Key): key = Key(key) item = _item(item) if idx > 0: previous_item = self._body[idx - 1][1] if ( not isinstance(previous_item, Whitespace) and not isinstance(item, (AoT, Table)) and "\n" not in previous_item.trivia.trail ): previous_item.trivia.trail += "\n" # Increment indices after the current index for k, v in self._map.items(): if isinstance(v, tuple): new_indices = [] for v_ in v: if v_ >= idx: v_ = v_ + 1 new_indices.append(v_) self._map[k] = tuple(new_indices) elif v >= idx: self._map[k] = v + 1 self._map[key] = idx self._body.insert(idx, (key, item)) if key is not None: super(Container, self).__setitem__(key.key, item.value) return self def item(self, key): # type: (Union[Key, str]) -> Item if not isinstance(key, Key): key = Key(key) idx = self._map.get(key, None) if idx is None: raise NonExistentKey(key) if isinstance(idx, tuple): # The item we are getting is an out of order table # so we need a proxy to retrieve the proper objects # from the parent container return OutOfOrderTableProxy(self, idx) return self._body[idx][1] def last_item(self): # type: () -> Optional[Item] if self._body: return self._body[-1][1] def as_string(self): # type: () -> str s = "" for k, v in self._body: if k is not None: if isinstance(v, Table): s += self._render_table(k, v) elif isinstance(v, AoT): s += self._render_aot(k, v) else: s += self._render_simple_item(k, v) else: s += self._render_simple_item(k, v) return s def _render_table( self, key, table, prefix=None ): # (Key, Table, Optional[str]) -> str cur = "" if table.display_name is not None: _key = table.display_name else: _key = key.as_string() if prefix is not None: _key = prefix + "." + _key if not table.is_super_table() or ( any( not isinstance(v, (Table, AoT, Whitespace)) for _, v in table.value.body ) and not key.is_dotted() ): open_, close = "[", "]" if table.is_aot_element(): open_, close = "[[", "]]" cur += "{}{}{}{}{}{}{}{}".format( table.trivia.indent, open_, decode(_key), close, table.trivia.comment_ws, decode(table.trivia.comment), table.trivia.trail, "\n" if "\n" not in table.trivia.trail and len(table.value) > 0 else "", ) for k, v in table.value.body: if isinstance(v, Table): if v.is_super_table(): if k.is_dotted() and not key.is_dotted(): # Dotted key inside table cur += self._render_table(k, v) else: cur += self._render_table(k, v, prefix=_key) else: cur += self._render_table(k, v, prefix=_key) elif isinstance(v, AoT): cur += self._render_aot(k, v, prefix=_key) else: cur += self._render_simple_item( k, v, prefix=_key if key.is_dotted() else None ) return cur def _render_aot(self, key, aot, prefix=None): _key = key.as_string() if prefix is not None: _key = prefix + "." 
+ _key cur = "" _key = decode(_key) for table in aot.body: cur += self._render_aot_table(table, prefix=_key) return cur def _render_aot_table(self, table, prefix=None): # (Table, Optional[str]) -> str cur = "" _key = prefix or "" if not table.is_super_table(): open_, close = "[[", "]]" cur += "{}{}{}{}{}{}{}".format( table.trivia.indent, open_, decode(_key), close, table.trivia.comment_ws, decode(table.trivia.comment), table.trivia.trail, ) for k, v in table.value.body: if isinstance(v, Table): if v.is_super_table(): if k.is_dotted(): # Dotted key inside table cur += self._render_table(k, v) else: cur += self._render_table(k, v, prefix=_key) else: cur += self._render_table(k, v, prefix=_key) elif isinstance(v, AoT): cur += self._render_aot(k, v, prefix=_key) else: cur += self._render_simple_item(k, v) return cur def _render_simple_item(self, key, item, prefix=None): if key is None: return item.as_string() _key = key.as_string() if prefix is not None: _key = prefix + "." + _key return "{}{}{}{}{}{}{}".format( item.trivia.indent, decode(_key), key.sep, decode(item.as_string()), item.trivia.comment_ws, decode(item.trivia.comment), item.trivia.trail, ) # Dictionary methods def keys(self): # type: () -> Generator[str] return super(Container, self).keys() def values(self): # type: () -> Generator[Item] for k in self.keys(): yield self[k] def items(self): # type: () -> Generator[Item] for k, v in self.value.items(): if k is None: continue yield k, v def update(self, other): # type: (Dict) -> None for k, v in other.items(): self[k] = v def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any if not isinstance(key, Key): key = Key(key) if key not in self: return default return self[key] def pop(self, key, default=_NOT_SET): try: value = self[key] except KeyError: if default is _NOT_SET: raise return default del self[key] return value def setdefault( self, key, default=None ): # type: (Union[Key, str], Any) -> Union[Item, Container] if key not in self: self[key] = default return self[key] def __contains__(self, key): # type: (Union[Key, str]) -> bool if not isinstance(key, Key): key = Key(key) return key in self._map def __getitem__(self, key): # type: (Union[Key, str]) -> Union[Item, Container] if not isinstance(key, Key): key = Key(key) idx = self._map.get(key, None) if idx is None: raise NonExistentKey(key) if isinstance(idx, tuple): # The item we are getting is an out of order table # so we need a proxy to retrieve the proper objects # from the parent container return OutOfOrderTableProxy(self, idx) item = self._body[idx][1] if item.is_boolean(): return item.value return item def __setitem__(self, key, value): # type: (Union[Key, str], Any) -> None if key is not None and key in self: self._replace(key, key, value) else: self.append(key, value) def __delitem__(self, key): # type: (Union[Key, str]) -> None self.remove(key) def _replace( self, key, new_key, value ): # type: (Union[Key, str], Union[Key, str], Item) -> None if not isinstance(key, Key): key = Key(key) if not isinstance(new_key, Key): new_key = Key(new_key) idx = self._map.get(key, None) if idx is None: raise NonExistentKey(key) self._replace_at(idx, new_key, value) def _replace_at( self, idx, new_key, value ): # type: (Union[int, Tuple[int]], Union[Key, str], Item) -> None if not isinstance(new_key, Key): new_key = Key(new_key) if isinstance(idx, tuple): for i in idx[1:]: self._body[i] = (None, Null()) idx = idx[0] k, v = self._body[idx] self._map[new_key] = self._map.pop(k) if new_key != k: super(Container, 
self).__delitem__(k) if isinstance(self._map[new_key], tuple): self._map[new_key] = self._map[new_key][0] value = _item(value) # Copying trivia if not isinstance(value, (Whitespace, AoT)): value.trivia.indent = v.trivia.indent value.trivia.comment_ws = v.trivia.comment_ws value.trivia.comment = v.trivia.comment value.trivia.trail = v.trivia.trail if isinstance(value, Table): # Insert a cosmetic new line for tables value.append(None, Whitespace("\n")) self._body[idx] = (new_key, value) super(Container, self).__setitem__(new_key.key, value.value) def __str__(self): # type: () -> str return str(self.value) def __repr__(self): # type: () -> str return super(Container, self).__repr__() def __eq__(self, other): # type: (Dict) -> bool if not isinstance(other, dict): return NotImplemented return self.value == other def _getstate(self, protocol): return (self._parsed,) def __reduce__(self): return self.__reduce_ex__(2) def __reduce_ex__(self, protocol): return ( self.__class__, self._getstate(protocol), (self._map, self._body, self._parsed), ) def __setstate__(self, state): self._map = state[0] self._body = state[1] self._parsed = state[2] def copy(self): # type: () -> Container return copy.copy(self) def __copy__(self): # type: () -> Container c = self.__class__(self._parsed) for k, v in super(Container, self).copy().items(): super(Container, c).__setitem__(k, v) c._body += self.body c._map.update(self._map) return c class OutOfOrderTableProxy(dict): def __init__(self, container, indices): # type: (Container, Tuple) -> None self._container = container self._internal_container = Container(self._container.parsing) self._tables = [] self._tables_map = {} self._map = {} for i in indices: key, item = self._container._body[i] if isinstance(item, Table): self._tables.append(item) table_idx = len(self._tables) - 1 for k, v in item.value.body: self._internal_container.append(k, v) self._tables_map[k] = table_idx if k is not None: super(OutOfOrderTableProxy, self).__setitem__(k.key, v) else: self._internal_container.append(key, item) self._map[key] = i if key is not None: super(OutOfOrderTableProxy, self).__setitem__(key.key, item) @property def value(self): return self._internal_container.value def __getitem__(self, key): # type: (Union[Key, str]) -> Any if key not in self._internal_container: raise NonExistentKey(key) return self._internal_container[key] def __setitem__(self, key, item): # type: (Union[Key, str], Any) -> None if key in self._map: idx = self._map[key] self._container._replace_at(idx, key, item) elif key in self._tables_map: table = self._tables[self._tables_map[key]] table[key] = item elif self._tables: table = self._tables[0] table[key] = item else: self._container[key] = item if key is not None: super(OutOfOrderTableProxy, self).__setitem__(key, item) def __delitem__(self, key): # type: (Union[Key, str]) -> None if key in self._map: idx = self._map[key] del self._container[key] del self._map[key] elif key in self._tables_map: table = self._tables[self._tables_map[key]] del table[key] del self._tables_map[key] else: raise NonExistentKey(key) del self._internal_container[key] def keys(self): return self._internal_container.keys() def values(self): return self._internal_container.values() def items(self): # type: () -> Generator[Item] return self._internal_container.items() def update(self, other): # type: (Dict) -> None self._internal_container.update(other) def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any return self._internal_container.get(key, default=default) def 
pop(self, key, default=_NOT_SET): return self._internal_container.pop(key, default=default) def setdefault( self, key, default=None ): # type: (Union[Key, str], Any) -> Union[Item, Container] return self._internal_container.setdefault(key, default=default) def __contains__(self, key): return key in self._internal_container def __str__(self): return str(self._internal_container) def __repr__(self): return repr(self._internal_container) def __eq__(self, other): # type: (Dict) -> bool if not isinstance(other, dict): return NotImplemented return self._internal_container == other def __getattr__(self, attribute): return getattr(self._internal_container, attribute)
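# --- Illustrative sketch (not part of the Container implementation) ---
# Container is the dict-like body behind TOMLDocument: __setitem__ routes
# through append(), raw values are wrapped via items.item(), and as_string()
# renders the document back out with its formatting. Assuming this module is
# installed as the 'tomlkit' package, the public API exercises that behaviour
# roughly like this:

import tomlkit

doc = tomlkit.document()
doc["title"] = "example"          # goes through Container.append()
server = tomlkit.table()
server["host"] = "127.0.0.1"
doc["server"] = server            # rendered later by _render_table()

text = tomlkit.dumps(doc)         # ultimately calls Container.as_string()
assert tomlkit.parse(text)["server"]["host"] == "127.0.0.1"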
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for migration / resize operations. """ import os from nova.openstack.common import excutils from nova.openstack.common import log as logging from nova.virt.hyperv import hostutils from nova.virt.hyperv import imagecache from nova.virt.hyperv import pathutils from nova.virt.hyperv import vhdutils from nova.virt.hyperv import vmops from nova.virt.hyperv import vmutils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) class MigrationOps(object): def __init__(self): self._hostutils = hostutils.HostUtils() self._vmutils = vmutils.VMUtils() self._vhdutils = vhdutils.VHDUtils() self._pathutils = pathutils.PathUtils() self._volumeops = volumeops.VolumeOps() self._vmops = vmops.VMOps() self._imagecache = imagecache.ImageCache() def _migrate_disk_files(self, instance_name, disk_files, dest): same_host = False if dest in self._hostutils.get_local_ips(): same_host = True LOG.debug(_("Migration target is the source host")) else: LOG.debug(_("Migration target host: %s") % dest) instance_path = self._pathutils.get_instance_dir(instance_name) revert_path = self._pathutils.get_instance_migr_revert_dir( instance_name, remove_dir=True) dest_path = None try: if same_host: # Since source and target are the same, we copy the files to # a temporary location before moving them into place dest_path = '%s_tmp' % instance_path if self._pathutils.exists(dest_path): self._pathutils.rmtree(dest_path) self._pathutils.makedirs(dest_path) else: dest_path = self._pathutils.get_instance_dir( instance_name, dest, remove_dir=True) for disk_file in disk_files: # Skip the config drive as the instance is already configured if os.path.basename(disk_file).lower() != 'configdrive.vhd': LOG.debug(_('Copying disk "%(disk_file)s" to ' '"%(dest_path)s"') % locals()) self._pathutils.copy(disk_file, dest_path) self._pathutils.rename(instance_path, revert_path) if same_host: self._pathutils.rename(dest_path, instance_path) except Exception: with excutils.save_and_reraise_exception(): self._cleanup_failed_disk_migration(instance_path, revert_path, dest_path) def _cleanup_failed_disk_migration(self, instance_path, revert_path, dest_path): try: if dest_path and self._pathutils.exists(dest_path): self._pathutils.rmtree(dest_path) if self._pathutils.exists(revert_path): self._pathutils.rename(revert_path, instance_path) except Exception as ex: # Log and ignore this exception LOG.exception(ex) LOG.error(_("Cannot cleanup migration files")) def migrate_disk_and_power_off(self, context, instance, dest, instance_type, network_info, block_device_info=None): LOG.debug(_("migrate_disk_and_power_off called"), instance=instance) self._vmops.power_off(instance) instance_name = instance["name"] (disk_files, volume_drives) = self._vmutils.get_vm_storage_paths(instance_name) if disk_files: self._migrate_disk_files(instance_name, disk_files, dest) self._vmops.destroy(instance, 
destroy_disks=False) # disk_info is not used return "" def confirm_migration(self, migration, instance, network_info): LOG.debug(_("confirm_migration called"), instance=instance) self._pathutils.get_instance_migr_revert_dir(instance['name'], remove_dir=True) def _revert_migration_files(self, instance_name): instance_path = self._pathutils.get_instance_dir( instance_name, create_dir=False, remove_dir=True) revert_path = self._pathutils.get_instance_migr_revert_dir( instance_name) self._pathutils.rename(revert_path, instance_path) def finish_revert_migration(self, instance, network_info, block_device_info=None): LOG.debug(_("finish_revert_migration called"), instance=instance) instance_name = instance['name'] self._revert_migration_files(instance_name) if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._pathutils.get_vhd_path(instance_name) self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path) self._vmops.power_on(instance) def _merge_base_vhd(self, diff_vhd_path, base_vhd_path): base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path), os.path.basename(base_vhd_path)) try: LOG.debug(_('Copying base disk %(base_vhd_path)s to ' '%(base_vhd_copy_path)s'), locals()) self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path) LOG.debug(_("Reconnecting copied base VHD " "%(base_vhd_copy_path)s and diff " "VHD %(diff_vhd_path)s"), locals()) self._vhdutils.reconnect_parent_vhd(diff_vhd_path, base_vhd_copy_path) LOG.debug(_("Merging base disk %(base_vhd_copy_path)s and " "diff disk %(diff_vhd_path)s"), locals()) self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path) # Replace the differential VHD with the merged one self._pathutils.rename(base_vhd_copy_path, diff_vhd_path) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(base_vhd_copy_path): self._pathutils.remove(base_vhd_copy_path) def _resize_vhd(self, vhd_path, new_size): LOG.debug(_("Getting info for disk: %s"), vhd_path) base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path) if base_disk_path: # A differential VHD cannot be resized self._merge_base_vhd(vhd_path, base_disk_path) LOG.debug(_("Resizing disk \"%(vhd_path)s\" to new max " "size %(new_size)s"), locals()) self._vhdutils.resize_vhd(vhd_path, new_size) def _check_base_disk(self, context, instance, diff_vhd_path, src_base_disk_path): base_vhd_path = self._imagecache.get_cached_image(context, instance) # If the location of the base host differs between source # and target hosts we need to reconnect the base disk if src_base_disk_path.lower() != base_vhd_path.lower(): LOG.debug(_("Reconnecting copied base VHD " "%(base_vhd_path)s and diff " "VHD %(diff_vhd_path)s"), locals()) self._vhdutils.reconnect_parent_vhd(diff_vhd_path, base_vhd_path) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None): LOG.debug(_("finish_migration called"), instance=instance) instance_name = instance['name'] if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._pathutils.get_vhd_path(instance_name) if not self._pathutils.exists(root_vhd_path): raise vmutils.HyperVException(_("Cannot find boot VHD " "file: %s") % root_vhd_path) vhd_info = self._vhdutils.get_vhd_info(root_vhd_path) src_base_disk_path = vhd_info.get("ParentPath") if src_base_disk_path: self._check_base_disk(context, instance, root_vhd_path, 
src_base_disk_path) if resize_instance: curr_size = vhd_info['MaxInternalSize'] new_size = instance['root_gb'] * 1024 ** 3 if new_size < curr_size: raise vmutils.HyperVException(_("Cannot resize a VHD to a " "smaller size")) elif new_size > curr_size: self._resize_vhd(root_vhd_path, new_size) self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path) self._vmops.power_on(instance)
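# Hedged, illustrative sketch (not part of nova): the resize decision in
# finish_migration above reduces to comparing the VHD's current maximum
# internal size with the flavor's root disk size converted to bytes. The
# helper below is a hypothetical, self-contained restatement of that check.


def _needs_vhd_resize(curr_size_bytes, root_gb):
    """Return True when the VHD must grow; shrinking is not supported."""
    new_size = root_gb * 1024 ** 3
    if new_size < curr_size_bytes:
        raise ValueError("Cannot resize a VHD to a smaller size")
    return new_size > curr_size_bytes


# Example: a VHD currently capped at 5 GiB and a 10 GB flavor -> must grow.
assert _needs_vhd_resize(5 * 1024 ** 3, 10) is True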
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from twitter.common.collections import OrderedSet from pants.backend.jvm.ivy_utils import IvyInfo, IvyModule, IvyModuleRef from pants.backend.jvm.targets.exclude import Exclude from pants.backend.jvm.targets.jar_dependency import JarDependency from pants.backend.jvm.targets.jar_library import JarLibrary from pants.backend.jvm.targets.java_library import JavaLibrary from pants.backend.jvm.tasks.ivy_resolve import IvyResolve from pants.invalidation.cache_manager import VersionedTargetSet from pants.util.contextutil import temporary_dir from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase class IvyResolveTest(JvmToolTaskTestBase): """Tests for the class IvyResolve.""" @classmethod def task_type(cls): return IvyResolve def setUp(self): super(IvyResolveTest, self).setUp() self.set_options(use_nailgun=False) self.set_options_for_scope('cache.{}'.format(self.options_scope), read_from=None, write_to=None) def resolve(self, targets): """Given some targets, execute a resolve, and return the resulting compile_classpath.""" context = self.context(target_roots=targets) self.create_task(context).execute() return context.products.get_data('compile_classpath') # # Test section # def test_resolve_specific(self): # Create a jar_library with a single dep, and another library with no deps. dep = JarDependency('commons-lang', 'commons-lang', '2.5') jar_lib = self.make_target('//:a', JarLibrary, jars=[dep]) scala_lib = self.make_target('//:b', JavaLibrary) # Confirm that the deps were added to the appropriate targets. compile_classpath = self.resolve([jar_lib, scala_lib]) self.assertEquals(1, len(compile_classpath.get_for_target(jar_lib))) self.assertEquals(0, len(compile_classpath.get_for_target(scala_lib))) def test_resolve_conflicted(self): # Create jar_libraries with different versions of the same dep: this will cause # a pre-ivy "eviction" in IvyUtils.generate_ivy, but the same case can be triggered # due to an ivy eviction where the declared version loses to a transitive version. losing_dep = JarDependency('com.google.guava', 'guava', '16.0') winning_dep = JarDependency('com.google.guava', 'guava', '16.0.1') losing_lib = self.make_target('//:a', JarLibrary, jars=[losing_dep]) winning_lib = self.make_target('//:b', JarLibrary, jars=[winning_dep]) # Confirm that the same artifact was added to each target. context = self.context(target_roots=[losing_lib, winning_lib]) def artifact_path(name): return os.path.join(self.pants_workdir, 'ivy_artifact', name) symlink_map = {artifact_path('bogus0'): artifact_path('bogus0'), artifact_path('bogus1'): artifact_path('bogus1'), artifact_path('unused'): artifact_path('unused')} task = self.create_task(context, 'unused') def mock_ivy_resolve(targets, *args, **kw): if targets: cache_manager = task.create_cache_manager(False) vts = VersionedTargetSet(cache_manager, cache_manager.wrap_targets(targets)) cache_key = vts.cache_key.hash else: cache_key = None return [], symlink_map, cache_key task.ivy_resolve = mock_ivy_resolve def mock_parse_report(resolve_hash_name_ignored, conf): ivy_info = IvyInfo(conf) # Guava 16.0 would be evicted by Guava 16.0.1. But in a real # resolve, it's possible that before it was evicted, it would # generate some resolution data. 
artifact_1 = artifact_path('bogus0') unused_artifact = artifact_path('unused') # Because guava 16.0 was evicted, it has no artifacts guava_0 = IvyModule(IvyModuleRef('com.google.guava', 'guava', '16.0'), None, []) guava_1 = IvyModule(IvyModuleRef('com.google.guava', 'guava', '16.0.1'), artifact_1, []) ivy_info.add_module(guava_0) ivy_info.add_module(guava_1) artifact_dep_1 = artifact_path('bogus1') # Because fake#dep 16.0 was evicted before it was resolved, # its deps are never examined, so we don't call add_module. guava_dep_0 = IvyModule(IvyModuleRef('com.google.fake', 'dep', '16.0.0'), None, [guava_0.ref]) guava_dep_1 = IvyModule(IvyModuleRef('com.google.fake', 'dep', '16.0.1'), artifact_dep_1, [guava_1.ref]) ivy_info.add_module(guava_dep_0) ivy_info.add_module(guava_dep_1) # Add an unrelated module to ensure that it's not returned unrelated_parent = IvyModuleRef('com.google.other', 'parent', '1.0') unrelated = IvyModule(IvyModuleRef('com.google.unrelated', 'unrelated', '1.0'), unused_artifact, [unrelated_parent]) ivy_info.add_module(unrelated) return ivy_info task._parse_report = mock_parse_report task.execute() compile_classpath = context.products.get_data('compile_classpath', None) losing_cp = compile_classpath.get_for_target(losing_lib) winning_cp = compile_classpath.get_for_target(winning_lib) self.assertEquals(losing_cp, winning_cp) self.assertEquals(OrderedSet([(u'default', artifact_path(u'bogus0')), (u'default', artifact_path(u'bogus1'))]), winning_cp) def test_resolve_multiple_artifacts(self): no_classifier = JarDependency('junit', 'junit', rev='4.12') classifier = JarDependency('junit', 'junit', rev='4.12', classifier='sources') no_classifier_lib = self.make_target('//:a', JarLibrary, jars=[no_classifier]) classifier_lib = self.make_target('//:b', JarLibrary, jars=[classifier]) classifier_and_no_classifier_lib = self.make_target('//:c', JarLibrary, jars=[classifier, no_classifier]) compile_classpath = self.resolve([no_classifier_lib, classifier_lib, classifier_and_no_classifier_lib]) no_classifier_cp = compile_classpath.get_classpath_entries_for_targets([no_classifier_lib]) classifier_cp = compile_classpath.get_classpath_entries_for_targets([classifier_lib]) classifier_and_no_classifier_cp = compile_classpath.get_classpath_entries_for_targets( classifier_and_no_classifier_lib.closure(bfs=True)) self.assertIn(no_classifier.coordinate, {resolved_jar.coordinate for conf, resolved_jar in classifier_and_no_classifier_cp}) self.assertIn(classifier.coordinate, {resolved_jar.coordinate for conf, resolved_jar in classifier_and_no_classifier_cp}) self.assertNotIn(classifier.coordinate, {resolved_jar.coordinate for conf, resolved_jar in no_classifier_cp}) self.assertIn(no_classifier.coordinate, {resolved_jar.coordinate for conf, resolved_jar in no_classifier_cp}) self.assertNotIn(no_classifier.coordinate, {resolved_jar.coordinate for conf, resolved_jar in classifier_cp}) self.assertIn(classifier.coordinate, {resolved_jar.coordinate for conf, resolved_jar in classifier_cp}) def test_excludes_in_java_lib_excludes_all_from_jar_lib(self): junit_dep = JarDependency('junit', 'junit', rev='4.12') junit_jar_lib = self.make_target('//:a', JarLibrary, jars=[junit_dep]) excluding_target = self.make_target('//:b', JavaLibrary, excludes=[Exclude('junit', 'junit')]) compile_classpath = self.resolve([junit_jar_lib, excluding_target]) junit_jar_cp = compile_classpath.get_for_target(junit_jar_lib) excluding_cp = compile_classpath.get_for_target(excluding_target) self.assertEquals(0, len(junit_jar_cp)) 
self.assertEquals(0, len(excluding_cp)) def test_resolve_no_deps(self): # Resolve a library with no deps, and confirm that the empty product is created. target = self.make_target('//:a', JavaLibrary) self.assertTrue(self.resolve([target])) def test_resolve_symlinked_cache(self): """Test to make sure resolve works when --ivy-cache-dir is a symlinked path. When ivy returns the path to a resolved jar file, it might be the realpath to the jar file, not the symlink'ed path we are expecting for --ivy-cache-dir. Make sure that resolve correctly recognizes these as belonging in the cache dir and lookups for either the symlinked cache dir or the realpath to the cache dir are recognized. """ with temporary_dir() as realcachedir: with temporary_dir() as symlinkdir: symlink_cache_dir = os.path.join(symlinkdir, 'symlinkedcache') os.symlink(realcachedir, symlink_cache_dir) self.set_options_for_scope('ivy', cache_dir=symlink_cache_dir) dep = JarDependency('commons-lang', 'commons-lang', '2.5') jar_lib = self.make_target('//:a', JarLibrary, jars=[dep]) # Confirm that the deps were added to the appropriate targets. compile_classpath = self.resolve([jar_lib]) self.assertEquals(1, len(compile_classpath.get_for_target(jar_lib)))
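# Hedged, standalone sketch of the symlinked-cache layout exercised by
# test_resolve_symlinked_cache above, using only the standard library. The
# directory names are hypothetical and no Pants machinery is involved.
import os
import shutil
import tempfile

realcachedir = tempfile.mkdtemp()
symlinkdir = tempfile.mkdtemp()
try:
    symlink_cache_dir = os.path.join(symlinkdir, 'symlinkedcache')
    os.symlink(realcachedir, symlink_cache_dir)
    # A path seen through the symlink and its realpath name the same directory,
    # which is why the resolver must recognize either form as the cache dir.
    assert os.path.realpath(symlink_cache_dir) == os.path.realpath(realcachedir)
finally:
    shutil.rmtree(symlinkdir)
    shutil.rmtree(realcachedir)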
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for api module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.autograph import utils from tensorflow.contrib.autograph.core import config from tensorflow.contrib.autograph.impl import api from tensorflow.contrib.autograph.pyct import parser from tensorflow.contrib.autograph.utils import py_func from tensorflow.python.framework import constant_op from tensorflow.python.platform import test from tensorflow.python.util import tf_inspect tf = utils.fake_tf() class ApiTest(test.TestCase): def setUp(self): config.COMPILED_IMPORT_STATEMENTS = ( 'from __future__ import print_function', 'from tensorflow.contrib.autograph import utils' ' as autograph_utils', 'tf = autograph_utils.fake_tf()', ) def test_decorator_recurses(self): class TestClass(object): def called_member(self, a): if a < 0: a = -a return a @api.convert(recursive=True) def test_method(self, x, s, a): while tf.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() with self.test_session() as sess: x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], sess.run(x).tolist()) def test_decorator_does_not_recurse(self): class TestClass(object): def called_member(self, a): return tf.negative(a) @api.convert(recursive=False) def test_method(self, x, s, a): while tf.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() with self.test_session() as sess: x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], sess.run(x).tolist()) def test_decorator_calls_unconverted_graph(self): class TestClass(object): @api.do_not_convert(api.RunMode.GRAPH) def called_member(self, a): return tf.negative(a) @api.convert(recursive=True) def test_method(self, x, s, a): while tf.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() with self.test_session() as sess: x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], sess.run(x).tolist()) def test_decorator_calls_unconverted_py_func(self): class TestClass(object): @api.do_not_convert( api.RunMode.PY_FUNC, return_dtypes=py_func.MatchDType(1)) def called_member(self, a): return np.negative(a) @api.convert(recursive=True) def test_method(self, x, s, a): while tf.reduce_sum(x) > s: y = self.called_member(a) # set_shape works around while_loop's limitations. # TODO(mdan): Allow specifying shapes (or ShapeLike) instead. 
y.set_shape(a.shape) x //= y return x tc = TestClass() with self.test_session() as sess: x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], sess.run(x).tolist()) def test_decorator_calls_decorated(self): class TestClass(object): @api.convert() def called_member(self, a): if a < 0: a = -a return a @api.convert(recursive=True) def test_method(self, x, s, a): while tf.reduce_sum(x) > s: x //= self.called_member(a) return x tc = TestClass() with self.test_session() as sess: x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], sess.run(x).tolist()) def test_decorator_preserves_argspec(self): class TestClass(object): def called_member(self, a): if a < 0: a = -a return a called_member_converted = api.convert()(called_member) tc = TestClass() self.assertListEqual( list(tf_inspect.getfullargspec(tc.called_member)), list(tf_inspect.getfullargspec(tc.called_member_converted))) def test_convert_call_site_decorator(self): class TestClass(object): def called_member(self, a): if a < 0: a = -a return a @api.convert(recursive=True) def test_method(self, x, s, a): while tf.reduce_sum(x) > s: x //= api.converted_call(self.called_member, False, False, False, {}, self, a) return x tc = TestClass() with self.test_session() as sess: x = tc.test_method( constant_op.constant([2, 4]), constant_op.constant(1), constant_op.constant(-2)) self.assertListEqual([0, 1], sess.run(x).tolist()) def test_converted_call_builtin(self): x = api.converted_call(range, False, False, False, {}, 3) self.assertEqual((0, 1, 2), tuple(x)) def test_converted_call_function(self): def test_fn(x): if x < 0: return -x return x with self.test_session() as sess: x = api.converted_call(test_fn, False, False, False, {}, constant_op.constant(-1)) self.assertEqual(1, sess.run(x)) def test_converted_call_method(self): class TestClass(object): def __init__(self, x): self.x = x def test_method(self): if self.x < 0: return -self.x return self.x with self.test_session() as sess: tc = TestClass(constant_op.constant(-1)) x = api.converted_call(tc.test_method, False, False, False, {}, tc) self.assertEqual(1, sess.run(x)) def test_converted_call_method_by_class(self): class TestClass(object): def __init__(self, x): self.x = x def test_method(self): if self.x < 0: return -self.x return self.x with self.test_session() as sess: tc = TestClass(constant_op.constant(-1)) x = api.converted_call(TestClass.test_method, False, False, False, {}, tc) self.assertEqual(1, sess.run(x)) def test_converted_call_callable_object(self): class TestClass(object): def __init__(self, x): self.x = x def __call__(self): if self.x < 0: return -self.x return self.x with self.test_session() as sess: tc = TestClass(constant_op.constant(-1)) x = api.converted_call(tc, False, False, False, {}) self.assertEqual(1, sess.run(x)) def test_converted_call_constructor(self): class TestClass(object): def __init__(self, x): self.x = x def test_method(self): if self.x < 0: return -self.x return self.x with self.test_session() as sess: tc = api.converted_call(TestClass, False, False, False, {}, constant_op.constant(-1)) # tc is now a converted object. 
x = tc.test_method() self.assertEqual(1, sess.run(x)) def test_converted_call_already_converted(self): def f(x): return x == 0 with self.test_session() as sess: x = api.converted_call(f, False, False, False, {}, constant_op.constant(0)) self.assertTrue(sess.run(x)) converted_f = api.to_graph(f) x = api.converted_call(converted_f, False, False, False, {}, constant_op.constant(0)) self.assertTrue(sess.run(x)) def test_to_graph_basic(self): def test_fn(x, s): while tf.reduce_sum(x) > s: x //= 2 return x compiled_fn = api.to_graph(test_fn) with self.test_session() as sess: x = compiled_fn(constant_op.constant([4, 8]), 4) self.assertListEqual([1, 2], sess.run(x).tolist()) def test_to_code_basic(self): def test_fn(x, s): while tf.reduce_sum(x) > s: x /= 2 return x compiled_code = api.to_code(test_fn) # Just check that it is parseable Python code. self.assertIsNotNone(parser.parse_str(compiled_code)) def test_source_map_attribute_present(self): def test_fn(y): return y**2 self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map')) if __name__ == '__main__': test.main()
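# Plain-NumPy sketch (hedged, illustration only) of the loop semantics the
# to_graph tests above expect from test_fn: keep halving x until its sum is
# no longer greater than s. No TensorFlow is used here.
import numpy as np


def halve_until(x, s):
    x = np.asarray(x)
    while x.sum() > s:
        x = x // 2
    return x


# Mirrors test_to_graph_basic: [4, 8] -> [2, 4] -> [1, 2] once the sum is <= 4.
assert halve_until([4, 8], 4).tolist() == [1, 2]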
import copy import matplotlib import networkx as nx import xlsxwriter as xlsxw from gat.dao import dao colors = ["DeepSkyBlue", "Gold", "ForestGreen", "Ivory", "DarkOrchid", "Coral", "DarkTurquoise", "DarkCyan", "Blue"] hexColors = {} for color in colors: rgbVal = matplotlib.colors.colorConverter.to_rgb(color) hexVal = matplotlib.colors.rgb2hex(rgbVal).replace("#", "0x") hexColors[color] = hexVal def SNA2Dplot(graph, request, label=False): attr = {} if graph == None: return None if request.form.get("options") == None: i = 0 for nodeSet in graph.classList: attr[nodeSet] = [colors[i], 50] i += 1 if i > len(colors) + 1: i = 0 else: for nodeSet in graph.classList: c = request.form.get(nodeSet + "Color") attr[nodeSet] = [c, 50] return graph.plot_2D(attr, label=label) # makes more sense to make a whole SNA viz method that outputs both 2D and 3D if so desired # 2D is probably not desired in any case though def SNA2Dand3D(graph, request, case_num, _3D=True, _2D=False, label=False): fileDict = dao.getFileDict(case_num) systemMeasures = {} if graph == None: return None, None, None, None # make both attr = {} colorInput = [] if request.form.get("options") == None: i = 0 for nodeSet in graph.classList: attr[nodeSet] = [colors[i], 50] colorInput.append(hexColors[colors[i]]) i += 1 if i == 8: i = 0 else: for nodeSet in graph.classList: attr[nodeSet] = [request.form.get(nodeSet + "Color"), 50] c = request.form.get(nodeSet + "Color") colorInput.append(hexColors[c]) if request.form.get("removeNodeSubmit") != None: graph.removeNode(request.form.get("a")) # Get new node info, if available if request.form.get("addNodeSubmit") != None: node = request.form.get("nodeName") attrDict = { 'block': request.form.get("classList"), 'class': request.form.get("classList") } i = 0 while (request.form.get("attribute" + str(i)) is not None) and ( request.form.get("attribute" + str(i)) != '') and ( request.form.get("value" + str(i)) is not None) and ( request.form.get("value" + str(i)) != ''): key = request.form.get("attribute" + str(i)) value = request.form.get("value" + str(i)) if request.form.get("weight" + str(i)) is not None and request.form.get("weight" + str(i)) != '': value = [value, {'W': request.form.get("weight" + str(i))}] dictForm = {key: value} attrDict.update(dictForm) i += 1 links = [] j = 0 while request.form.get("link" + str(j)) != None: links.append(request.form.get("link" + str(j))) j += 1 graph.addNode(node, attrDict, links) if request.form.get("eventSubmit") != None: fileDict['SNA_Events'] = 'static/sample/sna/suicide_attacks_subset.xlsx' ##TODO add a blueprint route for event sheet here inputFile = fileDict['SNA_Events'] iters = int(request.form.get("iters")) systemMeasures['SentimentDict'] = True fileDict['SentimentChange'] = write_to_excel(graph.event_update(inputFile,iters)) graph.calculatePropensities(fileDict["propToggle"]) # Add system measures dictionary try: systemMeasures["Node Connectivity"] = graph.node_connectivity() # Currently only returning zero... 
except: "No node connectivity" try: systemMeasures["Average Clustering"] = graph.average_clustering() except: "No average clustering" # try: # systemMeasures["Average Degree Connectivity"] = graph.average_degree_connectivity() # except: # "No average degree connectivity" try: systemMeasures["Degree Assortativity"] = graph.degree_assortativity() except: "No degree assortativity" try: systemMeasures["Center"] = graph.center() except: "No center" try: systemMeasures["Diameter"] = graph.diameter() except: "No periphery" try: systemMeasures["Periphery"] = graph.periphery() except: "No periphery" systemMeasures["Overall Sentiment"] = graph.sentiment(types=["Belief","Audience","Actor"],key='W') # try: # systemMeasures["Triadic Census"] = graph.triadic_census() # except: # "No triadic census" # systemMeasures["Attribute Assortivity"] = graph.attribute_assortivity() # Which attributes...? UI? if graph.is_strongly_connected(): systemMeasures["Connection Strength"] = "Strong" elif graph.is_weakly_connected(): systemMeasures["Connection Strength"] = "Weak" # Add system measures descriptions to dictionary systemMeasures["Description"] = { 'Average Clustering': 'A high clustering coefficient indicates that actors within the network are closely connected to a statistically significant degree. It is a sophisticated measure of the density of a network.', 'Connection Strength': 'Knowing whether a graph is strongly or weakly connected is helpful because it demonstrates the robustness of the graph based on its redundancy. If a graph is strongly connected, there are two links between each actor in the network, one in each direction. A strongly connected graph thus would likely have more redundant communication/information flow and be more difficult to perturb than a weakly connected graph.', 'Resilience': 'The baseline value for resilience is determined by perturbing each community in the network and measuring the mean shortest path average over several perturbations. The results are scaled on a normal curve across all cliques and a percentile resilience is determined for each clique. A high percentile resilience denotes resilience to perturbation. These values are visualized on a color spectrum from red to blue, where red is low relative resilience and blue is high relative resilience.', 'AddNode': 'Introduces a new node to the network, complete with a user-defined name, user-defined attributes and known links. Using the DRAG link prediction model, node attributes are used to form likely connections and intelligently model the effects of external change on the network. New nodes and their predicted links are colored red for easy identification.', 'RemoveNode': 'Removes the node inputted in the box below and any links to which it belongs.', 'eigenvector': 'Centrality measure which sums the centralities of all adjacent nodes.', 'betweenness': 'Centrality based on the shortest path that passes through the node.', 'sentiment':'The sum of all actor sentiments towards this node.', 'Overall Sentiment': 'The sum of all actor sentiments towards this node.', 'Cliques':'Influence communities are detected in two-step Louvain modularity optimization. First, the core myth-symbol complexes are identified and named. Second, very proximate actors are grouped with the myth-symbol complex to form a full influence network.', 'EventAddition': 'Choose a number of iterations to simulate event addition into the network. 
Events are drawn from input file.', } # Find cliques when requested if request.form.get("cliqueSubmit") != None: cliques, names = graph.communityDetection() systemMeasures["Cliques"] = [] fileDict["Cliques"] = [] for name, clique in zip(names, cliques): central = graph.G.node[name].get('Name')[0] if graph.G.node[name].get('Name') is not None else name nodes = [] json_clique = {} i = 0 for node in clique.nodes(): nodes.append(graph.G.node[node].get('Name')[0] if graph.G.node[node].get('Name') is not None else node) json_clique["node"+str(i)] = node i+=1 systemMeasures["Cliques"].append((central,nodes)) fileDict["Cliques"].append((central,json_clique)) # Calculate resilience when requested if request.form.get("resilienceSubmit") != None: try: systemMeasures["Baseline"], systemMeasures["Resilience"], systemMeasures["Trace"] = graph.calculateResilience() # gets a scaled resilience value for each clique identified in network except nx.exception.NetworkXError: systemMeasures["Resilience"] = "Could not calculate resilience, NetworkX error." copy_of_graph = copy.deepcopy(graph) fileDict['copy_of_graph'] = copy_of_graph # return based on inputs ret3D = graph.create_json(graph.classList, colorInput) if _3D else None label = True if not label and len(graph.nodes) < 20 else False ret2D = graph.plot_2D(attr, label) if _2D else None fileDict['jgdata'] = ret3D return ret3D, ret2D, attr, systemMeasures def prep(graph): if graph != None and len(graph.G) > 0: if nx.algorithms.bipartite.is_bipartite(graph.G): graph.clustering() graph.closeness_centrality() graph.betweenness_centrality() graph.degree_centrality() # graph.katz_centrality() graph.eigenvector_centrality() graph.load_centrality() def write_to_excel(ret): path = "out/sna/SentimentChange.xlsx" workbook = xlsxw.Workbook(path) for i in range(len(ret)): worksheet = workbook.add_worksheet(str(i)) row = 0 col = 0 for header in ["Source","Target","Sentiment Change"]: worksheet.write(row,col,header) col += 1 col = 0 for line in ret[i]: row += 1 worksheet.write(row, col, line.source) worksheet.write(row, col+1, line.target) worksheet.write(row, col+2, line.change) workbook.close() return path
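# Hedged sketch of the xlsxwriter pattern used by write_to_excel above: one
# worksheet, a header row, then one row per record. The output path and the
# (source, target, change) tuples below are hypothetical.
import xlsxwriter as xlsxw


def write_changes_sheet(path, records):
    workbook = xlsxw.Workbook(path)
    worksheet = workbook.add_worksheet("0")
    for col, header in enumerate(["Source", "Target", "Sentiment Change"]):
        worksheet.write(0, col, header)
    for row, (source, target, change) in enumerate(records, start=1):
        worksheet.write(row, 0, source)
        worksheet.write(row, 1, target)
        worksheet.write(row, 2, change)
    workbook.close()
    return path

# Example call (commented out so importing this module has no side effects):
# write_changes_sheet("out/sna/SentimentChange_example.xlsx", [("A", "B", 0.3)])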
from __future__ import print_function, division __version__ = "0.2.1" __author__ = "Matthew Pitkin ([email protected])" __copyright__ = "Copyright 2016 Matthew Pitkin, Ben Farr and Will Farr" import numpy as np import pandas as pd import scipy.stats as ss import math import matplotlib as mpl from matplotlib import pyplot as pl from matplotlib.lines import Line2D from matplotlib.ticker import ScalarFormatter, MaxNLocator import matplotlib.gridspec as gridspec from matplotlib import transforms as mtransforms from matplotlib import patheffects as PathEffects # A bounded KDE class (inherited from the SciPy Gaussian KDE class) created by # Ben Farr @bfarr class Bounded_2d_kde(ss.gaussian_kde): """ Represents a two-dimensional Gaussian kernel density estimator for a probability distribution function that exists on a bounded domain (by `Ben Farr <https://github.com/bfarr>`_). """ def __init__(self, pts, xlow=None, xhigh=None, ylow=None, yhigh=None, *args, **kwargs): """Initialize with the given bounds. Either ``low`` or ``high`` may be ``None`` if the bounds are one-sided. Extra parameters are passed to :class:`scipy.stats.gaussian_kde`. :param xlow: The lower x domain boundary. :param xhigh: The upper x domain boundary. :param ylow: The lower y domain boundary. :param yhigh: The upper y domain boundary. """ pts = np.atleast_2d(pts) assert pts.ndim == 2, 'Bounded_kde can only be two-dimensional' super(Bounded_2d_kde, self).__init__(pts.T, *args, **kwargs) self._xlow = xlow self._xhigh = xhigh self._ylow = ylow self._yhigh = yhigh @property def xlow(self): """The lower bound of the x domain.""" return self._xlow @property def xhigh(self): """The upper bound of the x domain.""" return self._xhigh @property def ylow(self): """The lower bound of the y domain.""" return self._ylow @property def yhigh(self): """The upper bound of the y domain.""" return self._yhigh def evaluate(self, pts): """Return an estimate of the density evaluated at the given points.""" pts = np.atleast_2d(pts) assert pts.ndim == 2, 'points must be two-dimensional' x, y = pts.T pdf = super(Bounded_2d_kde, self).evaluate(pts.T) if self.xlow is not None: pdf += super(Bounded_2d_kde, self).evaluate([2*self.xlow - x, y]) if self.xhigh is not None: pdf += super(Bounded_2d_kde, self).evaluate([2*self.xhigh - x, y]) if self.ylow is not None: pdf += super(Bounded_2d_kde, self).evaluate([x, 2*self.ylow - y]) if self.yhigh is not None: pdf += super(Bounded_2d_kde, self).evaluate([x, 2*self.yhigh - y]) if self.xlow is not None: if self.ylow is not None: pdf += super(Bounded_2d_kde, self).evaluate([2*self.xlow - x, 2*self.ylow - y]) if self.yhigh is not None: pdf += super(Bounded_2d_kde, self).evaluate([2*self.xlow - x, 2*self.yhigh - y]) if self.xhigh is not None: if self.ylow is not None: pdf += super(Bounded_2d_kde, self).evaluate([2*self.xhigh - x, 2*self.ylow - y]) if self.yhigh is not None: pdf += super(Bounded_2d_kde, self).evaluate([2*self.xhigh - x, 2*self.yhigh - y]) return pdf def __call__(self, pts): pts = np.atleast_2d(pts) out_of_bounds = np.zeros(pts.shape[0], dtype='bool') if self.xlow is not None: out_of_bounds[pts[:, 0] < self.xlow] = True if self.xhigh is not None: out_of_bounds[pts[:, 0] > self.xhigh] = True if self.ylow is not None: out_of_bounds[pts[:, 1] < self.ylow] = True if self.yhigh is not None: out_of_bounds[pts[:, 1] > self.yhigh] = True results = self.evaluate(pts) results[out_of_bounds] = 0. return results class scotchcorner(object): """ Create a corner-style plot. 
Parameters ---------- data : :class:`numpy.ndarray` or :class:`pandas.DataFrame` A (`N` x `ndims`) array of values for the `ndims` parameters bins : int, optional, default: 20 The number of bins in the 1D histogram plots ratio : int, optional, default: 3 The ratio of the size of 1D histograms to the size of the joint plots labels : list, optional A list of names for each of the `ndims` parameters. These are used for the axes labels. If `data` is a :class:`pandas.DataFrame` then the column names of that will be used instead. truths : list, optional, default: None A list of the true values of each parameter datatitle : string, optional A title for the data set to be added as a legend showlims : string, optional, default: None Show edges/borders at the plots limits. Use 'hist' for limits on the 1D histogram plots, 'joint' for borders around 2D joint plots, or 'both' for borders on the 1D and 2D plots. The default (None) is for no borders. limlinestyle : default: 'dotted' The line style for the plot borders hist_kwargs : dict A dictionary of keywords arguments for the histogram function truth_kwargs : dict A dictionary of keyword arguments for plotting true values showpoints: bool, default: True Show the data points in the 2D joint parameter plots thinpoints : float, default: 1.0 Thin-down the number of points being plotted in the 2D scatter plots by this factor scatter_kwargs : dict A dictionary of keyword arguments for the scatter plot function showcontours : bool, default: False Show KDE probability contours for the 2D joint parameter plots (with levels defined by `contour_levels`) contour_kwargs : dict A dictionary of keyword argumemts for the contour plot function contour_levels : list, default: [0.5, 0.9] A list of values between 0 and 1 indicating the probability contour confidence intervals to plot (defaulting to 50% and 90% contours) show_level_labels : bool, default: True Add labels on the contours levels showing their probability use_math_text : bool, default: True Use math text scientific notation for parameter tick mark labelling limits : list, default: None A list of tuples giving the lower and upper limits for each parameter. If limits for some parameters are not known/required then an empty tuple (or `None` within a two value tuple) must be placed in the list for that parameter contour_limits : list, default: None A list of tuples giving the lower and upper limits for each parameter for use when creating credible interval contour for joint plots. If limits for some parameters are not known/required then an empty tuple (or `None` within a two value tuple) must be placed in the list for that parameter subtract_truths : list or tuple, optional, default: None A list/tuple of indices of parameters for which you want to show the distribution centred such that true value is zero. This is only relevent if `truths` are supplied. 
figsize : tuple A two value tuple giving the figure size mplparams : dict A dictionary containing matplotlib configuration values """ def __init__(self, data, bins=20, ratio=3, labels=None, truths=None, datatitle=None, showlims=None, limlinestyle='dotted', showpoints=True, showcontours=False, hist_kwargs={}, truths_kwargs={}, scatter_kwargs={}, contour_kwargs={}, contour_levels=[0.5, 0.9], show_level_labels=True, use_math_text=True, limits=None, contour_limits=None, figsize=None, mplparams=None, thinpoints=1.0, subtract_truths=None): # get number of dimensions in the data self.ndims = data.shape[1] # get number of dimensions in data self.ratio = ratio if isinstance(data, pd.DataFrame): self.labels = data.columns else: self.labels = labels self.truths = truths # true values for each parameter in data self.truths_kwargs = truths_kwargs if self.truths is not None: # must be same number of true values as parameters if len(self.truths) != self.ndims: self.truths = None self.subtract_truths = subtract_truths self.levels = contour_levels self.showpoints = showpoints self.thinpoints = thinpoints self.thinpermutation = None self.showcontours = showcontours self.scatter_kwargs = scatter_kwargs self.contour_kwargs = contour_kwargs self.show_level_labels = show_level_labels self.legend_labels = [] self.use_math_text = use_math_text self.limits = limits # a list of tuples giving the lower and upper limits for each parameter - if some values aren't given then an empty tuple must be placed in the list for that value self.contourlimits = contour_limits # a list of tuples giving the lower and upper limits for each parameter for use in credible interval contours - if some values aren't given then an empty tuple must be placed in the list for that value # default figure size (numbers "stolen" from those used in corner.py that are, to quote, "Some magic numbers for pretty axis layout." factor = 2.0 # size of one side of one panel lbdim = 0.5 * factor # size of left/bottom margin trdim = 0.2 * factor # size of top/right margin whspace = 0.05 # w/hspace size K = self.ndims - 1. + (1./self.ratio) # different from corner.py to account for histogram ratio plotdim = factor * K + factor * (K - 1.) 
* whspace dim = lbdim + plotdim + trdim self.figsize = (dim, dim) # default figure size if figsize is not None: if isinstance(figsize, tuple): if len(figsize) == 2: self.figsize = figsize # set plot parameters if mplparams == None: # set default parameters self.mplparams = { 'text.usetex': True, # use LaTeX for all text 'axes.linewidth': 0.5, # set axes linewidths to 0.5 'axes.grid': False, # add a grid 'font.family': 'sans-serif', 'font.sans-serif': 'Avant Garde, Helvetica, Computer Modern Sans serif', 'font.size': 15, 'legend.fontsize': 'medium', 'legend.frameon': False, 'axes.formatter.limits': (-3, 4)} else: self.mplparams = mplparams mpl.rcParams.update(self.mplparams) # set default hist_kwargs self.hist_kwargs = {'bins': bins, 'histtype': 'stepfilled', 'color': 'lightslategrey', 'alpha': 0.4, 'edgecolor': 'lightslategray', 'linewidth': 1.5} for key in hist_kwargs.keys(): # set any values input self.hist_kwargs[key] = hist_kwargs[key] if bins != 20: if isinstance(bins, int) and bins > 0: self.hist_kwargs['bins'] = bins # create figure self._fig = pl.figure(figsize=self.figsize) self.histhori = [] self.histhori_indices = list(range(0,self.ndims-1)) # indexes of parameters in horizontal histograms self.histvert = [] self.histvert_indices = list(range(1,self.ndims)) # indexes of parameters in vertical histograms self.jointaxes = [] self.jointaxes_indices = [] self._axes = {} # dictionary of axes keyed to parameter names if available # format the figure (again this is stolen from corner.py) lb = lbdim / dim tr = (lbdim + plotdim) / dim self._fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace) # create grid gridsize = self.ratio*(self.ndims-1) + 1 gs = gridspec.GridSpec(gridsize, gridsize, wspace=0.1, hspace=0.1) # empty axes to hold any legend information (if not just a 2D plot) if data.shape[1] > 2: self.legendaxis = self._fig.add_subplot(gs[0:ratio,((self.ndims-2)*ratio+1):(1+(self.ndims-1)*ratio)]) for loc in ['top', 'right', 'left', 'bottom']: self.legendaxis.spines[loc].set_visible(False) # remove borders pl.setp(self.legendaxis.get_xticklabels(), visible=False) # remove xtick labels pl.setp(self.legendaxis.get_yticklabels(), visible=False) # remove ytick labels self.legendaxis.tick_params(bottom=False, top=False, left=False, right=False) # remove tick marks # create figure axes for i in range(self.ndims-1): # vertical histogram (and empty axes) axv = self._fig.add_subplot(gs[i*ratio:(i+1)*ratio,0]) if showlims in ['hist', 'both']: for loc in ['top', 'bottom']: axv.spines[loc].set_alpha(0.2) axv.spines[loc].set_linestyle(limlinestyle) else: axv.spines['top'].set_visible(False) # remove top border axv.spines['bottom'].set_visible(False) # remove bottom border axv.spines['right'].set_visible(False) # remove right border axv.set_xticklabels([]) axv.set_xticks([]) axv.yaxis.set_ticks_position('left') # just show ticks on left self.histvert.append(axv) self.histvert_indices.append(i+1) # horizontal histograms axh = self._fig.add_subplot(gs[-1,(i*ratio+1):(1+(i+1)*ratio)]) axh.spines['top'].set_visible(False) # remove top border if showlims in ['hist', 'both']: for loc in ['left', 'right']: axh.spines[loc].set_alpha(0.2) axh.spines[loc].set_linestyle(limlinestyle) else: axh.spines['left'].set_visible(False) # remove left border axh.spines['right'].set_visible(False) # remove right border axh.set_yticklabels([]) axh.set_yticks([]) axh.xaxis.set_ticks_position('bottom') # just show ticks on bottom self.histhori.append(axh) # joint plots for j in 
range(i+1): axj = self._fig.add_subplot(gs[i*ratio:(i+1)*ratio,(j*ratio+1):(1+(j+1)*ratio)], sharey=self.histvert[i], sharex=self.histhori[j]) if data.shape[1] == 2: # use this as the legend axis self.legendaxis = axj if showlims in ['joint', 'both']: for loc in ['top', 'right', 'left', 'bottom']: axj.spines[loc].set_alpha(0.2) # show border, but with alpha = 0.2 axj.spines[loc].set_linestyle(limlinestyle) else: for loc in ['top', 'right', 'left', 'bottom']: axj.spines[loc].set_visible(False) # remove borders pl.setp(axj.get_xticklabels(), visible=False) # remove xtick labels pl.setp(axj.get_yticklabels(), visible=False) # remove ytick labels axj.tick_params(bottom=False, top=False, left=False, right=False) # remove tick marks self.jointaxes.append(axj) # check for alpha of filled histogram plot if self.hist_kwargs['histtype'] == 'stepfilled': self._check_alpha() # create plots self._add_plots(data, label=datatitle) def add_data(self, data, hist_kwargs={}, datatitle=None, showpoints=True, showcontours=False, scatter_kwargs={}, contour_kwargs={}, truths=None, truths_kwargs={}, contour_levels=[0.5, 0.9], limits=None, contour_limits = None, show_level_labels=True, thinpoints=1.0): """ Add another data set to the plots, `hist_kwargs` are required. """ if data.shape[1] != self.ndims: raise("Error... number of dimensions not the same") # update with any newly supplied histogram keyword arguments for key in hist_kwargs: self.hist_kwargs[key] = hist_kwargs[key] if 'bins' not in self.hist_kwargs: # set default number of bins to 20 self.hist_kwargs['bins'] = 20 if 'linewidth' not in self.hist_kwargs: self.hist_kwargs['linewidth'] = 1.5 self.truths = truths if self.truths is not None: if len(self.truths) != self.ndims: # must be same number of true values as parameters self.truths = None self.scatter_kwargs = scatter_kwargs self.levels = contour_levels self.showpoints = showpoints self.showcontours = showcontours self.contour_kwargs = contour_kwargs self.truths_kwargs = truths_kwargs self.show_level_labels = show_level_labels self.contourlimits = contour_limits self.limits = limits if self.showpoints: if thinpoints != self.thinpoints: self.thinpoints = thinpoints self.thinpermutation = None self._add_plots(data, label=datatitle) def _add_plots(self, data, label=None): """ Add histogram and joint plots to the figure using data Label is a legend label if required. """ # make sure data has enough dimensions (and convert pandas.DataFrame to numpy ndarray) data = np.atleast_1d(data) # set default truth style if self.truths is not None: if 'color' not in self.truths_kwargs: if 'color' in self.hist_kwargs: self.truths_kwargs['color'] = self.hist_kwargs['color'] elif 'edgecolor' in self.hist_kwargs: self.truths_kwargs['color'] = self.hist_kwargs['edgecolor'] else: self.truths_kwargs['color'] == 'k' if 'linestyle' not in self.truths_kwargs: self.truths_kwargs['linestyle'] = '--' if 'linewidth' not in self.truths_kwargs: self.truths_kwargs['linewidth'] = 1.5 # the vertical histogram subval = 0. 
# value to subtract from distribution (to centre truth value at 0) if self.subtract_truths is not None and self.truths is not None: if len(self.truths)-1 in self.subtract_truths and self.truths[-1] is not None: subval = self.truths[-1] self.histvert[-1].hist(data[:,-1]-subval, density=True, orientation='horizontal', label=label, **self.hist_kwargs) if self.truths is not None: if self.truths[-1] is not None: marker = None if 'marker' in self.truths_kwargs: # remove any marker for line marker = self.truths_kwargs.pop('marker') self.histvert[-1].axhline(self.truths[-1]-subval, **self.truths_kwargs) if marker is not None: self.truths_kwargs['marker'] = marker # put legend in the upper right plot _, l1 = self.histvert[-1].get_legend_handles_labels() if self.legend_labels is not None: if self.hist_kwargs['histtype'] == 'stepfilled': lc = self.hist_kwargs['edgecolor'] else: lc = self.hist_kwargs['color'] self.legend_labels.append(Line2D([], [], linewidth=self.hist_kwargs['linewidth'], color=lc)) # create fake line for legend (to use line rather than a box) if data.shape[1] == 2: self.legendaxis.legend(self.legend_labels, l1, loc='best', fancybox=True, framealpha=0.4) else: self.legendaxis.legend(self.legend_labels, l1, loc='lower left') if self.labels is not None: self.histvert[-1].set_ylabel(self.labels[-1]) self._axes[self.labels[-1]] = self.histvert[-1] if self.showpoints: # set default scatter plot kwargs if 'color' in self.hist_kwargs: c = self.hist_kwargs['color'] elif 'fc' in self.hist_kwargs and self.hist_kwargs['histtype'] == 'stepfilled': c = [self.hist_kwargs['fc'][0:3]] else: c = 'b' these_scatter_kwargs = {'c': c, 'marker': 'o', 's': 20, 'alpha': 0.05, 'edgecolors': 'none'} for key in self.scatter_kwargs.keys(): these_scatter_kwargs[key] = self.scatter_kwargs[key] self.scatter_kwargs = these_scatter_kwargs if self.limits is not None: if len(self.limits) != self.ndims: raise("Error... number of dimensions is not the same as the " "number of limits being set") if self.contourlimits is not None: if len(self.contourlimits) != self.ndims: raise("Error... number of dimensions is not the same as the " "number of contour limits being set") if self.showcontours: # set default contour kwargs these_contour_kwargs = {'colors': 'k'} for key in self.contour_kwargs.keys(): these_contour_kwargs[key] = self.contour_kwargs[key] self.contour_kwargs = these_contour_kwargs # the horizontal histograms and joint plots jointcount = 0 rowcount = 0 for i in range(self.ndims-1): # check if subtracting the true values from the distribution subval = 0. 
if self.subtract_truths is not None and self.truths is not None: if i in self.subtract_truths and self.truths[i] is not None: subval = self.truths[i] self.histhori[i].hist(data[:,i]-subval, density=True, **self.hist_kwargs) # make sure axes ranges on vertical histograms match those on the equivalent horizontal histograms if i > 0: xmin, xmax = self.histhori[i].get_xlim() self.histvert[i-1].set_ylim([xmin, xmax]) if self.labels is not None: self.histhori[i].set_xlabel(self.labels[i]) self._axes[self.labels[i]] = self.histhori[i] if self.truths is not None: if self.truths[i] is not None: marker = None if 'marker' in self.truths_kwargs: # remove any marker for line marker = self.truths_kwargs.pop('marker') self.histhori[i].axvline(self.truths[i]-subval, **self.truths_kwargs) if marker is not None: self.truths_kwargs['marker'] = marker for j in range(i+1): if self.labels is not None: if j == 0: self.histvert[rowcount].set_ylabel(self.labels[i+1]) rowcount += 1 self._axes[self.labels[j]+'vs'+self.labels[i+1]] = self.jointaxes[jointcount] # check if subtracting the true values from the distribution subvals = [0., 0.] if self.subtract_truths is not None and self.truths is not None: if self.truths[j] is not None and j in self.subtract_truths: subvals[0] = self.truths[j] if self.truths[i+1] is not None and i+1 in self.subtract_truths: subvals[1] = self.truths[i+1] # get joint axes indices self.jointaxes_indices.append((j, i+1)) if self.showpoints: if self.thinpoints < 1. or self.thinpoints > data.shape[0]: raise("Error... Thinning factor is less than 1 or greater than the total number of data points") if self.thinpermutation is None: # select points randomly for thinning nthinpoints = int(data.shape[0]/self.thinpoints) permutepoints = np.random.permutation(np.arange(data.shape[0]))[:nthinpoints] self.thinpermutation = permutepoints # plot scatter self.jointaxes[jointcount].scatter(data[self.thinpermutation,j]-subvals[0], data[self.thinpermutation,i+1]-subvals[1], **self.scatter_kwargs) if self.showcontours: xlow = xhigh = ylow = yhigh = None # default limits if self.contourlimits is not None: if len(self.contourlimits[j]) == 2: xlow = self.contourlimits[j][0] xhigh = self.contourlimits[j][1] if len(self.contourlimits[i+1]) == 2: ylow = self.contourlimits[i+1][0] yhigh = self.contourlimits[i+1][1] self.plot_bounded_2d_kde_contours(self.jointaxes[jointcount], np.vstack((data[:,j]-subvals[0], data[:,i+1]-subvals[1])).T, xlow=xlow, xhigh=xhigh, ylow=ylow, yhigh=yhigh) if self.truths is not None: if self.truths[j] is not None and self.truths[i+1] is not None: markertmp = None if 'marker' not in self.truths_kwargs: self.truths_kwargs['marker'] = 'x' self.jointaxes[jointcount].plot(self.truths[j]-subvals[0], self.truths[i+1]-subvals[1], **self.truths_kwargs) jointcount += 1 def get_axis(self, param): """ Return the axis for the given "param" (for joint axes "param" should be the required parameters separated by "vs") """ if param in self._axes: return self._axes[param] else: print("Parameter '%s' not one of the axes.") return None def _format_axes(self): """ Set some formatting of the axes """ pl.draw() # force labels to be drawn theselimits = None if self.limits is not None: theselimits = list(self.limits) # local copy of the limits for i, ax in enumerate(self.histhori): # set limits if theselimits is not None: xmin, xmax = ax.get_xlim() # get current limits if len(theselimits[self.histhori_indices[i]]) == 2: xminnew, xmaxnew = theselimits[self.histhori_indices[i]] if xminnew == None: xminnew = xmin if 
xmaxnew == None: xmaxnew = xmax ax.set_xlim([xminnew, xmaxnew]) theselimits[self.histhori_indices[i]] = [xminnew, xmaxnew] # reset local copy of limits to these values (so vertical hists and joint axes have the same ranges) else: theselimits[self.histhori_indices[i]] = [xmin, xmax] # set the local copy of limits, (so vertical hists and joint axes have the same ranges) nbins = min([len(ax.get_xticklabels()), 5]) # make sure there are at least 4 tick marks (after removal of one) and a max of 7 prune = None if i > 0: # remove the lower tick label to avoid overlapping labels prune = 'lower' ax.xaxis.set_major_locator(MaxNLocator(nbins=7, min_n_ticks=nbins, prune=prune)) for l in ax.get_xticklabels(): l.set_rotation(45) ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=self.use_math_text)) self.format_exponents_in_label_single_ax(ax.xaxis) # move exponents into label for i, ax in enumerate(self.histvert): # set limits if theselimits is not None: if len(theselimits[self.histvert_indices[i]]) == 2: ymin, ymax = ax.get_ylim() # get current limits yminnew, ymaxnew = theselimits[self.histvert_indices[i]] if yminnew == None: yminnew = ymin if ymaxnew == None: ymaxnew = ymax ax.set_ylim([yminnew, ymaxnew]) # make sure there are at least 4 tick marks (after removal of one) and a max of 7 nbins = min([len(ax.get_yticklabels()), 5]) prune = None # remove lower tick to avoid overlapping labels if i < len(self.histvert)-1: prune = 'lower' ax.yaxis.set_major_locator(MaxNLocator(nbins=7, min_n_ticks=nbins, prune=prune)) ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=self.use_math_text)) self.format_exponents_in_label_single_ax(ax.yaxis) # move exponents into label for i, ax in enumerate(self.jointaxes): # remove any offset text from shared axes caused by the scalar formatter for MathText ax.xaxis.offsetText.set_visible(False) ax.yaxis.offsetText.set_visible(False) if theselimits is not None: if len(theselimits[self.jointaxes_indices[i][0]]) == 2: xmin, xmax = ax.get_xlim() # get current limits xminnew, xmaxnew = theselimits[self.jointaxes_indices[i][0]] if xminnew == None: xminnew = xmin if xmaxnew == None: xmaxnew = xmax dx = 0.02*(xmaxnew-xminnew) # add a little bit of space ax.set_xlim([xminnew-dx, xmaxnew+dx]) if len(theselimits[self.jointaxes_indices[i][1]]) == 2: ymin, ymax = ax.get_ylim() # get current limits yminnew, ymaxnew = theselimits[self.jointaxes_indices[i][1]] if yminnew == None: yminnew = ymin if ymaxnew == None: ymaxnew = ymax dy = 0.02*(ymaxnew-yminnew) # add a little bit of space ax.set_ylim([yminnew-dy, ymaxnew+dy]) def plot_bounded_2d_kde_contours(self, ax, pts, xlow=None, xhigh=None, ylow=None, yhigh=None, transform=None, gridsize=250, clip=None): """ Function (based on that in `plotutils` by `Will Farr <https://github.com/farr>`_ and edited by `Ben Farr <https://github.com/bfarr>`_) for plotting contours from a bounded 2d KDE. 
""" if transform is None: transform = lambda x: x # Determine the clipping if clip is None: clip = [(-np.inf, np.inf), (-np.inf, np.inf)] elif np.ndim(clip) == 1: clip = [clip, clip] # Calculate the KDE Npts = pts.shape[0] kde_pts = transform(pts[:int(Npts/2), :]) den_pts = transform(pts[int(Npts/2):, :]) Nden = den_pts.shape[0] post_kde = Bounded_2d_kde(kde_pts, xlow=xlow, xhigh=xhigh, ylow=ylow, yhigh=yhigh) den = post_kde(den_pts) densort = np.sort(den)[::-1] zvalues = [] for level in self.levels: ilevel = int(Nden*level + 0.5) if ilevel >= Nden: ilevel = Nden-1 zvalues.append(densort[ilevel]) # sort into ascending order (required in Matplotlib v 1.5.1) zvalues.sort() x = pts[:,0] y = pts[:,1] deltax = x.max() - x.min() deltay = y.max() - y.min() x_pts = np.linspace(x.min() - .1*deltax, x.max() + .1*deltax, gridsize) y_pts = np.linspace(y.min() - .1*deltay, y.max() + .1*deltay, gridsize) xx, yy = np.meshgrid(x_pts, y_pts) positions = np.column_stack([xx.ravel(), yy.ravel()]) z = np.reshape(post_kde(transform(positions)), xx.shape) # Black (thin) contours with while outlines by default self.contour_kwargs['linewidths'] = self.contour_kwargs.get('linewidths', 1.) # Plot the contours (plot them seperately) for k, level in enumerate(self.levels): alpha = self.contour_kwargs.pop('alpha', 1.0) self.contour_kwargs['alpha'] = level # set tranparency to the contour level cset = ax.contour(xx, yy, z, [zvalues[k]], **self.contour_kwargs) self.contour_kwargs['alpha'] = alpha # Add white outlines if self.contour_kwargs['colors'] == 'k': pl.setp(cset.collections, path_effects=[PathEffects.withStroke(linewidth=1.5, foreground="w")]) fmt = {} fmt[cset.levels[0]] = '{}%'.format(int(100*level)) if self.show_level_labels: lw = self.contour_kwargs.pop('linewidths') alpha = self.contour_kwargs.pop('alpha') ax.clabel(cset, cset.levels, fmt=fmt, fontsize=11, **self.contour_kwargs) pl.setp(cset.labelTexts, color='k', path_effects=[PathEffects.withStroke(linewidth=1.5, foreground="w")]) self.contour_kwargs['linewidths'] = lw self.contour_kwargs['alpha'] = alpha def _check_alpha(self): """ Use alpha transparency on (step filled) histogram patches, but not on edges (based on the answer `here <http://stackoverflow.com/a/28398471/1862861>`_) """ if 'alpha' in self.hist_kwargs: alpha = self.hist_kwargs.pop('alpha') if 'color' in self.hist_kwargs: cl = self.hist_kwargs.pop('color') else: # default to blue if no color is given cl = 'blue' if not isinstance(cl, tuple): # import these to get RGB color codes for names colors from matplotlib import colors as cs if cl in cs.cnames: rgbcolor = cs.hex2color(cs.cnames[cl]) else: print("histogram color '%s' not recognised. 
Defaulting to blue" % cl) rgbcolor = cs.hex2color(cs.cnames['blue']) # add facecolor 'fc' to hist_kwargs ctup = rgbcolor + (alpha,) else: if len(cl) == 3: ctup = cl + (alpha,) else: ctup = cl # add tuple (r, g, b, alpha) facecolor 'fc' to hist_kwargs self.hist_kwargs['fc'] = ctup def update_label(self, old_label, exponent_text): """ Method to transform given label into the new label (this function comes from `this patch <https://github.com/dfm/corner.py/pull/53/files>`_ to `corner.py <https://github.com/dfm/corner.py>`_ by `Greg Ashton <https://github.com/ga7g08>`_) """ if exponent_text == "": return old_label try: units = old_label[old_label.index("[") + 1:old_label.rindex("]")] except ValueError: units = "" label = old_label.replace("[{}]".format(units), "") exponent_text = exponent_text.replace("\\times", "") if units == "": if label == "": s = r"[{}]".format(exponent_text) else: s = r"{} [{}]".format(label, exponent_text) else: if label == "": s = r"[{} {}]".format(exponent_text, units) else: s = r"{} [{} {}]".format(label, exponent_text, units) return s def format_exponents_in_label_single_ax(self, ax): """ Routine for a single axes instance (by Greg Ashton) """ exponent_text = ax.get_offset_text().get_text() exponent_text = exponent_text.replace("\\mathdefault", "") label = ax.get_label().get_text() ax.offsetText.set_visible(False) ax.set_label_text(self.update_label(label, exponent_text)) def savefig(self, filename): """ Save the figure Parameters ---------- filename : str, required The filename of the figure to save. The figure format is determined by the file extension. """ self._format_axes() self._fig.savefig(filename) def show(self): """ Show the figure """ self._format_axes() self._fig.show() @property def fig(self): """ Return the :class:`matplotlib.figure.Figure` """ return self._fig
import numpy as np from .crf import CRF from ..utils import expand_sym, compress_sym class GraphCRF(CRF): """Pairwise CRF on a general graph. Pairwise potentials the same for all edges, are symmetric by default (``directed=False``). This leads to n_classes parameters for unary potentials. If ``directed=True``, there are ``n_classes * n_classes`` parameters for pairwise potentials, if ``directed=False``, there are only ``n_classes * (n_classes + 1) / 2`` (for a symmetric matrix). Examples, i.e. X, are given as an iterable of n_examples. An example, x, is represented as a tuple (features, edges) where features is a numpy array of shape (n_nodes, n_attributes), and edges is is an array of shape (n_edges, 2), representing the graph. Labels, Y, are given as an iterable of n_examples. Each label, y, in Y is given by a numpy array of shape (n_nodes,). There are n_states * n_features parameters for unary potentials. For edge potential parameters, there are n_state * n_states permutations, i.e. :: state_1 state_2 state 3 state_1 1 2 3 state_2 4 5 6 state_3 7 8 9 The fitted parameters of this model will be returned as an array with the first n_states * n_features elements representing the unary potentials parameters, followed by the edge potential parameters. Say we have two state, A and B, and two features 1 and 2. The unary potential parameters will be returned as [A1, A2, B1, B2]. If ``directed=True`` the edge potential parameters will return n_states * n_states parameters. The rows are senders and the columns are recievers, i.e. the edge potential state_2 -> state_1 is [2,1]; 4 in the above matrix. The above edge potential parameters example would be returned as [1, 2, 3, 4, 5, 6, 7, 8, 9] (see numpy.ravel). If edges are undirected, the edge potential parameter matrix is assumed to be symmetric and only the lower triangle is returned, i.e. [1, 4, 5, 7, 8, 9]. Parameters ---------- n_states : int, default=None Number of states for all variables. Inferred from data if not provided. n_features : int, default=None Number of features per node. Inferred from data if not provided. inference_method : string or None, default=None Function to call do do inference and loss-augmented inference. Possible values are: - 'max-product' for max-product belief propagation. Recommended for chains an trees. Loopy belief propagation in case of a general graph. - 'lp' for Linear Programming relaxation using cvxopt. - 'ad3' for AD3 dual decomposition. - 'qpbo' for QPBO + alpha expansion. - 'ogm' for OpenGM inference algorithms. If None, ad3 is used if installed, otherwise lp. class_weight : None, or array-like Class weights. If an array-like is passed, it must have length n_classes. None means equal class weights. directed : boolean, default=False Whether to model directed or undirected connections. In undirected models, interaction terms are symmetric, so an edge ``a -> b`` has the same energy as ``b -> a``. 
""" def __init__(self, n_states=None, n_features=None, inference_method=None, class_weight=None, directed=False): self.directed = directed CRF.__init__(self, n_states, n_features, inference_method, class_weight=class_weight) # n_states unary parameters, upper triangular for pairwise def _set_size_joint_feature(self): # try to set the size of joint_feature if possible if self.n_features is not None and self.n_states is not None: if self.directed: self.size_joint_feature = (self.n_states * self.n_features + self.n_states ** 2) else: self.size_joint_feature = ( self.n_states * self.n_features + self.n_states * (self.n_states + 1) / 2) def _get_edges(self, x): return x[1] def _get_features(self, x): return x[0] def _get_pairwise_potentials(self, x, w): """Computes pairwise potentials for x and w. Parameters ---------- x : tuple Instance Representation. w : ndarray, shape=(size_joint_feature,) Weight vector for CRF instance. Returns ------- pairwise : ndarray, shape=(n_states, n_states) Pairwise weights. """ self._check_size_w(w) self._check_size_x(x) pw = w[self.n_states * self.n_features:] if self.directed: return pw.reshape(self.n_states, self.n_states) return expand_sym(pw) def _get_unary_potentials(self, x, w): """Computes unary potentials for x and w. Parameters ---------- x : tuple Instance Representation. w : ndarray, shape=(size_joint_feature,) Weight vector for CRF instance. Returns ------- unary : ndarray, shape=(n_states) Unary weights. """ self._check_size_w(w) self._check_size_x(x) features = self._get_features(x) unary_params = w[:self.n_states * self.n_features].reshape( self.n_states, self.n_features) return np.dot(features, unary_params.T) def joint_feature(self, x, y): """Feature vector associated with instance (x, y). Feature representation joint_feature, such that the energy of the configuration (x, y) and a weight vector w is given by np.dot(w, joint_feature(x, y)). Parameters ---------- x : tuple Unary evidence. y : ndarray or tuple Either y is an integral ndarray, giving a complete labeling for x. Or it is the result of a linear programming relaxation. In this case, ``y=(unary_marginals, pariwise_marginals)``. Returns ------- p : ndarray, shape (size_joint_feature,) Feature vector associated with state (x, y). """ self._check_size_x(x) features, edges = self._get_features(x), self._get_edges(x) n_nodes = features.shape[0] if isinstance(y, tuple): # y is result of relaxation, tuple of unary and pairwise marginals unary_marginals, pw = y unary_marginals = unary_marginals.reshape(n_nodes, self.n_states) # accumulate pairwise pw = pw.reshape(-1, self.n_states, self.n_states).sum(axis=0) else: y = y.reshape(n_nodes) gx = np.ogrid[:n_nodes] #make one hot encoding unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.int) gx = np.ogrid[:n_nodes] unary_marginals[gx, y] = 1 ##accumulated pairwise pw = np.dot(unary_marginals[edges[:, 0]].T, unary_marginals[edges[:, 1]]) unaries_acc = np.dot(unary_marginals.T, features) if self.directed: pw = pw.ravel() else: pw = compress_sym(pw) joint_feature_vector = np.hstack([unaries_acc.ravel(), pw]) return joint_feature_vector
# Copyright 2016 Andrew Bogott for the Wikimedia Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ldap import ldap.modlist from keystone import exception from oslo_log import log as logging from oslo_config import cfg LOG = logging.getLogger('nova.%s' % __name__) def _getLdapInfo(attr, conffile="/etc/ldap.conf"): try: f = open(conffile) except IOError: if conffile == "/etc/ldap.conf": # fallback to /etc/ldap/ldap.conf, which will likely # have less information f = open("/etc/ldap/ldap.conf") for line in f: if line.strip() == "": continue if line.split()[0].lower() == attr.lower(): return line.split(None, 1)[1].strip() break def _open_ldap(): ldapHost = _getLdapInfo("uri") sslType = _getLdapInfo("ssl") binddn = cfg.CONF.ldap.user bindpw = cfg.CONF.ldap.password ds = ldap.initialize(ldapHost) ds.protocol_version = ldap.VERSION3 if sslType == "start_tls": ds.start_tls_s() try: ds.simple_bind_s(binddn, bindpw) return ds except ldap.CONSTRAINT_VIOLATION: LOG.debug("LDAP bind failure: Too many failed attempts.\n") except ldap.INVALID_DN_SYNTAX: LOG.debug("LDAP bind failure: The bind DN is incorrect... \n") except ldap.NO_SUCH_OBJECT: LOG.debug("LDAP bind failure: " "Unable to locate the bind DN account.\n") except ldap.UNWILLING_TO_PERFORM as msg: LOG.debug("LDAP bind failure: " "The LDAP server was unwilling to perform the action" " requested.\nError was: %s\n" % msg[0]["info"]) except ldap.INVALID_CREDENTIALS: LOG.debug("LDAP bind failure: Password incorrect.\n") return None # ds is presumed to be an already-open ldap connection def _all_groups(ds): basedn = cfg.CONF.wmfhooks.ldap_group_base_dn allgroups = ds.search_s(basedn, ldap.SCOPE_ONELEVEL) return allgroups # ds is presumed to be an already-open ldap connection def _get_next_gid_number(ds): highest = cfg.CONF.wmfhooks.minimum_gid_number for group in _all_groups(ds): if 'gidNumber' in group[1]: number = int(group[1]['gidNumber'][0]) if number > highest: highest = number # Fixme: Check against a hard max gid number limit? return highest + 1 # ds should be an already-open ldap connection. 
# # groupname is the name of the group to create, probably project-<projectname> def _get_ldap_group(ds, groupname): basedn = cfg.CONF.wmfhooks.ldap_group_base_dn searchdn = "cn=%s,%s" % (groupname, basedn) try: thisgroup = ds.search_s(searchdn, ldap.SCOPE_BASE) return thisgroup except ldap.LDAPError: return None def delete_ldap_project_group(project_id): basedn = cfg.CONF.wmfhooks.ldap_group_base_dn groupname = "project-%s" % project_id.encode('utf-8') dn = "cn=%s,%s" % (groupname, basedn) ds = _open_ldap() if not ds: LOG.error("Failed to connect to ldap; Leak a project group.") raise exception.ValidationError() try: ds.delete_s(dn) except ldap.LDAPError as e: LOG.warning("Failed to delete %s from ldap: %s" % (dn, e)) # delete everything under the project subtree basedn = cfg.CONF.wmfhooks.ldap_project_base_dn projectbase = "cn=%s,%s" % (project_id, basedn) search = ds.search_s(projectbase, ldap.SCOPE_SUBTREE) delete_list = [record for record, _ in search] delete_list.reverse() for record in delete_list: try: ds.delete_s(record) except ldap.LDAPError as e: LOG.warning("Failed to delete %s from ldap" % (record, e)) def sync_ldap_project_group(project_id, keystone_assignments): groupname = "project-%s" % project_id.encode('utf-8') LOG.info("Syncing keystone project membership with ldap group %s" % groupname) ds = _open_ldap() if not ds: LOG.error("Failed to connect to ldap; cannot set up new project.") raise exception.ValidationError() allusers = set() for key in keystone_assignments: allusers |= set(keystone_assignments[key]) if 'novaobserver' in allusers: allusers.remove('novaobserver') basedn = cfg.CONF.wmfhooks.ldap_user_base_dn members = ["uid=%s,%s" % (user.encode('utf-8'), basedn) for user in allusers] basedn = cfg.CONF.wmfhooks.ldap_group_base_dn dn = "cn=%s,%s" % (groupname, basedn) existingEntry = _get_ldap_group(ds, groupname) if existingEntry: # We're modifying an existing group oldEntry = existingEntry[0][1] newEntry = oldEntry.copy() newEntry['member'] = members modlist = ldap.modlist.modifyModlist(oldEntry, newEntry) if modlist: ds.modify_s(dn, modlist) else: # We're creating a new group from scratch. # There is a potential race between _get_next_git_number() # and ds.add_s, so we make a few attempts. # around this function. groupEntry = {} groupEntry['member'] = members groupEntry['objectClass'] = ['groupOfNames', 'posixGroup', 'top'] groupEntry['cn'] = [groupname] for i in range(0, 4): groupEntry['gidNumber'] = [str(_get_next_gid_number(ds))] modlist = ldap.modlist.addModlist(groupEntry) try: ds.add_s(dn, modlist) break except ldap.LDAPError: LOG.warning("Failed to create group, attempt number %s: %s" % (i, modlist)) def create_sudo_defaults(project_id): ds = _open_ldap() if not ds: LOG.error("Failed to connect to ldap; Unable to create sudo rules.") raise exception.ValidationError() userbasedn = cfg.CONF.wmfhooks.ldap_user_base_dn basedn = cfg.CONF.wmfhooks.ldap_project_base_dn projectbase = "cn=%s,%s" % (project_id, basedn) # We may or may not already have one of these... if it fails just move on. 
projectEntry = {} projectEntry['objectClass'] = ['extensibleobject', 'groupofnames', 'top'] projectEntry['member'] = ["uid=%s,%s" % (cfg.CONF.wmfhooks.admin_user, userbasedn)] modlist = ldap.modlist.addModlist(projectEntry) try: ds.add_s(projectbase, modlist) except ldap.LDAPError as e: LOG.warning("Failed to create project base %s in ldap: %s" % (projectbase, e)) sudoerbase = "ou=sudoers,%s" % projectbase sudoEntry = {} sudoEntry['objectClass'] = ['organizationalunit', 'top'] modlist = ldap.modlist.addModlist(sudoEntry) try: ds.add_s(sudoerbase, modlist) except ldap.LDAPError as e: LOG.warning("Failed to create base sudoer group: %s" % e) sudoEntry = {} defaultdn = "cn=default-sudo,%s" % sudoerbase sudoEntry['objectClass'] = ['sudoRole'] sudoEntry['sudoUser'] = ['%%project-%s' % project_id.encode('utf8')] sudoEntry['sudoCommand'] = ['ALL'] sudoEntry['sudoOption'] = ['!authenticate'] sudoEntry['sudoHost'] = ['ALL'] sudoEntry['cn'] = ['default-sudo'] modlist = ldap.modlist.addModlist(sudoEntry) try: ds.add_s(defaultdn, modlist) except ldap.LDAPError as e: LOG.warning("Failed to create default sudoer entry: %s" % e) defaultasdn = "cn=default-sudo-as,%s" % sudoerbase # The runas entry is the same as the default entry, plus one field sudoEntry['sudoRunAsUser'] = ["%%project-%s" % project_id.encode('utf8')] sudoEntry['cn'] = ['default-sudo-as'] modlist = ldap.modlist.addModlist(sudoEntry) try: ds.add_s(defaultasdn, modlist) except ldap.LDAPError as e: LOG.warning("Failed to create default sudo-as entry: %s" % e)
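# As a rough illustration of the diff-based update in sync_ldap_project_group()
# above: ldap.modlist.modifyModlist() turns an old/new entry pair into the
# change list passed to modify_s(), and an unchanged entry yields an empty
# modlist, which is why no write happens in that case. The DNs and member
# names below are made up.
import ldap.modlist

old_entry = {
    'cn': ['project-testproject'],
    'member': ['uid=alice,ou=people,dc=example,dc=org'],
}
new_entry = old_entry.copy()
new_entry['member'] = [
    'uid=alice,ou=people,dc=example,dc=org',
    'uid=bob,ou=people,dc=example,dc=org',
]

modlist = ldap.modlist.modifyModlist(old_entry, new_entry)
print(modlist)   # e.g. a delete/add pair for the changed 'member' attribute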
# Copyright (c) 2008 Giorgos Verigakis <[email protected]> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division from ctypes import * from ctypes.util import find_library from errno import * from functools import partial from platform import machine, system from stat import S_IFDIR from traceback import print_exc _system = system() _machine = machine() # Locate the fuse shared library. # On OSX this can be provided by a number of different packages # with slightly incompatible interfaces. if _system == 'Darwin': _libfuse_path = find_library('fuse4x') or find_library('fuse') else: _libfuse_path = find_library('fuse') if not _libfuse_path: raise EnvironmentError('Unable to find libfuse') if _system == 'Darwin': _libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency _libfuse = CDLL(_libfuse_path) # Check whether OSX is using the legacy "macfuse" system. # This has a different struct layout than the newer fuse4x system. if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'): _system = 'Darwin-MacFuse' class c_timespec(Structure): _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] class c_utimbuf(Structure): _fields_ = [('actime', c_timespec), ('modtime', c_timespec)] class c_stat(Structure): pass # Platform dependent if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'): ENOTSUP = 45 c_dev_t = c_int32 c_fsblkcnt_t = c_ulong c_fsfilcnt_t = c_ulong c_gid_t = c_uint32 c_mode_t = c_uint16 c_off_t = c_int64 c_pid_t = c_int32 c_uid_t = c_uint32 setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int, c_uint32) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_uint32) # OSX with fuse4x uses 64-bit inodes and so has a different # struct layout. Other darwinish platforms use 32-bit inodes. 
if _system == 'Darwin': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_mode', c_mode_t), ('st_nlink', c_uint16), ('st_ino', c_uint64), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_birthtimespec', c_timespec), ('st_size', c_off_t), ('st_blocks', c_int64), ('st_blksize', c_int32), ('st_flags', c_int32), ('st_gen', c_int32), ('st_lspare', c_int32), ('st_qspare', c_int64)] else: c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_uint32), ('st_mode', c_mode_t), ('st_nlink', c_uint16), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_size', c_off_t), ('st_blocks', c_int64), ('st_blksize', c_int32)] elif _system == 'Linux': ENOTSUP = 95 c_dev_t = c_ulonglong c_fsblkcnt_t = c_ulonglong c_fsfilcnt_t = c_ulonglong c_gid_t = c_uint c_mode_t = c_uint c_off_t = c_longlong c_pid_t = c_int c_uid_t = c_uint setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) _machine = machine() if _machine == 'x86_64': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulong), ('st_nlink', c_ulong), ('st_mode', c_mode_t), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('__pad0', c_int), ('st_rdev', c_dev_t), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_long), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] elif _machine == 'ppc': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulonglong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2', c_ushort), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_longlong), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] else: # i686, use as fallback for everything else c_stat._fields_ = [ ('st_dev', c_dev_t), ('__pad1', c_ushort), ('__st_ino', c_ulong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2', c_ushort), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_longlong), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_ino', c_ulonglong)] else: raise NotImplementedError('%s is not supported.' 
% _system) class c_statvfs(Structure): _fields_ = [ ('f_bsize', c_ulong), ('f_frsize', c_ulong), ('f_blocks', c_fsblkcnt_t), ('f_bfree', c_fsblkcnt_t), ('f_bavail', c_fsblkcnt_t), ('f_files', c_fsfilcnt_t), ('f_ffree', c_fsfilcnt_t), ('f_favail', c_fsfilcnt_t)] if _system == 'FreeBSD': c_fsblkcnt_t = c_uint64 c_fsfilcnt_t = c_uint64 setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) class c_statvfs(Structure): _fields_ = [ ('f_bavail', c_fsblkcnt_t), ('f_bfree', c_fsblkcnt_t), ('f_blocks', c_fsblkcnt_t), ('f_favail', c_fsfilcnt_t), ('f_ffree', c_fsfilcnt_t), ('f_files', c_fsfilcnt_t), ('f_bsize', c_ulong), ('f_flag', c_ulong), ('f_frsize', c_ulong)] class fuse_file_info(Structure): _fields_ = [ ('flags', c_int), ('fh_old', c_ulong), ('writepage', c_int), ('direct_io', c_uint, 1), ('keep_cache', c_uint, 1), ('flush', c_uint, 1), ('padding', c_uint, 29), ('fh', c_uint64), ('lock_owner', c_uint64)] class fuse_context(Structure): _fields_ = [ ('fuse', c_voidp), ('uid', c_uid_t), ('gid', c_gid_t), ('pid', c_pid_t), ('private_data', c_voidp)] class fuse_operations(Structure): _fields_ = [ ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))), ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), ('getdir', c_voidp), # Deprecated, use readdir ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)), ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)), ('unlink', CFUNCTYPE(c_int, c_char_p)), ('rmdir', CFUNCTYPE(c_int, c_char_p)), ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)), ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)), ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)), ('utime', c_voidp), # Deprecated, use utimens ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, POINTER(fuse_file_info))), ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, POINTER(fuse_file_info))), ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))), ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), ('setxattr', setxattr_t), ('getxattr', getxattr_t), ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp, c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))), ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), ('init', CFUNCTYPE(c_voidp, c_voidp)), ('destroy', CFUNCTYPE(c_voidp, c_voidp)), ('access', CFUNCTYPE(c_int, c_char_p, c_int)), ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))), ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))), ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat), POINTER(fuse_file_info))), ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)), ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))), ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))] 
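# The fields above are CFUNCTYPE prototypes; the FUSE class below fills them
# by wrapping its bound methods so libfuse can call back into Python. A small
# standalone sketch of that ctypes pattern (the callback and values are
# illustrative only):
from ctypes import CFUNCTYPE, c_int, c_char_p

ACCESS_T = CFUNCTYPE(c_int, c_char_p, c_int)

def py_access(path, amode):
    print('access(%r, %d)' % (path, amode))
    return 0

c_access = ACCESS_T(py_access)   # a C function pointer backed by py_access
print(c_access(b'/tmp', 4))      # callable from C, or directly as shown here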
def time_of_timespec(ts): return ts.tv_sec + ts.tv_nsec / 10 ** 9 def set_st_attrs(st, attrs): for key, val in attrs.items(): if key in ('st_atime', 'st_mtime', 'st_ctime'): timespec = getattr(st, key + 'spec') timespec.tv_sec = int(val) timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9) elif hasattr(st, key): setattr(st, key, val) _libfuse.fuse_get_context.restype = POINTER(fuse_context) def fuse_get_context(): """Returns a (uid, gid, pid) tuple""" ctxp = _libfuse.fuse_get_context() ctx = ctxp.contents return ctx.uid, ctx.gid, ctx.pid class FUSE(object): """This class is the lower level interface and should not be subclassed under normal use. Its methods are called by fuse. Assumes API version 2.6 or later.""" def __init__(self, operations, mountpoint, raw_fi=False, **kwargs): """Setting raw_fi to True will cause FUSE to pass the fuse_file_info class as is to Operations, instead of just the fh field. This gives you access to direct_io, keep_cache, etc.""" self.operations = operations self.raw_fi = raw_fi args = ['fuse'] if kwargs.pop('foreground', False): args.append('-f') if kwargs.pop('debug', False): args.append('-d') if kwargs.pop('nothreads', False): args.append('-s') kwargs.setdefault('fsname', operations.__class__.__name__) args.append('-o') args.append(','.join(key if val == True else '%s=%s' % (key, val) for key, val in kwargs.items())) args.append(mountpoint) argv = (c_char_p * len(args))(*args) fuse_ops = fuse_operations() for name, prototype in fuse_operations._fields_: if prototype != c_voidp and getattr(operations, name, None): op = partial(self._wrapper_, getattr(self, name)) setattr(fuse_ops, name, prototype(op)) _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops), sizeof(fuse_ops), None) del self.operations # Invoke the destructor def _wrapper_(self, func, *args, **kwargs): """Decorator for the methods that follow""" try: return func(*args, **kwargs) or 0 except OSError, e: return -(e.errno or EFAULT) except: print_exc() return -EFAULT def getattr(self, path, buf): return self.fgetattr(path, buf, None) def readlink(self, path, buf, bufsize): ret = self.operations('readlink', path) data = create_string_buffer(ret[:bufsize - 1]) memmove(buf, data, len(data)) return 0 def mknod(self, path, mode, dev): return self.operations('mknod', path, mode, dev) def mkdir(self, path, mode): return self.operations('mkdir', path, mode) def unlink(self, path): return self.operations('unlink', path) def rmdir(self, path): return self.operations('rmdir', path) def symlink(self, source, target): return self.operations('symlink', target, source) def rename(self, old, new): return self.operations('rename', old, new) def link(self, source, target): return self.operations('link', target, source) def chmod(self, path, mode): return self.operations('chmod', path, mode) def chown(self, path, uid, gid): return self.operations('chown', path, uid, gid) def truncate(self, path, length): return self.operations('truncate', path, length) def open(self, path, fip): fi = fip.contents if self.raw_fi: return self.operations('open', path, fi) else: fi.fh = self.operations('open', path, fi.flags) return 0 def read(self, path, buf, size, offset, fip): fh = fip.contents if self.raw_fi else fip.contents.fh ret = self.operations('read', path, size, offset, fh) if ret: strbuf = create_string_buffer(ret) memmove(buf, strbuf, len(strbuf)) return len(ret) def write(self, path, buf, size, offset, fip): data = string_at(buf, size) fh = fip.contents if self.raw_fi else fip.contents.fh return 
self.operations('write', path, data, offset, fh) def statfs(self, path, buf): stv = buf.contents attrs = self.operations('statfs', path) for key, val in attrs.items(): if hasattr(stv, key): setattr(stv, key, val) return 0 def flush(self, path, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('flush', path, fh) def release(self, path, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('release', path, fh) def fsync(self, path, datasync, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('fsync', path, datasync, fh) def setxattr(self, path, name, value, size, options, *args): data = string_at(value, size) return self.operations('setxattr', path, name, data, options, *args) def getxattr(self, path, name, value, size, *args): ret = self.operations('getxattr', path, name, *args) retsize = len(ret) buf = create_string_buffer(ret, retsize) # Does not add trailing 0 if bool(value): if retsize > size: return -ERANGE memmove(value, buf, retsize) return retsize def listxattr(self, path, namebuf, size): ret = self.operations('listxattr', path) if ret: buf = create_string_buffer('\x00'.join(ret)) else: buf = '' bufsize = len(buf) if bool(namebuf): if bufsize > size: return -ERANGE memmove(namebuf, buf, bufsize) return bufsize def removexattr(self, path, name): return self.operations('removexattr', path, name) def opendir(self, path, fip): # Ignore raw_fi fip.contents.fh = self.operations('opendir', path) return 0 def readdir(self, path, buf, filler, offset, fip): # Ignore raw_fi for item in self.operations('readdir', path, fip.contents.fh): if isinstance(item, str): name, st, offset = item, None, 0 else: name, attrs, offset = item if attrs: st = c_stat() set_st_attrs(st, attrs) else: st = None if filler(buf, name, st, offset) != 0: break return 0 def releasedir(self, path, fip): # Ignore raw_fi return self.operations('releasedir', path, fip.contents.fh) def fsyncdir(self, path, datasync, fip): # Ignore raw_fi return self.operations('fsyncdir', path, datasync, fip.contents.fh) def init(self, conn): return self.operations('init', '/') def destroy(self, private_data): return self.operations('destroy', '/') def access(self, path, amode): return self.operations('access', path, amode) def create(self, path, mode, fip): fi = fip.contents if self.raw_fi: return self.operations('create', path, mode, fi) else: fi.fh = self.operations('create', path, mode) return 0 def ftruncate(self, path, length, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('truncate', path, length, fh) def fgetattr(self, path, buf, fip): memset(buf, 0, sizeof(c_stat)) st = buf.contents fh = fip and (fip.contents if self.raw_fi else fip.contents.fh) attrs = self.operations('getattr', path, fh) set_st_attrs(st, attrs) return 0 def lock(self, path, fip, cmd, lock): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('lock', path, fh, cmd, lock) def utimens(self, path, buf): if buf: atime = time_of_timespec(buf.contents.actime) mtime = time_of_timespec(buf.contents.modtime) times = (atime, mtime) else: times = None return self.operations('utimens', path, times) def bmap(self, path, blocksize, idx): return self.operations('bmap', path, blocksize, idx) class Operations(object): """This class should be subclassed and passed as an argument to FUSE on initialization. All operations should raise an OSError exception on error. 
When in doubt of what an operation should do, check the FUSE header file or the corresponding system call man page.""" def __call__(self, op, *args): if not hasattr(self, op): raise OSError(EFAULT, '') return getattr(self, op)(*args) def access(self, path, amode): return 0 bmap = None def chmod(self, path, mode): raise OSError(EROFS, '') def chown(self, path, uid, gid): raise OSError(EROFS, '') def create(self, path, mode, fi=None): """When raw_fi is False (default case), fi is None and create should return a numerical file handle. When raw_fi is True the file handle should be set directly by create and return 0.""" raise OSError(EROFS, '') def destroy(self, path): """Called on filesystem destruction. Path is always /""" pass def flush(self, path, fh): return 0 def fsync(self, path, datasync, fh): return 0 def fsyncdir(self, path, datasync, fh): return 0 def getattr(self, path, fh=None): """Returns a dictionary with keys identical to the stat C structure of stat(2). st_atime, st_mtime and st_ctime should be floats. NOTE: There is an incombatibility between Linux and Mac OS X concerning st_nlink of directories. Mac OS X counts all files inside the directory, while Linux counts only the subdirectories.""" if path != '/': raise OSError(ENOENT, '') return dict(st_mode=(S_IFDIR | 0755), st_nlink=2) def getxattr(self, path, name, position=0): raise OSError(ENOTSUP, '') def init(self, path): """Called on filesystem initialization. Path is always / Use it instead of __init__ if you start threads on initialization.""" pass def link(self, target, source): raise OSError(EROFS, '') def listxattr(self, path): return [] lock = None def mkdir(self, path, mode): raise OSError(EROFS, '') def mknod(self, path, mode, dev): raise OSError(EROFS, '') def open(self, path, flags): """When raw_fi is False (default case), open should return a numerical file handle. When raw_fi is True the signature of open becomes: open(self, path, fi) and the file handle should be set directly.""" return 0 def opendir(self, path): """Returns a numerical file handle.""" return 0 def read(self, path, size, offset, fh): """Returns a string containing the data requested.""" raise OSError(ENOENT, '') def readdir(self, path, fh): """Can return either a list of names, or a list of (name, attrs, offset) tuples. attrs is a dict as in getattr.""" return ['.', '..'] def readlink(self, path): raise OSError(ENOENT, '') def release(self, path, fh): return 0 def releasedir(self, path, fh): return 0 def removexattr(self, path, name): raise OSError(ENOTSUP, '') def rename(self, old, new): raise OSError(EROFS, '') def rmdir(self, path): raise OSError(EROFS, '') def setxattr(self, path, name, value, options, position=0): raise OSError(ENOTSUP, '') def statfs(self, path): """Returns a dictionary with keys identical to the statvfs C structure of statvfs(3). On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512).""" return {} def symlink(self, target, source): raise OSError(EROFS, '') def truncate(self, path, length, fh=None): raise OSError(EROFS, '') def unlink(self, path): raise OSError(EROFS, '') def utimens(self, path, times=None): """Times is a (atime, mtime) tuple. If None use current time.""" return 0 def write(self, path, data, offset, fh): raise OSError(EROFS, '') class LoggingMixIn: def __call__(self, op, path, *args): print '->', op, path, repr(args) ret = '[Unknown Error]' try: ret = getattr(self, op)(path, *args) return ret except OSError, e: ret = str(e) raise finally: print '<-', op, repr(ret)
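# A minimal sketch of an Operations subclass: a read-only filesystem exposing
# a single file, /hello. The class name, mountpoint and contents are
# illustrative; actually mounting it requires libfuse and an existing
# directory to mount on.
from errno import ENOENT
from stat import S_IFDIR, S_IFREG

class HelloFS(Operations):
    CONTENT = 'hello world\n'

    def getattr(self, path, fh=None):
        if path == '/':
            return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
        if path == '/hello':
            return dict(st_mode=(S_IFREG | 0444), st_nlink=1,
                        st_size=len(self.CONTENT))
        raise OSError(ENOENT, '')

    def readdir(self, path, fh):
        return ['.', '..', 'hello']

    def read(self, path, size, offset, fh):
        return self.CONTENT[offset:offset + size]

# Usage (foreground mount so it can be stopped with Ctrl-C):
#   FUSE(HelloFS(), '/mnt/hello', foreground=True)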
from knowledge import settings import django from django.db import models from django.utils.translation import ugettext_lazy as _ from django.conf import settings as django_settings from knowledge.managers import QuestionManager, ResponseManager from knowledge.signals import knowledge_post_save STATUSES = ( ('public', _('Public')), ('private', _('Private')), ('internal', _('Internal')), ) STATUSES_EXTENDED = STATUSES + ( ('inherit', _('Inherit')), ) class Category(models.Model): added = models.DateTimeField(auto_now_add=True) lastchanged = models.DateTimeField(auto_now=True) title = models.CharField(max_length=255) slug = models.SlugField(unique=True) def __unicode__(self): return self.title class Meta: ordering = ['title'] verbose_name = _('Category') verbose_name_plural = _('Categories') class KnowledgeBase(models.Model): """ The base class for Knowledge models. """ is_question, is_response = False, False added = models.DateTimeField(auto_now_add=True) lastchanged = models.DateTimeField(auto_now=True) user = models.ForeignKey('auth.User' if django.VERSION < (1, 5, 0) else django_settings.AUTH_USER_MODEL, blank=True, null=True, db_index=True) alert = models.BooleanField(default=settings.ALERTS, verbose_name=_('Alert'), help_text=_('Check this if you want to be alerted when a new' ' response is added.')) # for anonymous posting, if permitted name = models.CharField(max_length=64, blank=True, null=True, verbose_name=_('Name'), help_text=_('Enter your first and last name.')) email = models.EmailField(blank=True, null=True, verbose_name=_('Email'), help_text=_('Enter a valid email address.')) class Meta: abstract = True def save(self, *args, **kwargs): if not self.user and self.name and self.email \ and not self.id: # first time because no id self.public(save=False) if settings.AUTO_PUBLICIZE and not self.id: self.public(save=False) super(KnowledgeBase, self).save(*args, **kwargs) ######################### #### GENERIC GETTERS #### ######################### def get_name(self): """ Get local name, then self.user's first/last, and finally their username if all else fails. """ name = (self.name or (self.user and ( u'{0} {1}'.format(self.user.first_name, self.user.last_name).strip()\ or self.user.username ))) return name.strip() or _("Anonymous") get_email = lambda s: s.email or (s.user and s.user.email) get_pair = lambda s: (s.get_name(), s.get_email()) get_user_or_pair = lambda s: s.user or s.get_pair() ######################## #### STATUS METHODS #### ######################## def can_view(self, user): """ Returns a boolean dictating if a User like instance can view the current Model instance. 
""" if self.status == 'inherit' and self.is_response: return self.question.can_view(user) if self.status == 'internal' and user.is_staff: return True if self.status == 'private': if self.user == user or user.is_staff: return True if self.is_response and self.question.user == user: return True if self.status == 'public': return True return False def switch(self, status, save=True): self.status = status if save: self.save() switch.alters_data = True def public(self, save=True): self.switch('public', save) public.alters_data = True def private(self, save=True): self.switch('private', save) private.alters_data = True def inherit(self, save=True): self.switch('inherit', save) inherit.alters_data = True def internal(self, save=True): self.switch('internal', save) internal.alters_data = True class Question(KnowledgeBase): is_question = True _requesting_user = None title = models.CharField(max_length=255, verbose_name=_('Question'), help_text=_('Enter your question or suggestion.')) body = models.TextField(blank=True, null=True, verbose_name=_('Description'), help_text=_('Please offer details. Markdown enabled.')) status = models.CharField( verbose_name=_('Status'), max_length=32, choices=STATUSES, default='private', db_index=True) locked = models.BooleanField(default=False) categories = models.ManyToManyField('knowledge.Category', blank=True) objects = QuestionManager() class Meta: ordering = ['-added'] verbose_name = _('Question') verbose_name_plural = _('Questions') def __unicode__(self): return self.title @models.permalink def get_absolute_url(self): from django.template.defaultfilters import slugify if settings.SLUG_URLS: return ('knowledge_thread', [self.id, slugify(self.title)]) else: return ('knowledge_thread_no_slug', [self.id]) def inherit(self): pass def internal(self): pass def lock(self, save=True): self.locked = not self.locked if save: self.save() lock.alters_data = True ################### #### RESPONSES #### ################### def get_responses(self, user=None): user = user or self._requesting_user if user: return [r for r in self.responses.all().select_related('user') if r.can_view(user)] else: return self.responses.all().select_related('user') def answered(self): """ Returns a boolean indictating whether there any questions. """ return bool(self.get_responses()) def accepted(self): """ Returns a boolean indictating whether there is a accepted answer or not. """ return any([r.accepted for r in self.get_responses()]) def clear_accepted(self): self.get_responses().update(accepted=False) clear_accepted.alters_data = True def accept(self, response=None): """ Given a response, make that the one and only accepted answer. Similar to StackOverflow. """ self.clear_accepted() if response and response.question == self: response.accepted = True response.save() return True else: return False accept.alters_data = True def states(self): """ Handy for checking for mod bar button state. """ return [self.status, 'lock' if self.locked else None] @property def url(self): return self.get_absolute_url() class Response(KnowledgeBase): is_response = True question = models.ForeignKey('knowledge.Question', related_name='responses') body = models.TextField(blank=True, null=True, verbose_name=_('Response'), help_text=_('Please enter your response. 
Markdown enabled.')) status = models.CharField( verbose_name=_('Status'), max_length=32, choices=STATUSES_EXTENDED, default='inherit', db_index=True) accepted = models.BooleanField(default=False) objects = ResponseManager() class Meta: ordering = ['added'] verbose_name = _('Response') verbose_name_plural = _('Responses') def __unicode__(self): return self.body[0:100] + u'...' def states(self): """ Handy for checking for mod bar button state. """ return [self.status, 'accept' if self.accepted else None] def accept(self): self.question.accept(self) accept.alters_data = True # cannot attach on abstract = True... derp models.signals.post_save.connect(knowledge_post_save, sender=Question) models.signals.post_save.connect(knowledge_post_save, sender=Response)
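# Purely illustrative usage of the moderation helpers above, assuming a
# configured Django project with this app installed; the title and body
# text are made up.
question = Question.objects.create(
    title='How do I reset my password?', status='private')
response = Response.objects.create(
    question=question, body='Use the reset link on the login page.')

question.public()            # switch() persists the new status by default
response.accept()            # delegates to Question.accept(), clearing others
assert question.accepted()
assert question.can_view(None)   # public questions are visible to everyone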
"""Support for KNX/IP lights.""" from xknx.devices import Light as XknxLight from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_WHITE_VALUE, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_WHITE_VALUE, LightEntity, ) from homeassistant.core import callback import homeassistant.util.color as color_util from . import ATTR_DISCOVER_DEVICES, DATA_KNX DEFAULT_COLOR = (0.0, 0.0) DEFAULT_BRIGHTNESS = 255 DEFAULT_WHITE_VALUE = 255 async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up lights for KNX platform.""" if discovery_info is not None: async_add_entities_discovery(hass, discovery_info, async_add_entities) @callback def async_add_entities_discovery(hass, discovery_info, async_add_entities): """Set up lights for KNX platform configured via xknx.yaml.""" entities = [] for device_name in discovery_info[ATTR_DISCOVER_DEVICES]: device = hass.data[DATA_KNX].xknx.devices[device_name] entities.append(KNXLight(device)) async_add_entities(entities) class KNXLight(LightEntity): """Representation of a KNX light.""" def __init__(self, device: XknxLight): """Initialize of KNX light.""" self.device = device self._min_kelvin = device.min_kelvin self._max_kelvin = device.max_kelvin self._min_mireds = color_util.color_temperature_kelvin_to_mired( self._max_kelvin ) self._max_mireds = color_util.color_temperature_kelvin_to_mired( self._min_kelvin ) @callback def async_register_callbacks(self): """Register callbacks to update hass after device was changed.""" async def after_update_callback(device): """Call after device was updated.""" self.async_write_ha_state() self.device.register_device_updated_cb(after_update_callback) async def async_added_to_hass(self): """Store register state change callback.""" self.async_register_callbacks() async def async_update(self): """Request a state update from KNX bus.""" await self.device.sync() @property def name(self): """Return the name of the KNX device.""" return self.device.name @property def available(self): """Return True if entity is available.""" return self.hass.data[DATA_KNX].connected @property def should_poll(self): """No polling needed within KNX.""" return False @property def brightness(self): """Return the brightness of this light between 0..255.""" if self.device.supports_brightness: return self.device.current_brightness hsv_color = self._hsv_color if self.device.supports_color and hsv_color: # pylint: disable=unsubscriptable-object return round(hsv_color[-1] / 100 * 255) return None @property def hs_color(self): """Return the HS color value.""" rgb = None if self.device.supports_rgbw or self.device.supports_color: rgb, _ = self.device.current_color return color_util.color_RGB_to_hs(*rgb) if rgb else None @property def _hsv_color(self): """Return the HSV color value.""" rgb = None if self.device.supports_rgbw or self.device.supports_color: rgb, _ = self.device.current_color return color_util.color_RGB_to_hsv(*rgb) if rgb else None @property def white_value(self): """Return the white value.""" white = None if self.device.supports_rgbw: _, white = self.device.current_color return white @property def color_temp(self): """Return the color temperature in mireds.""" if self.device.supports_color_temperature: kelvin = self.device.current_color_temperature if kelvin is not None: return color_util.color_temperature_kelvin_to_mired(kelvin) if self.device.supports_tunable_white: relative_ct = self.device.current_tunable_white if relative_ct is not None: # as KNX 
devices typically use Kelvin we use it as base for # calculating ct from percent return color_util.color_temperature_kelvin_to_mired( self._min_kelvin + ((relative_ct / 255) * (self._max_kelvin - self._min_kelvin)) ) return None @property def min_mireds(self): """Return the coldest color temp this light supports in mireds.""" return self._min_mireds @property def max_mireds(self): """Return the warmest color temp this light supports in mireds.""" return self._max_mireds @property def effect_list(self): """Return the list of supported effects.""" return None @property def effect(self): """Return the current effect.""" return None @property def is_on(self): """Return true if light is on.""" return self.device.state @property def supported_features(self): """Flag supported features.""" flags = 0 if self.device.supports_brightness: flags |= SUPPORT_BRIGHTNESS if self.device.supports_color: flags |= SUPPORT_COLOR | SUPPORT_BRIGHTNESS if self.device.supports_rgbw: flags |= SUPPORT_COLOR | SUPPORT_WHITE_VALUE if self.device.supports_color_temperature or self.device.supports_tunable_white: flags |= SUPPORT_COLOR_TEMP return flags async def async_turn_on(self, **kwargs): """Turn the light on.""" brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness) hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color) white_value = kwargs.get(ATTR_WHITE_VALUE, self.white_value) mireds = kwargs.get(ATTR_COLOR_TEMP, self.color_temp) update_brightness = ATTR_BRIGHTNESS in kwargs update_color = ATTR_HS_COLOR in kwargs update_white_value = ATTR_WHITE_VALUE in kwargs update_color_temp = ATTR_COLOR_TEMP in kwargs # avoid conflicting changes and weird effects if not ( self.is_on or update_brightness or update_color or update_white_value or update_color_temp ): await self.device.set_on() if self.device.supports_brightness and (update_brightness and not update_color): # if we don't need to update the color, try updating brightness # directly if supported; don't do it if color also has to be # changed, as RGB color implicitly sets the brightness as well await self.device.set_brightness(brightness) elif (self.device.supports_rgbw or self.device.supports_color) and ( update_brightness or update_color or update_white_value ): # change RGB color, white value (if supported), and brightness # if brightness or hs_color was not yet set use the default value # to calculate RGB from as a fallback if brightness is None: brightness = DEFAULT_BRIGHTNESS if hs_color is None: hs_color = DEFAULT_COLOR if white_value is None and self.device.supports_rgbw: white_value = DEFAULT_WHITE_VALUE rgb = color_util.color_hsv_to_RGB(*hs_color, brightness * 100 / 255) await self.device.set_color(rgb, white_value) if update_color_temp: kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds)) kelvin = min(self._max_kelvin, max(self._min_kelvin, kelvin)) if self.device.supports_color_temperature: await self.device.set_color_temperature(kelvin) elif self.device.supports_tunable_white: relative_ct = int( 255 * (kelvin - self._min_kelvin) / (self._max_kelvin - self._min_kelvin) ) await self.device.set_tunable_white(relative_ct) async def async_turn_off(self, **kwargs): """Turn the light off.""" await self.device.set_off()
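# Worked example of the color-temperature handling in async_turn_on() above,
# assuming a hypothetical tunable-white device with a 2700 K - 6000 K range
# (the incoming mired value is made up as well).
min_kelvin, max_kelvin = 2700, 6000

mireds = 250
kelvin = int(1e6 / mireds)                          # mired -> Kelvin: 4000 K
kelvin = min(max_kelvin, max(min_kelvin, kelvin))   # clamp to the device range
relative_ct = int(255 * (kelvin - min_kelvin) / (max_kelvin - min_kelvin))
print(kelvin, relative_ct)                          # 4000 100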
#!/usr/bin/python # Copyright 2017-present Open Networking Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DOCUMENTATION = ''' --- module: maas_user short_description: Manage MAAS Clusters Interfaces options: maas: description: - URL of MAAS server default: http://localhost/MAAS/api/1.0/ key: description: - MAAS API key required: yes name: description: - name of the user required: yes email: description: - email address of the user required: no password: description: - password for the user required: no is_superuser: description: - does the user have priviledges default: no state: description: - possible states for this user choices: ['present', 'absent', 'query'] default: present requirements: [ipaddress, requests_oauthlib, maasclient] author: David Bainbridge ''' EXAMPLES = ''' examples: maas_user: maas: http://my.maas.server.com/MAAS/api/1.0/ key: 'xBvr9dx5k7S52myufC:fqBXV7hJgXegNZDw9c:K8hsmL47XjAppfQy2pDVW7G49p6PELgp' name: MyUser email: [email protected] password: donttell is_superuser: no state: present maas_user: maas: http://my.maas.server.com/MAAS/api/1.0/ key: 'xBvr9dx5k7S52myufC:fqBXV7hJgXegNZDw9c:K8hsmL47XjAppfQy2pDVW7G49p6PELgp' name: MyDeadUser state: absent ''' import sys import json import ipaddress import requests from maasclient.auth import MaasAuth from maasclient import MaasClient # For some reason the maasclient doesn't provide a put method. 
So # we will add it here def put(client, url, params=None): return requests.put(url=client.auth.api_url + url, auth=client._oauth(), data=params) # Attempt to interpret the given value as a JSON object, if that fails # just return it as a string def string_or_object(val): try: return json.loads(val) except: return val # Return a copy of the given dictionary with any `null` valued entries # removed def remove_null(d_in): d = d_in.copy() to_remove = [] for k in d.keys(): if d[k] == None: to_remove.append(k) for k in to_remove: del d[k] return d # Deterine if two dictionaries are different def different(have, want): have_keys = have.keys() for key in want.keys(): if (key in have_keys and want[key] != have[key]) or key not in have_keys: return True return False # Get an user from MAAS using its name, if not found return None def get_user(maas, name): res = maas.get('/users/%s/' % name) if res.ok: return json.loads(res.text) return None # Create an user based on the value given def create_user(maas, user): merged = user.copy() # merged['op'] = 'new' res = maas.post('/users/', merged) if res.ok: return { 'error': False, 'status': get_user(maas, merged['username']) } return { 'error': True, 'status': string_or_object(res.text) } # Delete an user based on the name def delete_user(maas, name): res = maas.delete('/users/%s/' % name) if res.ok: return { 'error': False } return { 'error': True, 'status': string_or_object(res.text) } def main(): module = AnsibleModule( argument_spec = dict( maas=dict(default='http://localhost/MAAS/api/1.0/'), key=dict(required=True), name=dict(required=True), email=dict(required=False), password=dict(required=False), is_superuser=dict(default=False, type='bool'), state=dict(default='present', choices=['present', 'absent', 'query']) ), supports_check_mode = False ) maas = module.params['maas'] key = module.params['key'] state = module.params['state'] # Construct a sparsely populate desired state desired = remove_null({ 'username': module.params['name'], 'email': module.params['email'], 'password': module.params['password'], 'is_superuser': 0 if not module.params['is_superuser'] else 1 }) # Authenticate into MAAS auth = MaasAuth(maas, key) maas = MaasClient(auth) # Attempt to get the user from MAAS user = get_user(maas, desired['username']) # Actions if the user does not currently exist if not user: if state == 'query': # If this is a query, returne it is not found module.exit_json(changed=False, found=False) elif state == 'present': # If this should be present, then attempt to create it res = create_user(maas, desired) if res['error']: module.fail_json(msg=res['status']) else: module.exit_json(changed=True, user=res['status']) else: # If this should be absent, then we are done and in the desired state module.exit_json(changed=False) # Done with users does not exists actions return # Actions if the user does exist if state == 'query': # If this is a query, return the user module.exit_json(changed=False, found=True, user=user) elif state == 'present': # If we want this to exists check to see if this is different and # needs updated if different(user, desired): module.fail_json(msg='Specified user, "%s", exists and MAAS does not allow the user to be modified programatically' % user['username']) else: # No differences, to nothing to change module.exit_json(changed=False, user=user) else: # If we don't want this user, then delete it res = delete_user(maas, user['username']) if res['error']: module.fail_json(msg=res['status']) else: module.exit_json(changed=True, user=user) # 
this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
if __name__ == '__main__':
    main()
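# A quick illustration of the helper semantics above with made-up values,
# meant to be read in the context of this module (it reuses remove_null()
# and different() defined earlier).
have = {'username': 'MyUser', 'email': '[email protected]', 'is_superuser': 0}
want = remove_null({'username': 'MyUser', 'email': None, 'is_superuser': 1})

print(want)                   # the unset email is dropped before comparing
print(different(have, want))  # True: is_superuser changed, so an update is needed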
""" convolutional nodes """ from __future__ import division, absolute_import from __future__ import print_function, unicode_literals import numpy as np import theano import theano.tensor as T from .. import core def conv_output_length(input_size, conv_size, stride, pad): """ calculates the output size along a single axis for a conv operation """ if input_size is None: return None without_stride = input_size + 2 * pad - conv_size + 1 # equivalent to np.ceil(without_stride / stride) output_size = (without_stride + stride - 1) // stride return output_size def conv_output_shape(input_shape, num_filters, axes, conv_shape, strides, pads): """ compute output shape for a conv """ output_shape = list(input_shape) assert 1 not in axes output_shape[1] = num_filters for axis, conv_size, stride, pad in zip(axes, conv_shape, strides, pads): output_shape[axis] = conv_output_length(input_shape[axis], conv_size, stride, pad) return tuple(output_shape) def conv_parse_pad(filter_size, pad): if pad == "valid": return (0,) * len(filter_size) elif pad == "full": return tuple([x - 1 for x in filter_size]) elif pad == "same": new_pad = [] for f in filter_size: assert f % 2 new_pad += [f // 2] return tuple(new_pad) else: assert len(pad) == len(filter_size) return pad @core.register_node("conv_2d") class Conv2DNode(core.NodeImpl): """ node for 2D convolution """ hyperparameter_names = ("inits", "num_filters", "filter_size", "conv_stride", "stride", "conv_pad", "pad") def compute_output(self, network, in_vw): # gather hyperparameters num_filters = network.find_hyperparameter(["num_filters"]) filter_size = network.find_hyperparameter(["filter_size"]) stride = network.find_hyperparameter(["conv_stride", "stride"], (1, 1)) pad = network.find_hyperparameter(["conv_pad", "pad"], "valid") # convert numerical pad to valid or full if pad == (0, 0): pad = "valid" elif pad == tuple([fs - 1 for fs in filter_size]): pad = "full" assert len(filter_size) == 2 assert pad in ["valid", "full"] # create weight num_channels = in_vw.shape[1] filter_shape = (num_filters, num_channels) + tuple(filter_size) W = network.create_vw( name="weight", is_shared=True, shape=filter_shape, tags={"parameter", "weight"}, default_inits=[], ).variable out_var = T.nnet.conv2d(input=in_vw.variable, filters=W, image_shape=in_vw.shape, filter_shape=filter_shape, border_mode=pad, subsample=stride) out_shape = conv_output_shape(input_shape=in_vw.shape, num_filters=num_filters, axes=(2, 3), conv_shape=filter_size, strides=stride, pads=conv_parse_pad(filter_size, pad)) network.create_vw( "default", variable=out_var, shape=out_shape, tags={"output"}, ) @core.register_node("conv_3d") class Conv3DNode(core.NodeImpl): """ node for 3D convolution """ hyperparameter_names = ("inits", "num_filters", "filter_size", "conv_stride", "stride", "conv_pad", "pad", "include_bias") def compute_output(self, network, in_vw): # gather hyperparameters num_filters = network.find_hyperparameter(["num_filters"]) filter_size = network.find_hyperparameter(["filter_size"]) stride = network.find_hyperparameter(["conv_stride", "stride"], (1, 1, 1)) pad = network.find_hyperparameter(["conv_pad", "pad"], "valid") include_bias = network.find_hyperparameter(["include_bias"], False) assert len(filter_size) == 3 assert pad == "valid" # create weight num_channels = in_vw.shape[1] filter_shape = (num_filters, num_channels) + tuple(filter_size) W = network.create_vw( name="weight", is_shared=True, shape=filter_shape, tags={"parameter", "weight"}, default_inits=[], ).variable # create bias if 
include_bias: b = network.create_vw( name="bias", is_shared=True, shape=(num_filters,), tags={"parameter", "bias"}, default_inits=[], ).variable else: b = T.zeros(num_filters) from theano.tensor.nnet.Conv3D import conv3D # conv3D takes V in order: (batch, row, column, time, in channel) # and W in order: (out channel, row, column, time ,in channel) # but we keep the dimensions that W is stored in consistent with other # convolutions, so we have to dimshuffle here out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1), W=W.dimshuffle(0, 2, 3, 4, 1), b=b, d=stride) out_shape = conv_output_shape(input_shape=in_vw.shape, num_filters=num_filters, axes=(2, 3, 4), conv_shape=filter_size, strides=stride, pads=(0, 0, 0)) network.create_vw( "default", variable=out_var, shape=out_shape, tags={"output"}, ) @core.register_node("conv_3d2d") class Conv3D2DNode(core.NodeImpl): """ performs 3D convolution via 2D convolution see: theano.tensor.nnet.conv3d2d.conv3d """ hyperparameter_names = ("inits", "num_filters", "filter_size", "conv_stride", "stride", "conv_pad", "pad") def compute_output(self, network, in_vw): # gather hyperparameters num_filters = network.find_hyperparameter(["num_filters"]) filter_size = network.find_hyperparameter(["filter_size"]) stride = network.find_hyperparameter(["conv_stride", "stride"], (1, 1, 1)) pad = network.find_hyperparameter(["conv_pad", "pad"], "valid") assert len(filter_size) == 3 assert pad == "valid" assert stride == (1, 1, 1) # create weight num_channels = in_vw.shape[1] filter_shape = (num_filters, num_channels) + tuple(filter_size) W = network.create_vw( name="weight", is_shared=True, shape=filter_shape, tags={"parameter", "weight"}, default_inits=[], ).variable from theano.tensor.nnet.conv3d2d import conv3d # takes signals in order: (batch, time, channels, row, column) # and filters in order: (out channel, time, in channels, row, column) # but we keep the dimensions that W is stored in consistent with other # convolutions, so we have to dimshuffle here order = (0, 2, 1, 3, 4) out_var = conv3d(signals=in_vw.variable.dimshuffle(*order), filters=W.dimshuffle(*order), signals_shape=[in_vw.shape[o] for o in order], filters_shape=[filter_shape[o] for o in order], # HACK as of 20150916, conv3d does a check # if isinstance(border_mode, str), so we manually # cast as a string border_mode=str("valid")) out_shape = conv_output_shape(input_shape=in_vw.shape, num_filters=num_filters, axes=(2, 3, 4), conv_shape=filter_size, strides=stride, pads=conv_parse_pad(filter_size, pad)) network.create_vw( "default", variable=out_var, shape=out_shape, tags={"output"}, )
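# A small worked example of the pure-Python shape helpers defined at the top
# of this module (no Theano graph is built); the sizes are arbitrary:
# a (batch=8, channels=3, 32x32) input, 16 filters of 5x5, stride 2, "same" pad.
input_shape = (8, 3, 32, 32)
filter_size = (5, 5)
pads = conv_parse_pad(filter_size, "same")          # -> (2, 2)

out_shape = conv_output_shape(input_shape=input_shape,
                              num_filters=16,
                              axes=(2, 3),
                              conv_shape=filter_size,
                              strides=(2, 2),
                              pads=pads)
print(out_shape)   # (8, 16, 16, 16); per axis: ((32 + 2*2 - 5 + 1) + 2 - 1) // 2 = 16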
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from abc import abstractmethod, ABCMeta from pyspark import since, keyword_only from pyspark.ml.wrapper import JavaParams from pyspark.ml.param import Param, Params, TypeConverters from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasRawPredictionCol, \ HasFeaturesCol, HasWeightCol from pyspark.ml.common import inherit_doc from pyspark.ml.util import JavaMLReadable, JavaMLWritable __all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator', 'MulticlassClassificationEvaluator', 'ClusteringEvaluator'] @inherit_doc class Evaluator(Params): """ Base class for evaluators that compute metrics from predictions. .. versionadded:: 1.4.0 """ __metaclass__ = ABCMeta @abstractmethod def _evaluate(self, dataset): """ Evaluates the output. :param dataset: a dataset that contains labels/observations and predictions :return: metric """ raise NotImplementedError() @since("1.4.0") def evaluate(self, dataset, params=None): """ Evaluates the output with optional parameters. :param dataset: a dataset that contains labels/observations and predictions :param params: an optional param map that overrides embedded params :return: metric """ if params is None: params = dict() if isinstance(params, dict): if params: return self.copy(params)._evaluate(dataset) else: return self._evaluate(dataset) else: raise ValueError("Params must be a param map but got %s." % type(params)) @since("1.5.0") def isLargerBetter(self): """ Indicates whether the metric returned by :py:meth:`evaluate` should be maximized (True, default) or minimized (False). A given evaluator may support multiple metrics which may be maximized or minimized. """ return True @inherit_doc class JavaEvaluator(JavaParams, Evaluator): """ Base class for :py:class:`Evaluator`s that wrap Java/Scala implementations. """ __metaclass__ = ABCMeta def _evaluate(self, dataset): """ Evaluates the output. :param dataset: a dataset that contains labels/observations and predictions. :return: evaluation metric """ self._transfer_params_to_java() return self._java_obj.evaluate(dataset._jdf) def isLargerBetter(self): self._transfer_params_to_java() return self._java_obj.isLargerBetter() @inherit_doc class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol, HasWeightCol, JavaMLReadable, JavaMLWritable): """ .. note:: Experimental Evaluator for binary classification, which expects two input columns: rawPrediction and label. The rawPrediction column can be of type double (binary 0/1 prediction, or probability of label 1) or of type vector (length-2 vector of raw predictions, scores, or label probabilities). >>> from pyspark.ml.linalg import Vectors >>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]), ... 
[(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)]) >>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"]) ... >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw") >>> evaluator.evaluate(dataset) 0.70... >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"}) 0.83... >>> bce_path = temp_path + "/bce" >>> evaluator.save(bce_path) >>> evaluator2 = BinaryClassificationEvaluator.load(bce_path) >>> str(evaluator2.getRawPredictionCol()) 'raw' >>> scoreAndLabelsAndWeight = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1], x[2]), ... [(0.1, 0.0, 1.0), (0.1, 1.0, 0.9), (0.4, 0.0, 0.7), (0.6, 0.0, 0.9), ... (0.6, 1.0, 1.0), (0.6, 1.0, 0.3), (0.8, 1.0, 1.0)]) >>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["raw", "label", "weight"]) ... >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw", weightCol="weight") >>> evaluator.evaluate(dataset) 0.70... >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"}) 0.82... .. versionadded:: 1.4.0 """ metricName = Param(Params._dummy(), "metricName", "metric name in evaluation (areaUnderROC|areaUnderPR)", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, rawPredictionCol="rawPrediction", labelCol="label", metricName="areaUnderROC", weightCol=None): """ __init__(self, rawPredictionCol="rawPrediction", labelCol="label", \ metricName="areaUnderROC", weightCol=None) """ super(BinaryClassificationEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid) self._setDefault(metricName="areaUnderROC") kwargs = self._input_kwargs self._set(**kwargs) @since("1.4.0") def setMetricName(self, value): """ Sets the value of :py:attr:`metricName`. """ return self._set(metricName=value) @since("1.4.0") def getMetricName(self): """ Gets the value of metricName or its default value. """ return self.getOrDefault(self.metricName) @keyword_only @since("1.4.0") def setParams(self, rawPredictionCol="rawPrediction", labelCol="label", metricName="areaUnderROC", weightCol=None): """ setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \ metricName="areaUnderROC", weightCol=None) Sets params for binary classification evaluator. """ kwargs = self._input_kwargs return self._set(**kwargs) @inherit_doc class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol, JavaMLReadable, JavaMLWritable): """ .. note:: Experimental Evaluator for Regression, which expects two input columns: prediction and label. >>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5), ... (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)] >>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"]) ... >>> evaluator = RegressionEvaluator(predictionCol="raw") >>> evaluator.evaluate(dataset) 2.842... >>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"}) 0.993... >>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"}) 2.649... >>> re_path = temp_path + "/re" >>> evaluator.save(re_path) >>> evaluator2 = RegressionEvaluator.load(re_path) >>> str(evaluator2.getPredictionCol()) 'raw' .. 
versionadded:: 1.4.0 """ metricName = Param(Params._dummy(), "metricName", """metric name in evaluation - one of: rmse - root mean squared error (default) mse - mean squared error r2 - r^2 metric mae - mean absolute error.""", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, predictionCol="prediction", labelCol="label", metricName="rmse"): """ __init__(self, predictionCol="prediction", labelCol="label", \ metricName="rmse") """ super(RegressionEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid) self._setDefault(metricName="rmse") kwargs = self._input_kwargs self._set(**kwargs) @since("1.4.0") def setMetricName(self, value): """ Sets the value of :py:attr:`metricName`. """ return self._set(metricName=value) @since("1.4.0") def getMetricName(self): """ Gets the value of metricName or its default value. """ return self.getOrDefault(self.metricName) @keyword_only @since("1.4.0") def setParams(self, predictionCol="prediction", labelCol="label", metricName="rmse"): """ setParams(self, predictionCol="prediction", labelCol="label", \ metricName="rmse") Sets params for regression evaluator. """ kwargs = self._input_kwargs return self._set(**kwargs) @inherit_doc class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol, HasWeightCol, JavaMLReadable, JavaMLWritable): """ .. note:: Experimental Evaluator for Multiclass Classification, which expects two input columns: prediction and label. >>> scoreAndLabels = [(0.0, 0.0), (0.0, 1.0), (0.0, 0.0), ... (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)] >>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"]) ... >>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction") >>> evaluator.evaluate(dataset) 0.66... >>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"}) 0.66... >>> mce_path = temp_path + "/mce" >>> evaluator.save(mce_path) >>> evaluator2 = MulticlassClassificationEvaluator.load(mce_path) >>> str(evaluator2.getPredictionCol()) 'prediction' >>> scoreAndLabelsAndWeight = [(0.0, 0.0, 1.0), (0.0, 1.0, 1.0), (0.0, 0.0, 1.0), ... (1.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0), ... (2.0, 2.0, 1.0), (2.0, 0.0, 1.0)] >>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["prediction", "label", "weight"]) ... >>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction", ... weightCol="weight") >>> evaluator.evaluate(dataset) 0.66... >>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"}) 0.66... .. versionadded:: 1.5.0 """ metricName = Param(Params._dummy(), "metricName", "metric name in evaluation " "(f1|weightedPrecision|weightedRecall|accuracy)", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, predictionCol="prediction", labelCol="label", metricName="f1", weightCol=None): """ __init__(self, predictionCol="prediction", labelCol="label", \ metricName="f1", weightCol=None) """ super(MulticlassClassificationEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid) self._setDefault(metricName="f1") kwargs = self._input_kwargs self._set(**kwargs) @since("1.5.0") def setMetricName(self, value): """ Sets the value of :py:attr:`metricName`. """ return self._set(metricName=value) @since("1.5.0") def getMetricName(self): """ Gets the value of metricName or its default value. 
""" return self.getOrDefault(self.metricName) @keyword_only @since("1.5.0") def setParams(self, predictionCol="prediction", labelCol="label", metricName="f1", weightCol=None): """ setParams(self, predictionCol="prediction", labelCol="label", \ metricName="f1", weightCol=None) Sets params for multiclass classification evaluator. """ kwargs = self._input_kwargs return self._set(**kwargs) @inherit_doc class ClusteringEvaluator(JavaEvaluator, HasPredictionCol, HasFeaturesCol, JavaMLReadable, JavaMLWritable): """ .. note:: Experimental Evaluator for Clustering results, which expects two input columns: prediction and features. The metric computes the Silhouette measure using the squared Euclidean distance. The Silhouette is a measure for the validation of the consistency within clusters. It ranges between 1 and -1, where a value close to 1 means that the points in a cluster are close to the other points in the same cluster and far from the points of the other clusters. >>> from pyspark.ml.linalg import Vectors >>> featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]), ... [([0.0, 0.5], 0.0), ([0.5, 0.0], 0.0), ([10.0, 11.0], 1.0), ... ([10.5, 11.5], 1.0), ([1.0, 1.0], 0.0), ([8.0, 6.0], 1.0)]) >>> dataset = spark.createDataFrame(featureAndPredictions, ["features", "prediction"]) ... >>> evaluator = ClusteringEvaluator(predictionCol="prediction") >>> evaluator.evaluate(dataset) 0.9079... >>> ce_path = temp_path + "/ce" >>> evaluator.save(ce_path) >>> evaluator2 = ClusteringEvaluator.load(ce_path) >>> str(evaluator2.getPredictionCol()) 'prediction' .. versionadded:: 2.3.0 """ metricName = Param(Params._dummy(), "metricName", "metric name in evaluation (silhouette)", typeConverter=TypeConverters.toString) distanceMeasure = Param(Params._dummy(), "distanceMeasure", "The distance measure. " + "Supported options: 'squaredEuclidean' and 'cosine'.", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, predictionCol="prediction", featuresCol="features", metricName="silhouette", distanceMeasure="squaredEuclidean"): """ __init__(self, predictionCol="prediction", featuresCol="features", \ metricName="silhouette", distanceMeasure="squaredEuclidean") """ super(ClusteringEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.ClusteringEvaluator", self.uid) self._setDefault(metricName="silhouette", distanceMeasure="squaredEuclidean") kwargs = self._input_kwargs self._set(**kwargs) @since("2.3.0") def setMetricName(self, value): """ Sets the value of :py:attr:`metricName`. """ return self._set(metricName=value) @since("2.3.0") def getMetricName(self): """ Gets the value of metricName or its default value. """ return self.getOrDefault(self.metricName) @keyword_only @since("2.3.0") def setParams(self, predictionCol="prediction", featuresCol="features", metricName="silhouette", distanceMeasure="squaredEuclidean"): """ setParams(self, predictionCol="prediction", featuresCol="features", \ metricName="silhouette", distanceMeasure="squaredEuclidean") Sets params for clustering evaluator. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.4.0") def setDistanceMeasure(self, value): """ Sets the value of :py:attr:`distanceMeasure`. 
""" return self._set(distanceMeasure=value) @since("2.4.0") def getDistanceMeasure(self): """ Gets the value of `distanceMeasure` """ return self.getOrDefault(self.distanceMeasure) if __name__ == "__main__": import doctest import tempfile import pyspark.ml.evaluation from pyspark.sql import SparkSession globs = pyspark.ml.evaluation.__dict__.copy() # The small batch size here ensures that we see multiple batches, # even in these small test examples: spark = SparkSession.builder\ .master("local[2]")\ .appName("ml.evaluation tests")\ .getOrCreate() globs['spark'] = spark temp_path = tempfile.mkdtemp() globs['temp_path'] = temp_path try: (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) spark.stop() finally: from shutil import rmtree try: rmtree(temp_path) except OSError: pass if failure_count: sys.exit(-1)
from cStringIO import StringIO from datetime import datetime from functools import partial from itertools import chain, imap, izip from logging import StreamHandler import os from os import chdir from os.path import join, basename, split, dirname, relpath from sys import stderr from time import time from mimetypes import guess_type from urllib import quote_plus from flask import (Blueprint, Flask, send_from_directory, current_app, send_file, request, redirect, jsonify, render_template, url_for) from funcy import merge, imap from pyelasticsearch import ElasticSearch from werkzeug.exceptions import NotFound from dxr.config import Config from dxr.es import (filtered_query, frozen_config, frozen_configs, es_alias_or_not_found) from dxr.exceptions import BadTerm from dxr.filters import FILE, LINE from dxr.lines import html_line, tags_per_line, finished_tags, Ref, Region from dxr.mime import icon, is_binary_image, is_textual_image, decode_data from dxr.plugins import plugins_named from dxr.query import Query, filter_menu_items from dxr.utils import (non_negative_int, decode_es_datetime, DXR_BLUEPRINT, format_number, append_update, append_by_line, build_offset_map) from dxr.vcs import file_contents_at_rev # Look in the 'dxr' package for static files, etc.: dxr_blueprint = Blueprint(DXR_BLUEPRINT, 'dxr', template_folder='templates', # static_folder seems to register a "static" route # with the blueprint so the url_prefix (set later) # takes effect for static files when found through # url_for('static', ...). static_folder='static') class HashedStatics(object): """A Flask extension which adds hashes to static asset URLs, as determined by a static_manifest file just outside the static folder""" def __init__(self, app=None): self.app = None self.manifests = {} if app is not None: self.init_app(app) def init_app(self, app): self.app = app app.url_defaults(self._hashed_url) def _manifest_near(self, static_folder): """Cache and return a manifest for a specific static folder. The manifest must be in a file called "static_manifest" just outside the static folder. """ manifest = self.manifests.get(static_folder) if manifest is None: try: with open(join(dirname(static_folder), 'static_manifest')) as file: manifest = self.manifests[static_folder] = \ dict(line.split() for line in file) except IOError: # Probably no such file manifest = self.manifests[static_folder] = {} return manifest def _hashed_url(self, route, values): """Map an unhashed URL to a hashed one. If no mapping is found in the manifest, leave it alone, which will result in a 404. """ if route == 'static' or route.endswith('.static'): filename = values.get('filename') if filename: blueprint = request.blueprint static_folder = (self.app.blueprints[blueprint].static_folder if blueprint else self.app.static_folder) manifest = self._manifest_near(static_folder) values['filename'] = manifest.get(filename, filename) def make_app(config): """Return a DXR application which uses ``config`` as its configuration. Also set up the static and template folder. """ app = Flask('dxr') app.dxr_config = config app.register_blueprint(dxr_blueprint, url_prefix=config.www_root) HashedStatics(app=app) # The url_prefix we pass when registering the blueprint is not stored # anywhere. 
This saves gymnastics in our custom URL builders to get it back: app.dxr_www_root = config.www_root # Log to Apache's error log in production: app.logger.addHandler(StreamHandler(stderr)) # Make an ES connection pool shared among all threads: app.es = ElasticSearch(config.es_hosts) return app @dxr_blueprint.route('/') def index(): return redirect(url_for('.browse', tree=current_app.dxr_config.default_tree)) @dxr_blueprint.route('/<tree>/search') def search(tree): """Normalize params, and dispatch between JSON- and HTML-returning searches, based on Accept header. """ # Normalize querystring params: config = current_app.dxr_config frozen = frozen_config(tree) req = request.values query_text = req.get('q', '') offset = non_negative_int(req.get('offset'), 0) limit = min(non_negative_int(req.get('limit'), 100), 1000) # Make a Query: query = Query(partial(current_app.es.search, index=frozen['es_alias']), query_text, plugins_named(frozen['enabled_plugins'])) # Fire off one of the two search routines: searcher = _search_json if _request_wants_json() else _search_html return searcher(query, tree, query_text, offset, limit, config) def _search_json(query, tree, query_text, offset, limit, config): """Try a "direct search" (for exact identifier matches, etc.). If we have a direct hit, then return {redirect: hit location}.If that doesn't work, fall back to a normal search and return the results as JSON.""" # If we're asked to redirect and have a direct hit, then return the url to that. if request.values.get('redirect') == 'true': result = query.direct_result() if result: path, line = result # TODO: Does this escape query_text properly? params = { 'tree': tree, 'path': path, 'from': query_text } return jsonify({'redirect': url_for('.browse', _anchor=line, **params)}) try: count_and_results = query.results(offset, limit) # Convert to dicts for ease of manipulation in JS: results = [{'icon': icon, 'path': path, 'lines': [{'line_number': nb, 'line': l} for nb, l in lines]} for icon, path, lines in count_and_results['results']] except BadTerm as exc: return jsonify({'error_html': exc.reason, 'error_level': 'warning'}), 400 return jsonify({ 'www_root': config.www_root, 'tree': tree, 'results': results, 'result_count': count_and_results['result_count'], 'result_count_formatted': format_number(count_and_results['result_count']), 'tree_tuples': _tree_tuples(query_text)}) def _search_html(query, tree, query_text, offset, limit, config): """Return the rendered template for search.html. 
""" frozen = frozen_config(tree) # Try a normal search: template_vars = { 'filters': filter_menu_items( plugins_named(frozen['enabled_plugins'])), 'generated_date': frozen['generated_date'], 'google_analytics_key': config.google_analytics_key, 'query': query_text, 'search_url': url_for('.search', tree=tree, q=query_text, redirect='false'), 'top_of_tree': url_for('.browse', tree=tree), 'tree': tree, 'tree_tuples': _tree_tuples(query_text), 'www_root': config.www_root} return render_template('search.html', **template_vars) def _tree_tuples(query_text): """Return a list of rendering info for Switch Tree menu items.""" return [(f['name'], url_for('.search', tree=f['name'], q=query_text), f['description']) for f in frozen_configs()] @dxr_blueprint.route('/<tree>/raw/<path:path>') def raw(tree, path): """Send raw data at path from tree, for binary things like images.""" if not is_binary_image(path) and not is_textual_image(path): raise NotFound query = { 'filter': { 'term': { 'path': path } } } results = current_app.es.search( query, index=es_alias_or_not_found(tree), doc_type=FILE, size=1) try: # we explicitly get index 0 because there should be exactly 1 result data = results['hits']['hits'][0]['_source']['raw_data'][0] except IndexError: # couldn't find the image raise NotFound data_file = StringIO(data.decode('base64')) return send_file(data_file, mimetype=guess_type(path)[0]) @dxr_blueprint.route('/<tree>/raw-rev/<revision>/<path:path>') def raw_rev(tree, revision, path): """Send raw data at path from tree at the given revision, for binary things like images.""" if not is_binary_image(path) and not is_textual_image(path): raise NotFound config = current_app.dxr_config tree_config = config.trees[tree] abs_path = join(tree_config.source_folder, path) data = file_contents_at_rev(abs_path, revision) if data is None: raise NotFound data_file = StringIO(data) return send_file(data_file, mimetype=guess_type(path)[0]) @dxr_blueprint.route('/<tree>/source/') @dxr_blueprint.route('/<tree>/source/<path:path>') def browse(tree, path=''): """Show a directory listing or a single file from one of the trees. Raise NotFound if path does not exist as either a folder or file. """ config = current_app.dxr_config try: # Strip any trailing slash because we do not store it in ES. return _browse_folder(tree, path.rstrip('/'), config) except NotFound: frozen = frozen_config(tree) # Grab the FILE doc, just for the sidebar nav links and the symlink target: files = filtered_query( frozen['es_alias'], FILE, filter={'path': path}, size=1, include=['link', 'links', 'is_binary']) if not files: raise NotFound file_doc = files[0] if 'link' in file_doc: # Then this path is a symlink, so redirect to the real thing. return redirect(url_for('.browse', tree=tree, path=file_doc['link'][0])) lines = filtered_query( frozen['es_alias'], LINE, filter={'path': path}, sort=['number'], size=1000000, include=['content', 'refs', 'regions', 'annotations']) # Deref the content field in each document. We can do this because we # do not store empty lines in ES. for doc in lines: doc['content'] = doc['content'][0] return _browse_file(tree, path, lines, file_doc, config, file_doc.get('is_binary', [False])[0], frozen['generated_date']) def _browse_folder(tree, path, config): """Return a rendered folder listing for folder ``path``. Search for FILEs having folder == path. If any matches, render the folder listing. Otherwise, raise NotFound. 
""" frozen = frozen_config(tree) files_and_folders = filtered_query( frozen['es_alias'], FILE, filter={'folder': path}, sort=[{'is_folder': 'desc'}, 'name'], size=10000, include=['name', 'modified', 'size', 'link', 'path', 'is_binary', 'is_folder']) if not files_and_folders: raise NotFound return render_template( 'folder.html', # Common template variables: www_root=config.www_root, tree=tree, tree_tuples=[ (t['name'], url_for('.parallel', tree=t['name'], path=path), t['description']) for t in frozen_configs()], generated_date=frozen['generated_date'], google_analytics_key=config.google_analytics_key, paths_and_names=_linked_pathname(path, tree), filters=filter_menu_items( plugins_named(frozen['enabled_plugins'])), # Autofocus only at the root of each tree: should_autofocus_query=path == '', # Folder template variables: name=basename(path) or tree, path=path, files_and_folders=[ (_icon_class_name(f), f['name'], decode_es_datetime(f['modified']) if 'modified' in f else None, f.get('size'), url_for('.browse', tree=tree, path=f.get('link', f['path'])[0])) for f in files_and_folders]) def skim_file(skimmers, num_lines): """Skim contents with all the skimmers, returning the things we need to make a template. Compare to dxr.build.index_file :arg skimmers: iterable of FileToSkim objects :arg num_lines: the number of lines in the file being skimmed """ linkses, refses, regionses = [], [], [] annotations_by_line = [[] for _ in xrange(num_lines)] for skimmer in skimmers: if skimmer.is_interesting(): linkses.append(skimmer.links()) refses.append(skimmer.refs()) regionses.append(skimmer.regions()) append_by_line(annotations_by_line, skimmer.annotations_by_line()) links = dictify_links(chain.from_iterable(linkses)) return links, refses, regionses, annotations_by_line def _build_common_file_template(tree, path, is_binary, date, config): """Return a dictionary of the common required file template parameters. """ return { # Common template variables: 'www_root': config.www_root, 'tree': tree, 'tree_tuples': [(t['name'], url_for('.parallel', tree=t['name'], path=path), t['description']) for t in frozen_configs()], 'generated_date': date, 'google_analytics_key': config.google_analytics_key, 'filters': filter_menu_items( plugins_named(frozen_config(tree)['enabled_plugins'])), # File template variables 'paths_and_names': _linked_pathname(path, tree), 'icon_url': url_for('.static', filename='icons/mimetypes/%s.png' % icon(path, is_binary)), 'path': path, 'name': basename(path) } def _browse_file(tree, path, line_docs, file_doc, config, is_binary, date=None, contents=None, image_rev=None): """Return a rendered page displaying a source file. :arg string tree: name of tree on which file is found :arg string path: relative path from tree root of file :arg list line_docs: LINE documents as defined in the mapping of core.py, where the `content` field is dereferenced :arg file_doc: the FILE document as defined in core.py :arg config: TreeConfig object of this tree :arg is_binary: Whether file is binary or not :arg date: a formatted string representing the generated date, default to now :arg string contents: the contents of the source file, defaults to joining the `content` field of all line_docs :arg image_rev: revision number of a textual or binary image, for images displayed at a certain rev """ def sidebar_links(sections): """Return data structure to build nav sidebar from. 
:: [('Section Name', [{'icon': ..., 'title': ..., 'href': ...}])] """ # Sort by order, resolving ties by section name: return sorted(sections, key=lambda section: (section['order'], section['heading'])) if not date: # Then assume that the file is generated now. Remark: we can't use this # as the default param because that is only evaluated once, so the same # time would always be used. date = datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S +0000") common = _build_common_file_template(tree, path, is_binary, date, config) links = file_doc.get('links', []) if is_binary_image(path): return render_template( 'image_file.html', **merge(common, { 'sections': sidebar_links(links), 'revision': image_rev})) elif is_binary: return render_template( 'text_file.html', **merge(common, { 'lines': [], 'is_binary': True, 'sections': sidebar_links(links)})) else: # We concretize the lines into a list because we iterate over it multiple times lines = [doc['content'] for doc in line_docs] if not contents: # If contents are not provided, we can reconstruct them by # stitching the lines together. contents = ''.join(lines) offsets = build_offset_map(lines) tree_config = config.trees[tree] if is_textual_image(path) and image_rev: # Add a link to view textual images on revs: links.extend(dictify_links([ (4, 'Image', [('svgview', 'View', url_for('.raw_rev', tree=tree_config.name, path=path, revision=image_rev))])])) # Construct skimmer objects for all enabled plugins that define a # file_to_skim class. skimmers = [plugin.file_to_skim(path, contents, plugin.name, tree_config, file_doc, line_docs) for plugin in tree_config.enabled_plugins if plugin.file_to_skim] skim_links, refses, regionses, annotationses = skim_file(skimmers, len(line_docs)) index_refs = (Ref.es_to_triple(ref, tree_config) for ref in chain.from_iterable(doc.get('refs', []) for doc in line_docs)) index_regions = (Region.es_to_triple(region) for region in chain.from_iterable(doc.get('regions', []) for doc in line_docs)) tags = finished_tags(lines, chain(chain.from_iterable(refses), index_refs), chain(chain.from_iterable(regionses), index_regions)) return render_template( 'text_file.html', **merge(common, { # Someday, it would be great to stream this and not concretize # the whole thing in RAM. The template will have to quit # looping through the whole thing 3 times. 'lines': [(html_line(doc['content'], tags_in_line, offset), doc.get('annotations', []) + skim_annotations) for doc, tags_in_line, offset, skim_annotations in izip(line_docs, tags_per_line(tags), offsets, annotationses)], 'sections': sidebar_links(links + skim_links)})) @dxr_blueprint.route('/<tree>/rev/<revision>/<path:path>') def rev(tree, revision, path): """Display a page showing the file at path at specified revision by obtaining the contents from version control. """ config = current_app.dxr_config tree_config = config.trees[tree] abs_path = join(tree_config.source_folder, path) contents = file_contents_at_rev(abs_path, revision) if contents is not None: image_rev = None if is_binary_image(path): is_text = False contents = '' image_rev = revision else: is_text, contents = decode_data(contents, tree_config.source_encoding) if not is_text: contents = '' elif is_textual_image(path): image_rev = revision # We do some wrapping to mimic the JSON returned by an ES lines query. 
return _browse_file(tree, path, [{'content': line} for line in contents.splitlines(True)], {}, config, not is_text, contents=contents, image_rev=image_rev) else: raise NotFound def _linked_pathname(path, tree_name): """Return a list of (server-relative URL, subtree name) tuples that can be used to display linked path components in the headers of file or folder pages. :arg path: The path that will be split """ # Hold the root of the tree: components = [('/%s/source' % tree_name, tree_name)] # Populate each subtree: dirs = path.split(os.sep) # TODO: Trips on \/ in path. # A special case when we're dealing with the root tree. Without # this, it repeats: if not path: return components for idx in range(1, len(dirs)+1): subtree_path = join('/', tree_name, 'source', *dirs[:idx]) subtree_name = split(subtree_path)[1] or tree_name components.append((subtree_path, subtree_name)) return components @dxr_blueprint.route('/<tree>/') @dxr_blueprint.route('/<tree>') def tree_root(tree): """Redirect requests for the tree root instead of giving 404s.""" # Don't do a redirect and then 404; that's tacky: es_alias_or_not_found(tree) return redirect(tree + '/source/') @dxr_blueprint.route('/<tree>/parallel/') @dxr_blueprint.route('/<tree>/parallel/<path:path>') def parallel(tree, path=''): """If a file or dir parallel to the given path exists in the given tree, redirect to it. Otherwise, redirect to the root of the given tree. Deferring this test lets us avoid doing 50 queries when drawing the Switch Tree menu when 50 trees are indexed: we check only when somebody actually chooses something. """ config = current_app.dxr_config files = filtered_query( es_alias_or_not_found(tree), FILE, filter={'path': path.rstrip('/')}, size=1, include=[]) # We don't really need anything. return redirect(('{root}/{tree}/source/{path}' if files else '{root}/{tree}/source/').format(root=config.www_root, tree=tree, path=path)) def _icon_class_name(file_doc): """Return a string for the CSS class of the icon for file document.""" if file_doc['is_folder']: return 'folder' class_name = icon(file_doc['name'], file_doc.get('is_binary', [False])[0]) # for small images, we can turn the image into icon via javascript # if bigger than the cutoff, we mark it as too big and don't do this if file_doc['size'] > current_app.dxr_config.max_thumbnail_size: class_name += " too_fat" return class_name def _request_wants_json(): """Return whether the current request prefers JSON. Why check if json has a higher quality than HTML and not just go with the best match? Because some browsers accept on */* and we don't want to deliver JSON to an ordinary browser. """ # From http://flask.pocoo.org/snippets/45/ best = request.accept_mimetypes.best_match(['application/json', 'text/html']) return (best == 'application/json' and request.accept_mimetypes[best] > request.accept_mimetypes['text/html']) def dictify_links(links): """Return a chain of order, heading, items links as a list of dicts.""" return [{'order': order, 'heading': heading, 'items': [{'icon': icon, 'title': title, 'href': href} for icon, title, href in items]} for order, heading, items in links]
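# ---------------------------------------------------------------------------
# Hedged sketch, not part of the app above: the static_manifest file that
# HashedStatics reads is one whitespace-separated "unhashed hashed" pair per
# line, kept just outside the static folder. This standalone snippet shows the
# same parse and lookup steps in isolation; the filenames and hash suffixes
# are made up for illustration, and a plain list stands in for the open file.
def _static_manifest_sketch():
    manifest_lines = [
        "css/dxr.css css/dxr.1a2b3c.css",
        "js/dxr.js js/dxr.4d5e6f.js",
    ]
    # Same shape as HashedStatics._manifest_near: dict of unhashed -> hashed.
    manifest = dict(line.split() for line in manifest_lines)
    # _hashed_url then swaps the requested filename for its hashed variant,
    # falling back to the original name when it is not in the manifest.
    assert manifest.get("css/dxr.css", "css/dxr.css") == "css/dxr.1a2b3c.css"
    assert manifest.get("img/logo.png", "img/logo.png") == "img/logo.png"
# ---------------------------------------------------------------------------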
""" Validators library """ import re from string import Template class ValidatorMetaclass(type): def __new__(cls, name, bases, classdict): error_code_map = {} error_messages = {} message_values = {} for base in reversed(bases): try: error_code_map.update(base.error_code_map) except AttributeError: pass try: error_messages.update(base.error_messages) except AttributeError: pass try: message_values.update(base.message_values) except AttributeError: pass try: error_code_map.update(classdict.pop('error_code_map')) except KeyError: pass try: error_messages.update(classdict.pop('error_messages')) except KeyError: pass try: message_values.update(classdict.pop('message_values')) except KeyError: pass classdict['error_code_map'] = error_code_map classdict['error_messages'] = error_messages classdict['message_values'] = message_values return super(ValidatorMetaclass, cls).__new__(cls, name, bases, classdict) class BaseValidator(metaclass=ValidatorMetaclass): error_code_map = {} error_messages = {} message_values = {} hidden_value = '**Hidden**' def __init__(self, error_code_map=None, error_messages=None, message_values=None, hidden=False, *args, **kwargs): """ :param error_code_map: Map of orginial error codes to custom error codes :rparam error_code_map: dict :param error_messages: Map of error codes to error messages :rparam error_messages: dict :param message_values: Map of placeholders to values :rparam error_messages: dict """ self.error_code_map = self.error_code_map.copy() self.error_messages = self.error_messages.copy() self.message_values = self.message_values.copy() if error_code_map: self.error_code_map.update(error_code_map) if error_messages: self.error_messages.update(error_messages) if message_values: self.message_values.update(message_values) self.messages = {} self.hidden = hidden def error(self, error_code, value, **kwargs): """ Helper to add error to messages field. It fills placeholder with extra call parameters or values from message_value map. :param error_code: Error code to use :rparam error_code: str :param value: Value checked :param kwargs: Map of values to use in placeholders """ code = self.error_code_map.get(error_code, error_code) try: message = Template(self.error_messages[code]) except KeyError: message = Template(self.error_messages[error_code]) placeholders = {"value": self.hidden_value if self.hidden else value} placeholders.update(kwargs) placeholders.update(self.message_values) self.messages[code] = message.safe_substitute(placeholders) def is_valid(self, value, *args, **kwargs): self.messages = {} return self._internal_is_valid(value, *args, **kwargs) def _internal_is_valid(self, value, *args, **kwargs): return True class EqualTo(BaseValidator): """ Compares value with a static value. """ NOT_EQUAL = 'notEqual' error_messages = {NOT_EQUAL: "'$value' is not equal to '$comp_value'"} def __init__(self, comp_value=None, *args, **kwargs): """ :param comp_value: Static value to use on check """ super(EqualTo, self).__init__(*args, **kwargs) self.comp_value = comp_value self.message_values.update({'comp_value': self.comp_value}) def _internal_is_valid(self, value, *args, **kwargs): if value != self.comp_value: self.error(self.NOT_EQUAL, value) return False return True class NotEqualTo(BaseValidator): """ Checks whether a value is distinct of static value. 
""" IS_EQUAL = 'isEqual' error_messages = {IS_EQUAL: "'$value' is equal to '$comp_value'"} def __init__(self, comp_value=None, *args, **kwargs): """ :param comp_value: Static value to use on check """ super(NotEqualTo, self).__init__(*args, **kwargs) self.comp_value = comp_value self.message_values.update({'comp_value': self.comp_value}) def _internal_is_valid(self, value, *args, **kwargs): if value == self.comp_value: self.error(self.IS_EQUAL, value) return False return True class StringNotContaining(BaseValidator): """ Checks that the value does not contain a static substring """ NOT_CONTAINS = 'notContains' error_messages = {NOT_CONTAINS: "'$value' contains '$token'"} def __init__(self, token=None, case_sensitive=True, *args, **kwargs): """ :param token: Static value to see check it is contained in the string :param case_sensitive: Boolean to check the string matching case or not """ super(StringNotContaining, self).__init__(*args, **kwargs) self.token = token self.case_sensitive = case_sensitive self.message_values.update({'token': self.token}) def _internal_is_valid(self, value, *args, **kwargs): if (not self.case_sensitive and (self.token.lower() not in value.lower())) or \ (self.case_sensitive and (self.token not in value)): return True self.error(self.NOT_CONTAINS, value) return False class Length(BaseValidator): """ Validates the length of a string. :param min: The minimum required length of the string. If not provided, minimum length will not be checked. :param max: The maximum length of the string. If not provided, maximum length will not be checked. """ TOO_LONG = 'tooLong' TOO_SHORT = 'tooShort' INVALID_TYPE = 'notLength' error_messages = { TOO_LONG: "'$value' is more than $max unit length", TOO_SHORT: "'$value' is less than $min unit length", INVALID_TYPE: "'$value' has no length" } def __init__(self, min=-1, max=-1, *args, **kwargs): super(Length, self).__init__(*args, **kwargs) assert min != -1 or max != -1, 'At least one of `min` or `max` must be specified.' assert max == -1 or min <= max, '`min` cannot be more than `max`.' self.min = min self.max = max self.message_values.update({"min": self.min, "max": self.max}) def _internal_is_valid(self, value, *args, **kwargs): try: length = len(value) or 0 if length < self.min: self.error(self.TOO_SHORT, value) return False if self.max != -1 and length > self.max: self.error(self.TOO_LONG, value) return False return True except TypeError: self.error(self.INVALID_TYPE, value) return False class NumberRange(BaseValidator): """ Validates that a number is of a minimum and/or maximum value, inclusive. This will work with any comparable number type, such as floats and decimals, not just integers. :param min: The minimum required value of the number. If not provided, minimum value will not be checked. :param max: The maximum value of the number. If not provided, maximum value will not be checked. """ OUT_OF_RANGE = 'outOfRange' error_messages = { OUT_OF_RANGE: "'$value' is out of range ($min, $max)", } def __init__(self, min=None, max=None, *args, **kwargs): super(NumberRange, self).__init__(*args, **kwargs) self.min = min self.max = max self.message_values.update({"min": self.min, "max": self.max}) def _internal_is_valid(self, value, *args, **kwargs): if value is None or (self.min is not None and value < self.min) or \ (self.max is not None and value > self.max): self.error(self.OUT_OF_RANGE, value) return False return True class Regexp(BaseValidator): """ Validates the field against a user provided regexp. 
:param regex: The regular expression string to use. Can also be a compiled regular expression pattern. :param flags: The regexp flags to use, for example re.IGNORECASE. Ignored if `regex` is not a string. """ NOT_MATCH = "notMatch" error_messages = { NOT_MATCH: "'$value' does not match against pattern '$regex'", } def __init__(self, regex, flags=0, *args, **kwargs): super(Regexp, self).__init__(*args, **kwargs) if isinstance(regex, str): regex = re.compile(regex, flags) self.regex = regex self.message_values.update({"regex": self.regex.pattern}) def _internal_is_valid(self, value, *args, **kwargs): try: if not self.regex.match(value or ''): self.error(self.NOT_MATCH, value) return False return True except TypeError: self.error(self.NOT_MATCH, value) class Email(Regexp): """ Validates an email address. Note that this uses a very primitive regular expression and should only be used in instances where you later verify by other means, such as email activation or lookups. """ NOT_MAIL = "notMail" error_code_map = {Regexp.NOT_MATCH: NOT_MAIL} error_messages = {NOT_MAIL: "'$value' is not a valid email address."} def __init__(self, *args, **kwargs): super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE, *args, **kwargs) class IPAddress(BaseValidator): """ Validates an IP address. :param ipv4: If True, accept IPv4 addresses as valid (default True) :param ipv6: If True, accept IPv6 addresses as valid (default False) """ NOT_IP_ADDRESS = 'notIdAddress' IPV4_NOT_ALLOWED = 'ipv4NotAllowed' IPV6_NOT_ALLOWED = 'ipv6NotAllowed' error_messages = { NOT_IP_ADDRESS: "'$value' does not appear to be a valid IP address. Allowed $types", IPV4_NOT_ALLOWED: "'$value' is an ipv4 address that is not allowed. Allowed $types", IPV6_NOT_ALLOWED: "'$value' is an ipv6 address that is not allowed. Allowed $types", } def __init__(self, ipv4=True, ipv6=False, *args, **kwargs): super(IPAddress, self).__init__(*args, **kwargs) if not ipv4 and not ipv6: raise ValueError('IP Address Validator must have at least one of ipv4 or ipv6 enabled.') self.ipv4 = ipv4 self.ipv6 = ipv6 self.message_values.update({'types': ' and '.join([x for x in ('ipv4', 'ipv6') if getattr(self, x)])}) def _internal_is_valid(self, value, *args, **kwargs): if self.check_ipv4(value): if not self.ipv4: self.error(self.IPV4_NOT_ALLOWED, value) return False return True if self.check_ipv6(value): if not self.ipv6: self.error(self.IPV6_NOT_ALLOWED, value) return False return True self.error(self.NOT_IP_ADDRESS, value) return False def check_ipv4(self, value): try: parts = value.split('.') except AttributeError: return False if len(parts) == 4 and all(x.isdigit() for x in parts): numbers = list(int(x) for x in parts) return all(num >= 0 and num < 256 for num in numbers) return False def check_ipv6(self, value): try: parts = value.split(':') except AttributeError: return False if not 2 <= len(parts) <= 8: return False num_blank = 0 for part in parts: if not part: num_blank += 1 else: try: value = int(part, 16) except ValueError: return False else: if value < 0 or value >= 65536: return False if num_blank < 2: return True elif num_blank == 2 and not parts[0] and not parts[1]: return True return False class MacAddress(Regexp): """ Validates a MAC address. 
""" INVALID_MAC_ADDRESS = 'invalidMacAddress' error_code_map = {Regexp.NOT_MATCH: INVALID_MAC_ADDRESS} error_messages = {INVALID_MAC_ADDRESS: "'$value' is not a valid mac address."} def __init__(self, *args, **kwargs): pattern = r'^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$' super(MacAddress, self).__init__(pattern, *args, **kwargs) class URL(Regexp): """ Simple regexp based url validation. Much like the email validator, you probably want to validate the url later by other means if the url must resolve. :param require_tld: If true, then the domain-name portion of the URL must contain a .tld suffix. Set this to false if you want to allow domains like `localhost`. """ INVALID_URL = 'invalidUrl' error_code_map = {Regexp.NOT_MATCH: INVALID_URL} error_messages = {INVALID_URL: "'$value' is not a valid url."} def __init__(self, require_tld=True, *args, **kwargs): tld_part = (require_tld and r'\.[a-z]{2,10}' or '') regex = r'^[a-z]+://([^/:]+%s|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$' % tld_part super(URL, self).__init__(regex, re.IGNORECASE, *args, **kwargs) class UUID(Regexp): """ Validates a UUID. """ INVALID_UUID = 'invalidUuid' error_code_map = {Regexp.NOT_MATCH: INVALID_UUID} error_messages = {INVALID_UUID: "'$value' is not a valid UUID."} def __init__(self, *args, **kwargs): pattern = r'^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$' super(UUID, self).__init__(pattern, *args, **kwargs) class AnyOf(BaseValidator): """ Compares the incoming data to a sequence of valid inputs. :param values: A sequence of valid inputs. :param values_formatter: Function used to format the list of values in the error message. """ NOT_IN_LIST = 'notInList' error_messages = {NOT_IN_LIST: "'$value' is none of $values."} def __init__(self, values, values_formatter=None, *args, **kwargs): super(AnyOf, self).__init__(*args, **kwargs) self.values = values if values_formatter is None: values_formatter = self.default_values_formatter self.values_formatter = values_formatter def _internal_is_valid(self, value, *args, **kwargs): if value not in self.values: self.error(self.NOT_IN_LIST, value, values=self.values_formatter(self.values)) return False return True @staticmethod def default_values_formatter(values): return ', '.join(str(x) if not isinstance(x, str) else "'%s'" % x for x in values) class NoneOf(BaseValidator): """ Compares the incoming data to a sequence of invalid inputs. :param values: A sequence of invalid inputs. :param values_formatter: Function used to format the list of values in the error message. 
""" IN_LIST = 'inList' error_messages = {IN_LIST: "'$value' is one of $values."} def __init__(self, values, values_formatter=None, *args, **kwargs): super(NoneOf, self).__init__(*args, **kwargs) self.values = values if values_formatter is None: def values_formatter(v): return ', '.join(str(x) if not isinstance(x, str) else "'%s'" % x for x in values) self.values_formatter = values_formatter def _internal_is_valid(self, value, *args, **kwargs): if value in self.values: self.error(self.IN_LIST, value, values=self.values_formatter(self.values)) return False return True class IsEmpty(BaseValidator): """ Compares the incoming value with an empty one """ EMPTY = 'Empty' error_messages = {EMPTY: "'$value' must be empty"} def _internal_is_valid(self, value, *args, **kwargs): if value: self.error(self.EMPTY, value) return False return True class NotEmpty(BaseValidator): """ Raise error when it is empty """ NOT_EMPTY = 'notEmpty' error_messages = {NOT_EMPTY: "Value can not be empty"} def _internal_is_valid(self, value, *args, **kwargs): if not value: self.error(self.NOT_EMPTY, value) return False return True class NotEmptyString(NotEmpty): """ Raise error when it is empty """ NOT_EMPTY = 'notEmpty' NOT_STRING = 'notString' error_messages = { NOT_EMPTY: "Value can not be empty", NOT_STRING: "Value must be a string" } def _internal_is_valid(self, value, *args, **kwargs): if not isinstance(value, str): self.error(self.NOT_STRING, value) return False return super(NotEmptyString, self)._internal_is_valid(value.strip(), args, kwargs) class IsNone(BaseValidator): """ Raise error if it is not None """ NONE = 'None' error_messages = {NONE: "'$value' must be None"} def _internal_is_valid(self, value, *args, **kwargs): if value is not None: self.error(self.NONE, value) return False return True class NotNone(BaseValidator): """ Raise error if it is None """ NOT_NONE = 'notNone' error_messages = {NOT_NONE: "Value must not be None"} def _internal_is_valid(self, value, *args, **kwargs): if value is None: self.error(self.NOT_NONE, value) return False return True
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.common import exception from heat.common.i18n import _ from heat.engine import constraints from heat.engine import properties from heat.engine import resource from heat.engine import support class KeystoneRoleAssignment(resource.Resource): ''' Keystone Role assignment class implements role assignments between user/groups and project/domain. heat_template_version: 2013-05-23 parameters: ... Group or User parameters group_role: type: string description: role group_role_domain: type: string description: group role domain group_role_project: type: string description: group role project resources: admin_group: type: OS::Keystone::Group OR OS::Keystone::User properties: ... Group or User properties roles: - role: {get_param: group_role} domain: {get_param: group_role_domain} - role: {get_param: group_role} project: {get_param: group_role_project} ''' support_status = support.SupportStatus( version='2015.1', message=_('Supported versions: keystone v3')) default_client_name = 'keystone' PROPERTIES = ( ROLES ) = ( 'roles' ) _ROLES_MAPPING_PROPERTIES = ( ROLE, DOMAIN, PROJECT ) = ( 'role', 'domain', 'project' ) properties_schema = { ROLES: properties.Schema( properties.Schema.LIST, _('List of role assignments.'), schema=properties.Schema( properties.Schema.MAP, _('Map between role with either project or domain.'), schema={ ROLE: properties.Schema( properties.Schema.STRING, _('Keystone role'), required=True, constraints=([constraints. CustomConstraint('keystone.role')]) ), PROJECT: properties.Schema( properties.Schema.STRING, _('Keystone project'), constraints=([constraints. CustomConstraint('keystone.project')]) ), DOMAIN: properties.Schema( properties.Schema.STRING, _('Keystone domain'), constraints=([constraints. 
CustomConstraint('keystone.domain')]) ), } ), update_allowed=True ) } def _add_role_assignments_to_group(self, group_id, role_assignments): for role_assignment in self._normalize_to_id(role_assignments): if role_assignment.get(self.PROJECT) is not None: self.client().client.roles.grant( role=role_assignment.get(self.ROLE), project=role_assignment.get(self.PROJECT), group=group_id ) elif role_assignment.get(self.DOMAIN) is not None: self.client().client.roles.grant( role=role_assignment.get(self.ROLE), domain=role_assignment.get(self.DOMAIN), group=group_id ) def _add_role_assignments_to_user(self, user_id, role_assignments): for role_assignment in self._normalize_to_id(role_assignments): if role_assignment.get(self.PROJECT) is not None: self.client().client.roles.grant( role=role_assignment.get(self.ROLE), project=role_assignment.get(self.PROJECT), user=user_id ) elif role_assignment.get(self.DOMAIN) is not None: self.client().client.roles.grant( role=role_assignment.get(self.ROLE), domain=role_assignment.get(self.DOMAIN), user=user_id ) def _remove_role_assignments_from_group(self, group_id, role_assignments): for role_assignment in self._normalize_to_id(role_assignments): if role_assignment.get(self.PROJECT) is not None: self.client().client.roles.revoke( role=role_assignment.get(self.ROLE), project=role_assignment.get(self.PROJECT), group=group_id ) elif role_assignment.get(self.DOMAIN) is not None: self.client().client.roles.revoke( role=role_assignment.get(self.ROLE), domain=role_assignment.get(self.DOMAIN), group=group_id ) def _remove_role_assignments_from_user(self, user_id, role_assignments): for role_assignment in self._normalize_to_id(role_assignments): if role_assignment.get(self.PROJECT) is not None: self.client().client.roles.revoke( role=role_assignment.get(self.ROLE), project=role_assignment.get(self.PROJECT), user=user_id ) elif role_assignment.get(self.DOMAIN) is not None: self.client().client.roles.revoke( role=role_assignment.get(self.ROLE), domain=role_assignment.get(self.DOMAIN), user=user_id ) def _normalize_to_id(self, role_assignment_prps): role_assignments = [] if role_assignment_prps is None: return role_assignments for role_assignment in role_assignment_prps: role = role_assignment.get(self.ROLE) project = role_assignment.get(self.PROJECT) domain = role_assignment.get(self.DOMAIN) role_assignments.append({ self.ROLE: self.client_plugin().get_role_id(role), self.PROJECT: (self.client_plugin(). get_project_id(project)) if project else None, self.DOMAIN: (self.client_plugin(). 
get_domain_id(domain)) if domain else None }) return role_assignments @staticmethod def _find_diff(updated_prps, stored_prps): updated_role_project_assignments = [] updated_role_domain_assignments = [] # Split the properties into two set of role assignments # (project, domain) from updated properties for role_assignment in updated_prps or []: if role_assignment.get(KeystoneRoleAssignment.PROJECT) is not None: updated_role_project_assignments.append( '%s:%s' % ( role_assignment[KeystoneRoleAssignment.ROLE], role_assignment[KeystoneRoleAssignment.PROJECT])) elif (role_assignment.get(KeystoneRoleAssignment.DOMAIN) is not None): updated_role_domain_assignments.append( '%s:%s' % (role_assignment[KeystoneRoleAssignment.ROLE], role_assignment[KeystoneRoleAssignment.DOMAIN])) stored_role_project_assignments = [] stored_role_domain_assignments = [] # Split the properties into two set of role assignments # (project, domain) from updated properties for role_assignment in (stored_prps or []): if role_assignment.get(KeystoneRoleAssignment.PROJECT) is not None: stored_role_project_assignments.append( '%s:%s' % ( role_assignment[KeystoneRoleAssignment.ROLE], role_assignment[KeystoneRoleAssignment.PROJECT])) elif (role_assignment.get(KeystoneRoleAssignment.DOMAIN) is not None): stored_role_domain_assignments.append( '%s:%s' % (role_assignment[KeystoneRoleAssignment.ROLE], role_assignment[KeystoneRoleAssignment.DOMAIN])) new_role_assignments = [] removed_role_assignments = [] # NOTE: finding the diff of list of strings is easier by using 'set' # so properties are converted to string in above sections # New items for item in (set(updated_role_project_assignments) - set(stored_role_project_assignments)): new_role_assignments.append( {KeystoneRoleAssignment.ROLE: item[:item.find(':')], KeystoneRoleAssignment.PROJECT: item[item.find(':') + 1:]} ) for item in (set(updated_role_domain_assignments) - set(stored_role_domain_assignments)): new_role_assignments.append( {KeystoneRoleAssignment.ROLE: item[:item.find(':')], KeystoneRoleAssignment.DOMAIN: item[item.find(':') + 1:]} ) # Old items for item in (set(stored_role_project_assignments) - set(updated_role_project_assignments)): removed_role_assignments.append( {KeystoneRoleAssignment.ROLE: item[:item.find(':')], KeystoneRoleAssignment.PROJECT: item[item.find(':') + 1:]} ) for item in (set(stored_role_domain_assignments) - set(updated_role_domain_assignments)): removed_role_assignments.append( {KeystoneRoleAssignment.ROLE: item[:item.find(':')], KeystoneRoleAssignment.DOMAIN: item[item.find(':') + 1:]} ) return new_role_assignments, removed_role_assignments def handle_create(self, user_id=None, group_id=None): if self.properties.get(self.ROLES) is not None: if user_id is not None: self._add_role_assignments_to_user( user_id, self.properties.get(self.ROLES)) elif group_id is not None: self._add_role_assignments_to_group( group_id, self.properties.get(self.ROLES)) def handle_update(self, user_id=None, group_id=None, prop_diff=None): (new_role_assignments, removed_role_assignments) = KeystoneRoleAssignment._find_diff( prop_diff.get(self.ROLES), self._stored_properties_data.get(self.ROLES)) if len(new_role_assignments) > 0: if user_id is not None: self._add_role_assignments_to_user( user_id, new_role_assignments) elif group_id is not None: self._add_role_assignments_to_group( group_id, new_role_assignments) if len(removed_role_assignments) > 0: if user_id is not None: self._remove_role_assignments_from_user( user_id, removed_role_assignments) elif group_id is not 
None: self._remove_role_assignments_from_group( group_id, removed_role_assignments) def handle_delete(self, user_id=None, group_id=None): if self._stored_properties_data.get(self.ROLES) is not None: if user_id is not None: self._remove_role_assignments_from_user( user_id, (self._stored_properties_data. get(self.ROLES))) elif group_id is not None: self._remove_role_assignments_from_group( group_id, (self._stored_properties_data. get(self.ROLES))) def validate(self): super(KeystoneRoleAssignment, self).validate() if self.properties.get(self.ROLES) is not None: for role_assignment in self.properties.get(self.ROLES): project = role_assignment.get(self.PROJECT) domain = role_assignment.get(self.DOMAIN) if project is not None and domain is not None: raise exception.ResourcePropertyConflict(self.PROJECT, self.DOMAIN) if project is None and domain is None: msg = _('Either project or domain must be specified for' ' role %s') % role_assignment.get(self.ROLE) raise exception.StackValidationFailed(message=msg)
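# ---------------------------------------------------------------------------
# Hedged sketch, not part of the resource above: _find_diff() is a pure static
# method, so its update semantics can be exercised without a Keystone client.
# The role and project names below are illustrative only, and the function is
# never called here.
def _find_diff_sketch():
    stored = [{'role': 'admin', 'project': 'demo', 'domain': None}]
    updated = [{'role': 'admin', 'project': 'demo', 'domain': None},
               {'role': 'member', 'project': 'demo', 'domain': None}]
    new, removed = KeystoneRoleAssignment._find_diff(updated, stored)
    # Only the assignment present in the updated list but not in the stored
    # one is granted; nothing is revoked in this case.
    assert new == [{'role': 'member', 'project': 'demo'}]
    assert removed == []
# ---------------------------------------------------------------------------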
""" utility functions for asteval Matthew Newville <[email protected]>, The University of Chicago """ import ast import io import math import numbers import re from sys import exc_info from tokenize import ENCODING as tk_ENCODING from tokenize import NAME as tk_NAME from tokenize import tokenize as generate_tokens HAS_NUMPY = False numpy = None ndarr = None try: import numpy ndarr = numpy.ndarray HAS_NUMPY = True numpy_version = numpy.version.version.split('.', 2) except ImportError: pass MAX_EXPONENT = 10000 MAX_STR_LEN = 2 << 17 # 256KiB MAX_SHIFT = 1000 MAX_OPEN_BUFFER = 2 << 17 RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'print', 'raise', 'return', 'try', 'while', 'with', 'True', 'False', 'None', 'eval', 'execfile', '__import__', '__package__') NAME_MATCH = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$").match UNSAFE_ATTRS = ('__subclasses__', '__bases__', '__globals__', '__code__', '__reduce__', '__reduce_ex__', '__mro__', '__closure__', '__func__', '__self__', '__module__', '__dict__', '__class__', '__call__', '__get__', '__getattribute__', '__subclasshook__', '__new__', '__init__', 'func_globals', 'func_code', 'func_closure', 'im_class', 'im_func', 'im_self', 'gi_code', 'gi_frame', 'f_locals', '__asteval__') # inherit these from python's __builtins__ FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning', 'EOFError', 'EnvironmentError', 'Exception', 'False', 'FloatingPointError', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'None', 'NotImplementedError', 'OSError', 'OverflowError', 'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'True', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'ValueError', 'Warning', 'ZeroDivisionError', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes', 'chr', 'complex', 'dict', 'dir', 'divmod', 'enumerate', 'filter', 'float', 'format', 'frozenset', 'hash', 'hex', 'id', 'int', 'isinstance', 'len', 'list', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range', 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str', 'sum', 'tuple', 'zip') # inherit these from python's math FROM_MATH = ('acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'exp', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'hypot', 'isinf', 'isnan', 'ldexp', 'log', 'log10', 'log1p', 'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc') FROM_NUMPY = ('Inf', 'NAN', 'abs', 'add', 'alen', 'all', 'amax', 'amin', 'angle', 'any', 'append', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'argwhere', 'around', 'array', 'array2string', 'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray', 'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett', 'base_repr', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman', 'bool', 'broadcast', 'broadcast_arrays', 'byte', 'c_', 'cdouble', 'ceil', 'cfloat', 'chararray', 'choose', 
'clip', 'clongdouble', 'clongfloat', 'column_stack', 'common_type', 'complex', 'complex128', 'complex64', 'complex_', 'complexfloating', 'compress', 'concatenate', 'conjugate', 'convolve', 'copy', 'copysign', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov', 'cross', 'csingle', 'cumprod', 'cumsum', 'datetime_data', 'deg2rad', 'degrees', 'delete', 'diag', 'diag_indices', 'diag_indices_from', 'diagflat', 'diagonal', 'diff', 'digitize', 'divide', 'dot', 'double', 'dsplit', 'dstack', 'dtype', 'e', 'ediff1d', 'empty', 'empty_like', 'equal', 'exp', 'exp2', 'expand_dims', 'expm1', 'extract', 'eye', 'fabs', 'fill_diagonal', 'finfo', 'fix', 'flatiter', 'flatnonzero', 'fliplr', 'flipud', 'float', 'float32', 'float64', 'float_', 'floating', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', 'format_parser', 'frexp', 'frombuffer', 'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromregex', 'fromstring', 'fv', 'genfromtxt', 'getbufsize', 'geterr', 'gradient', 'greater', 'greater_equal', 'hamming', 'hanning', 'histogram', 'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0', 'identity', 'iinfo', 'imag', 'in1d', 'index_exp', 'indices', 'inexact', 'inf', 'info', 'infty', 'inner', 'insert', 'int', 'int0', 'int16', 'int32', 'int64', 'int8', 'int_', 'int_asbuffer', 'intc', 'integer', 'interp', 'intersect1d', 'intp', 'invert', 'ipmt', 'irr', 'iscomplex', 'iscomplexobj', 'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar', 'issctype', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'linspace', 'little_endian', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace', 'long', 'longcomplex', 'longdouble', 'longfloat', 'longlong', 'mafromtxt', 'mask_indices', 'mat', 'matrix', 'maximum', 'maximum_sctype', 'may_share_memory', 'mean', 'median', 'memmap', 'meshgrid', 'mgrid', 'minimum', 'mintypecode', 'mirr', 'mod', 'modf', 'msort', 'multiply', 'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum', 'ndarray', 'ndenumerate', 'ndfromtxt', 'ndim', 'ndindex', 'negative', 'newaxis', 'nextafter', 'nonzero', 'not_equal', 'nper', 'npv', 'number', 'obj2sctype', 'ogrid', 'ones', 'ones_like', 'outer', 'packbits', 'percentile', 'pi', 'piecewise', 'place', 'pmt', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'ppmt', 'prod', 'product', 'ptp', 'put', 'putmask', 'pv', 'r_', 'rad2deg', 'radians', 'rank', 'rate', 'ravel', 'real', 'real_if_close', 'reciprocal', 'record', 'remainder', 'repeat', 'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_', 'sctype2char', 'searchsorted', 'select', 'setbufsize', 'setdiff1d', 'seterr', 'setxor1d', 'shape', 'short', 'sign', 'signbit', 'signedinteger', 'sin', 'sinc', 'single', 'singlecomplex', 'sinh', 'size', 'sometrue', 'sort', 'sort_complex', 'spacing', 'split', 'sqrt', 'square', 'squeeze', 'std', 'str', 'str_', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot', 'tile', 'trace', 'transpose', 'trapz', 'tri', 'tril', 'tril_indices', 'tril_indices_from', 'trim_zeros', 'triu', 'triu_indices', 'triu_indices_from', 'true_divide', 'trunc', 'ubyte', 'uint', 'uint0', 'uint16', 'uint32', 'uint64', 'uint8', 'uintc', 'uintp', 'ulonglong', 'union1d', 'unique', 'unravel_index', 'unsignedinteger', 'unwrap', 'ushort', 'vander', 'var', 
'vdot', 'vectorize', 'vsplit', 'vstack', 'where', 'who', 'zeros', 'zeros_like', 'fft', 'linalg', 'polynomial', 'random') NUMPY_RENAMES = {'ln': 'log', 'asin': 'arcsin', 'acos': 'arccos', 'atan': 'arctan', 'atan2': 'arctan2', 'atanh': 'arctanh', 'acosh': 'arccosh', 'asinh': 'arcsinh'} def _open(filename, mode='r', buffering=-1): """read only version of open()""" if mode not in ('r', 'rb', 'rU'): raise RuntimeError("Invalid open file mode, must be 'r', 'rb', or 'rU'") if buffering > MAX_OPEN_BUFFER: raise RuntimeError(f"Invalid buffering value, max buffer size is {MAX_OPEN_BUFFER}") return open(filename, mode, buffering) def _type(obj, *varargs, **varkws): """type that prevents varargs and varkws""" return type(obj).__name__ LOCALFUNCS = {'open': _open, 'type': _type} # Safe versions of functions to prevent denial of service issues def safe_pow(base, exp): """safe version of pow""" if isinstance(exp, numbers.Number): if exp > MAX_EXPONENT: raise RuntimeError(f"Invalid exponent, max exponent is {MAX_EXPONENT}") elif HAS_NUMPY and isinstance(exp, ndarr): if numpy.nanmax(exp) > MAX_EXPONENT: raise RuntimeError(f"Invalid exponent, max exponent is {MAX_EXPONENT}") if isinstance(base, int): ret = (1.0*base)**exp if isinstance(exp, int): return int(ret) return ret return base ** exp def safe_mult(a, b): """safe version of multiply""" if isinstance(a, str) and isinstance(b, int) and len(a) * b > MAX_STR_LEN: raise RuntimeError(f"String length exceeded, max string length is {MAX_STR_LEN}") return a * b def safe_add(a, b): """safe version of add""" if isinstance(a, str) and isinstance(b, str) and len(a) + len(b) > MAX_STR_LEN: raise RuntimeError(f"String length exceeded, max string length is {MAX_STR_LEN}") return a + b def safe_lshift(a, b): """safe version of lshift""" if isinstance(b, numbers.Number): if b > MAX_SHIFT: raise RuntimeError(f"Invalid left shift, max left shift is {MAX_SHIFT}") elif HAS_NUMPY and isinstance(b, ndarr): if numpy.nanmax(b) > MAX_SHIFT: raise RuntimeError(f"Invalid left shift, max left shift is {MAX_SHIFT}") return a << b OPERATORS = {ast.Is: lambda a, b: a is b, ast.IsNot: lambda a, b: a is not b, ast.In: lambda a, b: a in b, ast.NotIn: lambda a, b: a not in b, ast.Add: safe_add, ast.BitAnd: lambda a, b: a & b, ast.BitOr: lambda a, b: a | b, ast.BitXor: lambda a, b: a ^ b, ast.Div: lambda a, b: a / b, ast.FloorDiv: lambda a, b: a // b, ast.LShift: safe_lshift, ast.RShift: lambda a, b: a >> b, ast.Mult: safe_mult, ast.Pow: safe_pow, ast.Sub: lambda a, b: a - b, ast.Mod: lambda a, b: a % b, ast.And: lambda a, b: a and b, ast.Or: lambda a, b: a or b, ast.Eq: lambda a, b: a == b, ast.Gt: lambda a, b: a > b, ast.GtE: lambda a, b: a >= b, ast.Lt: lambda a, b: a < b, ast.LtE: lambda a, b: a <= b, ast.NotEq: lambda a, b: a != b, ast.Invert: lambda a: ~a, ast.Not: lambda a: not a, ast.UAdd: lambda a: +a, ast.USub: lambda a: -a} def valid_symbol_name(name): """Determine whether the input symbol name is a valid name. Arguments --------- name : str name to check for validity. 
Returns
    -------
    valid : bool
        whether name is a valid symbol name

    This checks for Python reserved words and that the name matches the
    regular expression ``[a-zA-Z_][a-zA-Z0-9_]*``.
    """
    if name in RESERVED_WORDS:
        return False
    gen = generate_tokens(io.BytesIO(name.encode('utf-8')).readline)
    typ, _, start, end, _ = next(gen)
    if typ == tk_ENCODING:
        typ, _, start, end, _ = next(gen)
    return typ == tk_NAME and start == (1, 0) and end == (1, len(name))


def op2func(op):
    """Return function for operator nodes."""
    return OPERATORS[op.__class__]


class Empty:
    """Empty class."""

    def __init__(self):
        """TODO: docstring in public method."""
        pass

    def __nonzero__(self):
        """TODO: docstring in magic method."""
        return False


ReturnedNone = Empty()


class ExceptionHolder:
    """Basic exception handler."""

    def __init__(self, node, exc=None, msg='', expr=None, lineno=None):
        """TODO: docstring in public method."""
        self.node = node
        self.expr = expr
        self.msg = msg
        self.exc = exc
        self.lineno = lineno
        self.exc_info = exc_info()
        if self.exc is None and self.exc_info[0] is not None:
            self.exc = self.exc_info[0]
        if self.msg == '' and self.exc_info[1] is not None:
            self.msg = self.exc_info[1]

    def get_error(self):
        """Retrieve error data."""
        col_offset = -1
        if self.node is not None:
            try:
                col_offset = self.node.col_offset
            except AttributeError:
                pass
        try:
            exc_name = self.exc.__name__
        except AttributeError:
            exc_name = str(self.exc)
        if exc_name in (None, 'None'):
            exc_name = 'UnknownError'
        out = [" %s" % self.expr]
        if col_offset > 0:
            out.append(" %s^^^" % ((col_offset)*' '))
        out.append(str(self.msg))
        return (exc_name, '\n'.join(out))


class NameFinder(ast.NodeVisitor):
    """Find all symbol names used by a parsed node."""

    def __init__(self):
        """TODO: docstring in public method."""
        self.names = []
        ast.NodeVisitor.__init__(self)

    def generic_visit(self, node):
        """TODO: docstring in public method."""
        if node.__class__.__name__ == 'Name':
            if node.ctx.__class__ == ast.Load and node.id not in self.names:
                self.names.append(node.id)
        ast.NodeVisitor.generic_visit(self, node)


builtins = __builtins__
if not isinstance(builtins, dict):
    builtins = builtins.__dict__


def get_ast_names(astnode):
    """Return symbol Names from an AST node."""
    finder = NameFinder()
    finder.generic_visit(astnode)
    return finder.names


def make_symbol_table(use_numpy=True, **kws):
    """Create a default symbol table, taking a dict of user-defined symbols.

    Arguments
    ---------
    use_numpy : bool, optional
        whether to include symbols from numpy
    kws : optional
        additional symbol name, value pairs to include in symbol table

    Returns
    -------
    symbol_table : dict
        a symbol table that can be used in `asteval.Interpreter`

    """
    symtable = {}

    for sym in FROM_PY:
        if sym in builtins:
            symtable[sym] = builtins[sym]

    for sym in FROM_MATH:
        if hasattr(math, sym):
            symtable[sym] = getattr(math, sym)

    if HAS_NUMPY and use_numpy:
        # aliases deprecated in NumPy v1.20.0
        deprecated = ['str', 'bool', 'int', 'float', 'complex', 'pv', 'rate',
                      'pmt', 'ppmt', 'npv', 'nper', 'long', 'mirr', 'fv',
                      'irr', 'ipmt']
        for sym in FROM_NUMPY:
            if (int(numpy_version[0]) == 1 and int(numpy_version[1]) >= 20
                    and sym in deprecated):
                continue
            if hasattr(numpy, sym):
                symtable[sym] = getattr(numpy, sym)
        for name, sym in NUMPY_RENAMES.items():
            if hasattr(numpy, sym):
                symtable[name] = getattr(numpy, sym)

    symtable.update(LOCALFUNCS)
    symtable.update(kws)
    return symtable
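# ---------------------------------------------------------------------------
# Usage sketch (not part of the original asteval utilities): a minimal
# demonstration of the helpers defined above. It only exercises functions
# from this module (make_symbol_table, valid_symbol_name and the safe_*
# guards); the ``answer=42`` entry is an arbitrary user-defined symbol added
# for illustration. Passing the table to ``asteval.Interpreter`` is the
# intended next step but is not shown here, since that class lives elsewhere.
if __name__ == '__main__':
    # Build a symbol table with math (and numpy, if available) symbols plus
    # one user-defined constant.
    table = make_symbol_table(use_numpy=HAS_NUMPY, answer=42)
    print('sqrt' in table, table['answer'])   # True 42

    # Symbol-name validation rejects reserved words and non-identifiers.
    print(valid_symbol_name('x_1'))           # True
    print(valid_symbol_name('lambda'))        # False (reserved word)
    print(valid_symbol_name('x.y'))           # False (not a bare identifier)

    # The safe_* wrappers mirror the plain operators but guard against
    # denial-of-service inputs.
    print(safe_add('ab', 'cd'))               # 'abcd'
    try:
        safe_pow(10, MAX_EXPONENT + 1)
    except RuntimeError as exc:
        print('blocked:', exc)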
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import auto_control_deps as acd from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import gen_sendrecv_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam from tensorflow.python.training import momentum class AutomaticControlDependenciesTest(test.TestCase): def testBasic(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: v.assign(v + 1) v.assign(2 * v) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val, 4.0) def testNoControlDepsBetweenVariableReads(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) self.assertNotIn(read_op1, read_op2.control_inputs) self.assertNotIn(read_op2, read_op1.control_inputs) def testVariableReadThenWrite(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) # Writes should have control deps from "all" reads since last write # or start of the code block. self.assertIn(read_op1, assign_op.control_inputs) self.assertIn(read_op2, assign_op.control_inputs) # There should be no control deps between reads. 
self.assertNotIn(read_op1, read_op2.control_inputs) self.assertNotIn(read_op2, read_op1.control_inputs) def testVariableWriteThenRead(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): assign_op = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op # Reads should have a control dep from the last write. self.assertIn(assign_op, read_op1.control_inputs) self.assertIn(assign_op, read_op2.control_inputs) # There should be no control deps between reads. self.assertNotIn(read_op1, read_op2.control_inputs) self.assertNotIn(read_op2, read_op1.control_inputs) def testVariableReadsInOpsWithMustRun(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: read_op = gen_resource_variable_ops.read_variable_op(v.handle, v.dtype).op # Read ops get added to control outputs only if they have consumers. c.mark_as_return(read_op.outputs[0]) self.assertIn(read_op, c.ops_which_must_run) def testVariableMultipleReadsAndWrites(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: # 2 reads -> 2 writes -> 2 reads -> 2 writes. read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op1 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) assign_op2 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) read_op3 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op4 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op3 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) assign_op4 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) # Read ops get added to control outputs only if they have consumers. c.mark_as_return(read_op1.outputs[0]) c.mark_as_return(read_op2.outputs[0]) c.mark_as_return(read_op3.outputs[0]) c.mark_as_return(read_op4.outputs[0]) # Verify the control edges. self.assertIn(read_op1, assign_op1.control_inputs) self.assertIn(read_op2, assign_op1.control_inputs) self.assertIn(assign_op1, assign_op2.control_inputs) self.assertIn(assign_op2, read_op3.control_inputs) self.assertIn(assign_op2, read_op4.control_inputs) self.assertIn(read_op3, assign_op3.control_inputs) self.assertIn(read_op4, assign_op3.control_inputs) self.assertIn(assign_op3, assign_op4.control_inputs) # There should be no control deps between reads. read_ops = [read_op1, read_op2, read_op3, read_op4] for src_op, tgt_op in itertools.product(read_ops, read_ops): self.assertNotIn(src_op, tgt_op.control_inputs) # Reads must be in `ops_which_must_run`. self.assertIn(read_op1, c.ops_which_must_run) self.assertIn(read_op2, c.ops_which_must_run) self.assertIn(read_op3, c.ops_which_must_run) self.assertIn(read_op4, c.ops_which_must_run) # Last write must be in `ops_which_must_run`. 
self.assertIn(assign_op4, c.ops_which_must_run) def testSendInOpsWithMustRun(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: send_op = gen_sendrecv_ops.send(v, "x", "/", 0, "/") # Send must be in `ops_which_must_run`. self.assertIn(send_op, c.ops_which_must_run) def _testVariableReadInFunctionalOp(self, build_functional_op, op_type): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) @def_function.function def read_var_in_while(): gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read1") result = build_functional_op(v) gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read2") gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return result func_graph = read_var_in_while.get_concrete_function().graph assert len(func_graph.inputs) == 1 def get_op(op_type, sub_name): operations = [ op for op in func_graph.get_operations() if op.type == op_type and sub_name in op.name ] assert len(operations) == 1 return operations[0] read1 = get_op("ReadVariableOp", "read1") functional_op = get_op(op_type, "") read2 = get_op("ReadVariableOp", "read2") assign_op = get_op("AssignVariableOp", "") # Since the functional op only has reads, previous reads e.g. read1 do not\ # have a control edge to it and next future reads e.g. read2 do not have a # control edge from it. self.assertNotIn(read1, functional_op.control_inputs) self.assertNotIn(functional_op, read2.control_inputs) self.assertIn(read1, assign_op.control_inputs) self.assertIn(read2, assign_op.control_inputs) self.assertIn(functional_op, assign_op.control_inputs) def testVariableReadInWhileLoop(self): def build_functional_op(v): def body(_): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) self._testVariableReadInFunctionalOp(build_functional_op, "While") def testVariableReadInCondTrueBranch(self): def build_functional_op(v): def then_branch(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return control_flow_ops.cond( constant_op.constant(True), then_branch, else_branch) self._testVariableReadInFunctionalOp(build_functional_op, "If") def testVariableReadInCondFalseBranch(self): def build_functional_op(v): def then_branch(): return array_ops.zeros([], v.dtype) def else_branch(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.cond( constant_op.constant(False), then_branch, else_branch) self._testVariableReadInFunctionalOp(build_functional_op, "If") def testVariableReadInCaseBranch0(self): def build_functional_op(v): def branch0(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def branch1(): return array_ops.zeros([], v.dtype) return control_flow_ops.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableReadInFunctionalOp(build_functional_op, "Case") def testVariableReadInCaseBranch1(self): def build_functional_op(v): def branch0(): return array_ops.zeros([], v.dtype) def branch1(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableReadInFunctionalOp(build_functional_op, "Case") def 
testVariableReadInFunction(self): def build_functional_op(v): @def_function.function def fn_with_read(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableReadInNestedFunction(self): def build_functional_op(v): @def_function.function def fn_with_read(): @def_function.function def inner_fn(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return inner_fn() return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableReadInWhileInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_read(): @def_function.function def inner_fn(): def body(_): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) return inner_fn() return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableReadInCondInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_read(): @def_function.function def inner_fn(): def then_branch(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return control_flow_ops.cond( constant_op.constant(True), then_branch, else_branch) return inner_fn() return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def _testVariableWriteInFunctionalOp(self, build_functional_op, op_type): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) @def_function.function def write_var_in_while(): gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read1") result = build_functional_op(v) gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read2") gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return result func_graph = write_var_in_while.get_concrete_function().graph assert len(func_graph.inputs) == 1 def get_op(op_type, sub_name): operations = [ op for op in func_graph.get_operations() if op.type == op_type and sub_name in op.name ] assert len(operations) == 1 return operations[0] read1 = get_op("ReadVariableOp", "read1") functional_op = get_op(op_type, "") read2 = get_op("ReadVariableOp", "read2") assign_op = get_op("AssignVariableOp", "") # Since the While has writes, it has control edges from previous reads # e.g. `read1` and to future reads(`read2`) and writes(`assign_op`). 
self.assertIn(read1, functional_op.control_inputs) self.assertIn(functional_op, read2.control_inputs) self.assertIn(read2, assign_op.control_inputs) self.assertIn(functional_op, assign_op.control_inputs) def testVariableWriteInWhileLoop(self): def build_functional_op(v): def body(_): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) self._testVariableWriteInFunctionalOp(build_functional_op, "While") def testVariableWriteInCondTrueBranch(self): def build_functional_op(v): def then_branch(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return control_flow_ops.cond( constant_op.constant(True), then_branch, else_branch) self._testVariableWriteInFunctionalOp(build_functional_op, "If") def testVariableWriteInCondFalseBranch(self): def build_functional_op(v): def then_branch(): return array_ops.zeros([], v.dtype) def else_branch(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.cond( constant_op.constant(False), then_branch, else_branch) self._testVariableWriteInFunctionalOp(build_functional_op, "If") def testVariableWriteInCaseBranch0(self): def build_functional_op(v): def branch0(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def branch1(): return array_ops.zeros([], v.dtype) return control_flow_ops.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableWriteInFunctionalOp(build_functional_op, "Case") def testVariableWriteInCaseBranch1(self): def build_functional_op(v): def branch0(): return array_ops.zeros([], v.dtype) def branch1(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableWriteInFunctionalOp(build_functional_op, "Case") def testVariableWriteInFunction(self): def build_functional_op(v): @def_function.function def fn_with_write(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableWriteInNestedFunction(self): def build_functional_op(v): @def_function.function def fn_with_write(): @def_function.function def inner_fn(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return inner_fn() return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableWriteInWhileInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_write(): @def_function.function def inner_fn(): def body(_): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_ops.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) return inner_fn() return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def 
testVariableWriteInCondInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_write(): @def_function.function def inner_fn(): def then_branch(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return control_flow_ops.cond( constant_op.constant(True), then_branch, else_branch) return inner_fn() return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") @test_util.run_v1_only("b/120545219") def testCondMustRun(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): v.assign(v + 1) return 0.0 def false_fn(): v.assign(v + 4) return 1.0 control_flow_ops.cond(p, true_fn, false_fn) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0) @test_util.run_v1_only("b/120545219") def testCondMustRunSeparateRead(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): v.assign(v + 1) return 0.0 def false_fn(): v.assign(v + 4) return 1.0 control_flow_ops.cond(p, true_fn, false_fn) one = constant_op.constant(1.0) one = c.mark_as_return(one) one.eval(feed_dict={p: False}) self.assertAllEqual(v.read_value(), 5.0) one.eval(feed_dict={p: True}) self.assertAllEqual(v.read_value(), 6.0) @test_util.run_v1_only("b/120545219") def testCondNested(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) q = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): v.assign(v + 1, name="true") return 1.0 def false_fn(): def inner_true_fn(): v.assign(v * 2, name="false_true") return 2.0 def inner_false_fn(): v.assign(v * 3, name="false_false") return 3.0 control_flow_ops.cond(q, inner_true_fn, inner_false_fn) return 1.0 control_flow_ops.cond(p, true_fn, false_fn) with ops.name_scope("final"): val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0) self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0) self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0) self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0) @test_util.run_v1_only("b/120545219") def testCondOneBranch(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): return 0.0 def false_fn(): v.assign(v + 4) return 1.0 control_flow_ops.cond(p, true_fn, false_fn) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0) @test_util.run_v1_only("b/120545219") def testCondOneBranchUpdateBefore(self): with context.graph_mode(), self.cached_session(): 
v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: v.assign(v * 2) def true_fn(): return 0.0 def false_fn(): v.assign(v + 4) return 1.0 control_flow_ops.cond(p, true_fn, false_fn) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0) @test_util.run_v1_only("b/120545219") def testCondOneBranchUpdateAfter(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): return 0.0 def false_fn(): v.assign(v + 4) return 1.0 control_flow_ops.cond(p, true_fn, false_fn) v.assign(v * 2) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0) def testDefunWhileLoopWithCapturedLoopVars(self): n = 3 x = constant_op.constant(list(range(n))) @function.defun def loop(): c = lambda i, x: i < n b = lambda i, x: (i + 1, x + 1) i, out = control_flow_ops.while_loop(c, b, (0, x)) return i, out i, out = loop() self.assertEqual(int(i), 3) self.assertAllEqual(out, [3, 4, 5]) def testDecorator(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) @acd.automatic_control_dependencies def f(): v.assign(v + 1) v.assign(2 * v) return v.read_value() self.assertAllEqual(f(), 4.0) def testOptimizerInDefun(self): def loss(v): return v**2 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0) @function.defun def train(): self.v = resource_variable_ops.ResourceVariable(1.0) grad = backprop.implicit_grad(loss)(self.v) optimizer.apply_gradients(grad) return self.v.read_value() value = train() self.assertEqual(value.numpy(), -1.0) def testReturningNonTensorRaisesError(self): optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0) optimizer.apply_gradients = function.defun(optimizer.apply_gradients) v = resource_variable_ops.ResourceVariable(1.0) grad = backprop.implicit_grad(lambda v: v**2)(v) with self.assertRaisesRegex(TypeError, ".*must return zero or more Tensors.*"): # TODO(akshayka): We might want to allow defun-ing Python functions # that return operations (and just execute the op instead of running it). optimizer.apply_gradients(grad) # TODO(b/111663004): This should work when the outer context is graph # building. 
def testOptimizerNonSlotVarsInDefunNoError(self): def loss(v): return v**2 optimizer = adam.AdamOptimizer(learning_rate=1.0) @function.defun def train(): self.v = resource_variable_ops.ResourceVariable(1.0) grad = backprop.implicit_grad(loss)(self.v) optimizer.apply_gradients(grad) return self.v.read_value() train() def testOptimizerInDefunWithCapturedVariable(self): v = resource_variable_ops.ResourceVariable(1.0) def loss(): return v**2 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0) @function.defun def train(): grad = backprop.implicit_grad(loss)() optimizer.apply_gradients(grad) train() self.assertEqual(v.numpy(), -1.0) def testRepeatedResourceInput(self): var = resource_variable_ops.ResourceVariable(1.0) @def_function.function def inner(var1, var2): return (resource_variable_ops.read_variable_op(var1, dtypes.float32) + resource_variable_ops.read_variable_op(var2, dtypes.float32)) @def_function.function def outer(): return inner(var.handle, var.handle) self.assertEqual(self.evaluate(outer()), 2.0) if __name__ == "__main__": ops.enable_eager_execution() test.main()
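# -----------------------------------------------------------------------------
# Recap sketch (not one of the original test cases): the ordering rules the
# tests above verify, condensed into one annotated helper. It assumes the
# imports of this file and is meant to be called in graph mode inside a
# ``with acd.AutomaticControlDependencies() as c:`` block, with ``v`` a
# ResourceVariable; the function name itself is illustrative only.
def _acd_ordering_rules_sketch(v, c):
  read1 = gen_resource_variable_ops.read_variable_op(v.handle, v.dtype).op
  read2 = gen_resource_variable_ops.read_variable_op(v.handle, v.dtype).op
  # Rule 1: reads of the same resource are not ordered against each other,
  # so neither read op appears in the other's control_inputs.
  write = gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
  # Rule 2: a write is ordered after every read since the previous write,
  # so both read1 and read2 end up in write.control_inputs.
  read3 = gen_resource_variable_ops.read_variable_op(v.handle, v.dtype).op
  # Rule 3: a read is ordered after the most recent write,
  # so write ends up in read3.control_inputs.
  # Reads only join c.ops_which_must_run once they have a consumer, which is
  # why the tests call mark_as_return on their outputs.
  c.mark_as_return(read3.outputs[0])
  return read3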
# Copyright 2013 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import eventlet import netaddr from oslo_log import log as logging from oslo_serialization import jsonutils import requests from neutron.common import exceptions as n_exc from neutron.extensions import providernet from neutron.plugins.cisco.common import cisco_constants as c_const from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred from neutron.plugins.cisco.common import cisco_exceptions as c_exc from neutron.plugins.cisco.common import config as c_conf from neutron.plugins.cisco.db import network_db_v2 from neutron.plugins.cisco.extensions import n1kv LOG = logging.getLogger(__name__) class Client(object): """ Client for the Cisco Nexus1000V Neutron Plugin. This client implements functions to communicate with Cisco Nexus1000V VSM. For every Neutron objects, Cisco Nexus1000V Neutron Plugin creates a corresponding object in the controller (Cisco Nexus1000V VSM). CONCEPTS: Following are few concepts used in Nexus1000V VSM: port-profiles: Policy profiles correspond to port profiles on Nexus1000V VSM. Port profiles are the primary mechanism by which network policy is defined and applied to switch interfaces in a Nexus 1000V system. network-segment: Each network-segment represents a broadcast domain. network-segment-pool: A network-segment-pool contains one or more network-segments. logical-network: A logical-network contains one or more network-segment-pools. bridge-domain: A bridge-domain is created when the network-segment is of type VXLAN. Each VXLAN <--> VLAN combination can be thought of as a bridge domain. ip-pool: Each ip-pool represents a subnet on the Nexus1000V VSM. vm-network: vm-network refers to a network-segment and policy-profile. It maintains a list of ports that uses the network-segment and policy-profile this vm-network refers to. events: Events correspond to commands that are logged on Nexus1000V VSM. Events are used to poll for a certain resource on Nexus1000V VSM. Event type of port_profile: Return all updates/create/deletes of port profiles from the VSM. Event type of port_profile_update: Return only updates regarding policy-profiles. Event type of port_profile_delete: Return only deleted policy profiles. WORK FLOW: For every network profile a corresponding logical-network and a network-segment-pool, under this logical-network, will be created. For every network created from a given network profile, a network-segment will be added to the network-segment-pool corresponding to that network profile. A port is created on a network and associated with a policy-profile. Hence for every unique combination of a network and a policy-profile, a unique vm-network will be created and a reference to the port will be added. If the same combination of network and policy-profile is used by another port, the references to that port will be added to the same vm-network. """ # Define paths for the URI where the client connects for HTTP requests. 
port_profiles_path = "/virtual-port-profile" network_segment_path = "/network-segment/%s" network_segment_pool_path = "/network-segment-pool/%s" ip_pool_path = "/ip-pool-template/%s" ports_path = "/kvm/vm-network/%s/ports" port_path = "/kvm/vm-network/%s/ports/%s" vm_networks_path = "/kvm/vm-network" vm_network_path = "/kvm/vm-network/%s" bridge_domains_path = "/kvm/bridge-domain" bridge_domain_path = "/kvm/bridge-domain/%s" logical_network_path = "/logical-network/%s" events_path = "/kvm/events" clusters_path = "/cluster" encap_profiles_path = "/encapsulation-profile" encap_profile_path = "/encapsulation-profile/%s" pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size) def __init__(self, **kwargs): """Initialize a new client for the plugin.""" self.format = 'json' self.hosts = self._get_vsm_hosts() self.action_prefix = 'http://%s/api/n1k' % self.hosts[0] self.timeout = c_conf.CISCO_N1K.http_timeout def list_port_profiles(self): """ Fetch all policy profiles from the VSM. :returns: JSON string """ return self._get(self.port_profiles_path) def create_bridge_domain(self, network, overlay_subtype): """ Create a bridge domain on VSM. :param network: network dict :param overlay_subtype: string representing subtype of overlay network """ body = {'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX, 'segmentId': network[providernet.SEGMENTATION_ID], 'subType': overlay_subtype, 'tenantId': network['tenant_id']} if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: body['groupIp'] = network[n1kv.MULTICAST_IP] return self._post(self.bridge_domains_path, body=body) def delete_bridge_domain(self, name): """ Delete a bridge domain on VSM. :param name: name of the bridge domain to be deleted """ return self._delete(self.bridge_domain_path % name) def create_network_segment(self, network, network_profile): """ Create a network segment on the VSM. :param network: network dict :param network_profile: network profile dict """ body = {'publishName': network['id'], 'description': network['name'], 'id': network['id'], 'tenantId': network['tenant_id'], 'networkSegmentPool': network_profile['id'], } if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: body['vlan'] = network[providernet.SEGMENTATION_ID] elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: body['bridgeDomain'] = (network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX) if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK: body['mode'] = c_const.NETWORK_TYPE_TRUNK body['segmentType'] = network_profile['sub_type'] if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN: body['addSegments'] = network['add_segment_list'] body['delSegments'] = network['del_segment_list'] else: body['encapProfile'] = (network['id'] + c_const.ENCAPSULATION_PROFILE_SUFFIX) else: body['mode'] = 'access' body['segmentType'] = network_profile['segment_type'] return self._post(self.network_segment_path % network['id'], body=body) def update_network_segment(self, network_segment_id, body): """ Update a network segment on the VSM. Network segment on VSM can be updated to associate it with an ip-pool or update its description and segment id. :param network_segment_id: UUID representing the network segment :param body: dict of arguments to be updated """ return self._post(self.network_segment_path % network_segment_id, body=body) def delete_network_segment(self, network_segment_id): """ Delete a network segment on the VSM. 
:param network_segment_id: UUID representing the network segment """ return self._delete(self.network_segment_path % network_segment_id) def create_logical_network(self, network_profile, tenant_id): """ Create a logical network on the VSM. :param network_profile: network profile dict :param tenant_id: UUID representing the tenant """ LOG.debug("Logical network") body = {'description': network_profile['name'], 'tenantId': tenant_id} logical_network_name = (network_profile['id'] + c_const.LOGICAL_NETWORK_SUFFIX) return self._post(self.logical_network_path % logical_network_name, body=body) def delete_logical_network(self, logical_network_name): """ Delete a logical network on VSM. :param logical_network_name: string representing name of the logical network """ return self._delete( self.logical_network_path % logical_network_name) def create_network_segment_pool(self, network_profile, tenant_id): """ Create a network segment pool on the VSM. :param network_profile: network profile dict :param tenant_id: UUID representing the tenant """ LOG.debug("network_segment_pool") logical_network_name = (network_profile['id'] + c_const.LOGICAL_NETWORK_SUFFIX) body = {'name': network_profile['name'], 'description': network_profile['name'], 'id': network_profile['id'], 'logicalNetwork': logical_network_name, 'tenantId': tenant_id} if network_profile['segment_type'] == c_const.NETWORK_TYPE_OVERLAY: body['subType'] = network_profile['sub_type'] return self._post( self.network_segment_pool_path % network_profile['id'], body=body) def update_network_segment_pool(self, network_profile): """ Update a network segment pool on the VSM. :param network_profile: network profile dict """ body = {'name': network_profile['name'], 'description': network_profile['name']} return self._post(self.network_segment_pool_path % network_profile['id'], body=body) def delete_network_segment_pool(self, network_segment_pool_id): """ Delete a network segment pool on the VSM. :param network_segment_pool_id: UUID representing the network segment pool """ return self._delete(self.network_segment_pool_path % network_segment_pool_id) def create_ip_pool(self, subnet): """ Create an ip-pool on the VSM. :param subnet: subnet dict """ if subnet['cidr']: try: ip = netaddr.IPNetwork(subnet['cidr']) netmask = str(ip.netmask) network_address = str(ip.network) except (ValueError, netaddr.AddrFormatError): msg = _("Invalid input for CIDR") raise n_exc.InvalidInput(error_message=msg) else: netmask = network_address = "" if subnet['allocation_pools']: address_range_start = subnet['allocation_pools'][0]['start'] address_range_end = subnet['allocation_pools'][0]['end'] else: address_range_start = None address_range_end = None body = {'addressRangeStart': address_range_start, 'addressRangeEnd': address_range_end, 'ipAddressSubnet': netmask, 'description': subnet['name'], 'gateway': subnet['gateway_ip'], 'dhcp': subnet['enable_dhcp'], 'dnsServersList': subnet['dns_nameservers'], 'networkAddress': network_address, 'netSegmentName': subnet['network_id'], 'id': subnet['id'], 'tenantId': subnet['tenant_id']} return self._post(self.ip_pool_path % subnet['id'], body=body) def update_ip_pool(self, subnet): """ Update an ip-pool on the VSM. :param subnet: subnet dictionary """ body = {'description': subnet['name'], 'dhcp': subnet['enable_dhcp'], 'dnsServersList': subnet['dns_nameservers']} return self._post(self.ip_pool_path % subnet['id'], body=body) def delete_ip_pool(self, subnet_id): """ Delete an ip-pool on the VSM. 
:param subnet_id: UUID representing the subnet """ return self._delete(self.ip_pool_path % subnet_id) def create_vm_network(self, port, vm_network_name, policy_profile): """ Create a VM network on the VSM. :param port: port dict :param vm_network_name: name of the VM network :param policy_profile: policy profile dict """ body = {'name': vm_network_name, 'networkSegmentId': port['network_id'], 'networkSegment': port['network_id'], 'portProfile': policy_profile['name'], 'portProfileId': policy_profile['id'], 'tenantId': port['tenant_id'], 'portId': port['id'], 'macAddress': port['mac_address'], } if port.get('fixed_ips'): body['ipAddress'] = port['fixed_ips'][0]['ip_address'] body['subnetId'] = port['fixed_ips'][0]['subnet_id'] return self._post(self.vm_networks_path, body=body) def delete_vm_network(self, vm_network_name): """ Delete a VM network on the VSM. :param vm_network_name: name of the VM network """ return self._delete(self.vm_network_path % vm_network_name) def create_n1kv_port(self, port, vm_network_name): """ Create a port on the VSM. :param port: port dict :param vm_network_name: name of the VM network which imports this port """ body = {'id': port['id'], 'macAddress': port['mac_address']} if port.get('fixed_ips'): body['ipAddress'] = port['fixed_ips'][0]['ip_address'] body['subnetId'] = port['fixed_ips'][0]['subnet_id'] return self._post(self.ports_path % vm_network_name, body=body) def update_n1kv_port(self, vm_network_name, port_id, body): """ Update a port on the VSM. Update the mac address associated with the port :param vm_network_name: name of the VM network which imports this port :param port_id: UUID of the port :param body: dict of the arguments to be updated """ return self._post(self.port_path % (vm_network_name, port_id), body=body) def delete_n1kv_port(self, vm_network_name, port_id): """ Delete a port on the VSM. :param vm_network_name: name of the VM network which imports this port :param port_id: UUID of the port """ return self._delete(self.port_path % (vm_network_name, port_id)) def _do_request(self, method, action, body=None, headers=None): """ Perform the HTTP request. The response is in either JSON format or plain text. A GET method will invoke a JSON response while a PUT/POST/DELETE returns message from the VSM in plain text format. Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP status code (500) i.e. an error has occurred on the VSM or SERVICE UNAVAILABLE (503) i.e. VSM is not reachable. :param method: type of the HTTP request. 
POST, GET, PUT or DELETE :param action: path to which the client makes request :param body: dict for arguments which are sent as part of the request :param headers: header for the HTTP request :returns: JSON or plain text in HTTP response """ action = self.action_prefix + action if not headers and self.hosts: headers = self._get_auth_header(self.hosts[0]) headers['Content-Type'] = self._set_content_type('json') headers['Accept'] = self._set_content_type('json') if body: body = jsonutils.dumps(body, indent=2) LOG.debug("req: %s", body) try: resp = self.pool.spawn(requests.request, method, url=action, data=body, headers=headers, timeout=self.timeout).wait() except Exception as e: raise c_exc.VSMConnectionFailed(reason=e) LOG.debug("status_code %s", resp.status_code) if resp.status_code == requests.codes.OK: if 'application/json' in resp.headers['content-type']: try: return resp.json() except ValueError: return {} elif 'text/plain' in resp.headers['content-type']: LOG.debug("VSM: %s", resp.text) else: raise c_exc.VSMError(reason=resp.text) def _set_content_type(self, format=None): """ Set the mime-type to either 'xml' or 'json'. :param format: format to be set. :return: mime-type string """ if not format: format = self.format return "application/%s" % format def _delete(self, action, body=None, headers=None): return self._do_request("DELETE", action, body=body, headers=headers) def _get(self, action, body=None, headers=None): return self._do_request("GET", action, body=body, headers=headers) def _post(self, action, body=None, headers=None): return self._do_request("POST", action, body=body, headers=headers) def _put(self, action, body=None, headers=None): return self._do_request("PUT", action, body=body, headers=headers) def _get_vsm_hosts(self): """ Retrieve a list of VSM ip addresses. :return: list of host ip addresses """ return [cr[c_const.CREDENTIAL_NAME] for cr in network_db_v2.get_all_n1kv_credentials()] def _get_auth_header(self, host_ip): """ Retrieve header with auth info for the VSM. :param host_ip: IP address of the VSM :return: authorization header dict """ username = c_cred.Store.get_username(host_ip) password = c_cred.Store.get_password(host_ip) auth = base64.encodestring("%s:%s" % (username, password)).rstrip() header = {"Authorization": "Basic %s" % auth} return header def get_clusters(self): """Fetches a list of all vxlan gateway clusters.""" return self._get(self.clusters_path) def create_encapsulation_profile(self, encap): """ Create an encapsulation profile on VSM. :param encap: encapsulation dict """ body = {'name': encap['name'], 'addMappings': encap['add_segment_list'], 'delMappings': encap['del_segment_list']} return self._post(self.encap_profiles_path, body=body) def update_encapsulation_profile(self, context, profile_name, body): """ Adds a vlan to bridge-domain mapping to an encapsulation profile. :param profile_name: Name of the encapsulation profile :param body: mapping dictionary """ return self._post(self.encap_profile_path % profile_name, body=body) def delete_encapsulation_profile(self, name): """ Delete an encapsulation profile on VSM. :param name: name of the encapsulation profile to be deleted """ return self._delete(self.encap_profile_path % name)
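# -----------------------------------------------------------------------------
# Usage sketch (not part of the original module): the per-network-profile call
# sequence described in the Client class docstring, written out explicitly.
# The ``network_profile`` and ``network`` dicts below are hypothetical
# examples of the fields the methods above actually read; in the plugin they
# come from the Neutron database, and instantiating Client() requires VSM
# credentials to be configured.
def _provision_vlan_network_sketch(tenant_id):
    client = Client()
    # Hypothetical example values for a VLAN network profile.
    network_profile = {'id': 'np-uuid', 'name': 'profile-vlan',
                       'segment_type': c_const.NETWORK_TYPE_VLAN}
    # One logical-network and one network-segment-pool per network profile.
    client.create_logical_network(network_profile, tenant_id)
    client.create_network_segment_pool(network_profile, tenant_id)
    # Every network created from the profile becomes a network-segment
    # inside that pool.
    network = {'id': 'net-uuid', 'name': 'net0', 'tenant_id': tenant_id,
               providernet.NETWORK_TYPE: c_const.NETWORK_TYPE_VLAN,
               providernet.SEGMENTATION_ID: 100}
    client.create_network_segment(network, network_profile)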
""" Gaudi Python script for extraction and analysis of BGV mdf data. """ from __future__ import print_function import pdb import logging import sys from argparse import (ArgumentParser, RawDescriptionHelpFormatter, ArgumentTypeError) from os.path import abspath, isfile from datetime import datetime from operator import lt, gt from numpy import (array, mean, std, zeros, int16, rint, arange, vstack) try: from h5py.version import hdf5_version from h5py.version import version from h5py import File except ImportError: logging.error('Module h5py was not found. Please install it for output.') # set_logging related constants LOG_MSG_FMT = "[%(asctime)s][%(levelname)-8s]\ [%(filename)s, %(lineno)d][%(name)s]\t%(message)s" LOG_DT_FMT = "\033[1m%m-%d %H:%M:%S\033[0m" ############################################################################### # ARGPARSE CONSTANTS ############################################################################### USAGE_STR = "fin fout [<options>]" USAGE_STR_NOOUT = "fin [<options>]" USAGE_CMDS = """\n\n The commands defined here are: datacheck Extract data to a root file examining basic information extract Extract data for general case (h5 file) - fmt: [evt][ch] extract_cl Extract cluster from data (h5 file) - fmt: [tell][evt][(x, y)] extract_dev Extract data for general case (h5 file) - experimental pdb Start the run for usage with python debugger pedhead Extract the data for pedestal definition pulseshape Extract the data in the case of a pulse shape scan analysis """ EXAMPLES_CMDS = """\n Examples: $ python bgvUber.py pdb <in [mdf]> [<options>] $ python bgvUber.py timespan <in [mdf]> [<options>] $ python bgvUber.py datacheck <in [mdf]> <out [root]> [<options>] $ python bgvUber.py extract <in [mdf]> <out [hdf5]> [<options>] $ python bgvUber.py pedhead <in [mdf]> <out [hdf5]> [<options>] $ python bgvUber.py pulseshape <in [mdf]> <out [root]> [<options>] """ DESCR_PED = "Extract data stats for pedestal definition.\n\ The output data are in HDF5 format. Under '/Raw/MeanADC' is the mean which\n\ is to be used as pedestal while under '/Raw/StdADC' is the standard deviation." DESCR_PDB = 'Start the loop with a pdb trace for each event' DESCR_TS = 'Get start and end time of run.' DESCR_EX = 'Extract data to HDF5 format.' 
EPILOGUE_PS = """\n \033[1mNOTE\033[0m: nevts for pulseshape function is per step Example: $ python bgvUber.py pulseshape /eos/project/l/lhcbgv/data/nzs/<year>/Run_* \ Run_X_pulseshape_nXeXtX.root -n 1000 -t 2 -e 25 """ ############################################################################### # CONSTANTS ############################################################################### # Used for extrated HDF5 file attribute DT_FMT = '%Y-%m-%d %H:%M:%S' DIR_BGVDB = '/eos/project/l/lhcbgv/sw/SQLiteDB/' # EOS related constants - now irrelevant - keep though EOS_FS = 'root://eospublic.cern.ch:1094' EOS = EOS_FS[:-5] GAUDI_DATAFILE_STR_EOS = 'mdf:' + EOS + '/' # Gaudi related constants CONDB_OPT = "/afs/cern.ch/project/lhcbgv/sw/releases/SQLiteDB/options/\ SetBGVDB.py" KEYIDS = [1, 2, 4, 5, 6, 7, 8, 9] NUM_CH = 2048 xml_cond_template = """<?xml version="1.0" encoding="ISO-8859-1"?> <!DOCTYPE DDDB SYSTEM "conddb:/DTD/structure.dtd"> <DDDB> <catalog name ="Pedestals"> <conditionref href="#Tell1Key1"/> <conditionref href="#Tell1Key2"/> <conditionref href="#Tell1Key4"/> <conditionref href="#Tell1Key5"/> <conditionref href="#Tell1Key6"/> <conditionref href="#Tell1Key7"/> <conditionref href="#Tell1Key8"/> <conditionref href="#Tell1Key9"/> </catalog> <catalog name ="RMS"> <conditionref href="#Tell1Key1"/> <conditionref href="#Tell1Key2"/> <conditionref href="#Tell1Key4"/> <conditionref href="#Tell1Key5"/> <conditionref href="#Tell1Key6"/> <conditionref href="#Tell1Key7"/> <conditionref href="#Tell1Key8"/> <conditionref href="#Tell1Key9"/> </catalog> <catalog name ="Headers"> <conditionref href="#Tell1Key1"/> <conditionref href="#Tell1Key2"/> <conditionref href="#Tell1Key4"/> <conditionref href="#Tell1Key5"/> <conditionref href="#Tell1Key6"/> <conditionref href="#Tell1Key7"/> <conditionref href="#Tell1Key8"/> <conditionref href="#Tell1Key9"/> </catalog> <condition classID="55001" name="Tell1Key1"> <paramVector name="Pedestals" type="int"> %(ped_key1)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key1)s </paramVector> <paramVector name="Headers" type="int"> %(head_key1)s </paramVector> </condition> <condition classID="55001" name="Tell1Key2"> <paramVector name="Pedestals" type="int"> %(ped_key2)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key2)s </paramVector> <paramVector name="Headers" type="int"> %(head_key2)s </paramVector> </condition> <condition classID="55001" name="Tell1Key4"> <paramVector name="Pedestals" type="int"> %(ped_key4)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key4)s </paramVector> <paramVector name="Headers" type="int"> %(head_key4)s </paramVector> </condition> <condition classID="55001" name="Tell1Key5"> <paramVector name="Pedestals" type="int"> %(ped_key5)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key5)s </paramVector> <paramVector name="Headers" type="int"> %(head_key5)s </paramVector> </condition> <condition classID="55001" name="Tell1Key6"> <paramVector name="Pedestals" type="int"> %(ped_key6)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key6)s </paramVector> <paramVector name="Headers" type="int"> %(head_key6)s </paramVector> </condition> <condition classID="55001" name="Tell1Key7"> <paramVector name="Pedestals" type="int"> %(ped_key7)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key7)s </paramVector> <paramVector name="Headers" type="int"> %(head_key7)s </paramVector> </condition> <condition classID="55001" name="Tell1Key8"> <paramVector name="Pedestals" type="int"> %(ped_key8)s 
</paramVector> <paramVector name="RMS" type="int"> %(rms_key8)s </paramVector> <paramVector name="Headers" type="int"> %(head_key8)s </paramVector> </condition> <condition classID="55001" name="Tell1Key9"> <paramVector name="Pedestals" type="int"> %(ped_key9)s </paramVector> <paramVector name="RMS" type="int"> %(rms_key9)s </paramVector> <paramVector name="Headers" type="int"> %(head_key9)s </paramVector> </condition> </DDDB> """ ############################################################################### __author__ = 'andreas' __copyright__ = "Copyleft 7DF" __email__ = "a.alexopoulos at cern dot ch" __version__ = 'v1.0' ############################################################################### logger = None def set_logging(verbosity, msg_fmt=LOG_MSG_FMT, dt_fmt=LOG_DT_FMT): """Sets the logging output style. """ logging.addLevelName(logging.DEBUG, "\033[1;34m%-8s\033[1;0m" % logging.getLevelName(logging.DEBUG)) logging.addLevelName(logging.INFO, "\033[1;37m%-8s\033[1;0m" % logging.getLevelName(logging.INFO)) logging.addLevelName(logging.WARNING, "\033[1;33m%-8s\033[1;0m" % logging.getLevelName(logging.WARNING)) logging.addLevelName(logging.ERROR, "\033[1;31m%-8s\033[1;0m" % logging.getLevelName(logging.ERROR)) logging.addLevelName(logging.CRITICAL, "\033[1;41m%-8s\033[1;0m" % logging.getLevelName(logging.CRITICAL)) logging.basicConfig(level=verbosity, format=LOG_MSG_FMT, datefmt=LOG_DT_FMT) # XRootD function in case of --eos def get_files_eos(path, runs, server=EOS): """Searches for the files of the defined run in the path/server specified. """ from XRootD import client from XRootD.client.flags import DirListFlags c = client.FileSystem(EOS) gaudi_datafiles = GAUDI_DATAFILE_STR_EOS + path logger.debug('Path:%s, Runs:%s' % (path, runs)) status, listing = c.dirlist(path, DirListFlags.STAT) logger.debug('Status:%s' % status) files = [] if listing is None: return files, status for run in runs: for entry in listing: rnum = int(entry.name.split('_')[1]) r = int(run) if r == rnum: files.append(gaudi_datafiles + entry.name) return files, status def set_input_files(fin, eos=False, eos_path='/'): if eos: logger.debug('Data to retrieve from EOS.') files, status = get_files_eos(eos_path, fin) logger.info('EOS Files: %s found: %s' % (len(files), files)) if not files: logger.critical('No files found !\nStatus: %s\nExiting!' % status) sys.exit() else: files = [abspath(item) for item in fin if isfile(item)] return files def initialize_gaudi(files, **kwargs): """Initializing Gaudi for the execution. A lot TODOs/TOCHECKs here. 
""" from Configurables import LHCbApp from Gaudi.Configuration import importOptions, EventSelector from Configurables import CondDB from GaudiPython.Bindings import AppMgr no_corr = kwargs.get('not_corrected', False) tae = kwargs.get('tae', 0) loc = kwargs.get('loc', []) dbtag = kwargs.get('dbtag', "bgv-20160616") ########################################################################### # Gaudi Init ########################################################################### logger.debug('\nInitializing Gaudi...\n\n') lhcb = LHCbApp() logging.debug('LHCbApp object: %s' % lhcb) ########################################################################### # CondDB SETTING ########################################################################### importOptions(CONDB_OPT) # Set the tags for geometry and conditions CondDB().Tags["DDDB"] = "" # default tag CondDB().Tags["LHCBCOND"] = dbtag CondDB().IgnoreHeartBeat = True CondDB().EnableRunStampCheck = False logger.info("\n%s" % CondDB()) ########################################################################### # EVENT SELECTOR ########################################################################### EventSelector().PrintFreq = 500 EventSelector().Input = ["DATAFILE='%s' SVC='%s'" % (f, "LHCb::MDFSelector") for f in files] ########################################################################### # AppMgr Algorithms ########################################################################### appMgr = AppMgr() if not tae: appMgr.addAlgorithm('LbAppInit') appMgr.addAlgorithm('PrepareVeloFullRawBuffer') appMgr.addAlgorithm('DecodeSciFiFullRawBuffer') if not no_corr: appMgr.addAlgorithm('OfflineADCCorrections') # appMgr.addAlgorithm('nzsStreamListener') # ? else: # Case with time alignment events logger.info('TAE Events') preplist = ['prepare'+item[:-1] if item else 'prepareCentral' for item in loc] decolist = ['decode'+item[:-1] if item else 'decodeCentral' for item in loc] logger.info('Locations: %s' % loc) logger.info('Prepares: %s' % preplist) logger.info('Decodes: %s' % decolist) for p, d in zip(preplist, decolist): appMgr.addAlgorithm('PrepareVeloFullRawBuffer/' + p) appMgr.addAlgorithm('DecodeSciFiFullRawBuffer/' + d) appMgr.addAlgorithm('createODIN') evt = appMgr.evtsvc() # det = appMgr.detsvc() ########################################################################### # ALGORITHMS SETTING ########################################################################### if not tae: prep = appMgr.algorithm('PrepareVeloFullRawBuffer') deco = appMgr.algorithm('DecodeSciFiFullRawBuffer') if not no_corr: corr = appMgr.algorithm('OfflineADCCorrections') logger.debug('Corrections algorithm enabled; %s' % corr) # set variables for the algorithms prep.RunWithODIN = False # correct.OutputLevel = 5 # correct.cmThreshold = 1 # decode.CableOrder = [3,2,1,0] else: logging.debug(appMgr.algorithms()) for l, p, d in zip(loc, preplist, decolist): prep = appMgr.algorithm(p) deco = appMgr.algorithm(d) prep.RunWithODIN = False # prep.OutputLevel = 3 prep.IgnoreErrorBanks = True prep.ADCLocation = l + prep.ADCLocation prep.ADCPartialLoc = l + prep.ADCPartialLoc prep.RawEventLocation = l + prep.RawEventLocation deco.CableOrder = [3, 2, 1, 0] deco.SectorCorrection = False # deco.OutputLevel = 3 deco.ADCLocation = l + deco.ADCLocation deco.DecodedADCLocation = l + deco.DecodedADCLocation deco.DecodedHeaderLocation = l + deco.DecodedHeaderLocation deco.EventInfoLocation = l + deco.EventInfoLocation deco.DecodedPedestalLocation = l + deco.DecodedPedestalLocation return 
appMgr, evt def create_out_file(fname, first_mdf, **kwargs): """Setting the basic attributes of the HDF5 output file. """ logger.info('Creating output file: %s' % fname) f = File(fname, 'w') # Give the file some attributes f.attrs['file_name'] = fname f.attrs['file_orig'] = first_mdf f.attrs['ext_time'] = datetime.now().strftime(DT_FMT) f.attrs['DCD_version'] = __version__ f.attrs['HDF5_Version'] = hdf5_version f.attrs['h5py_version'] = version return f def write_out_event(fp, evntinfo, data): """I don't like the idea of running through 100k events and keeping everything in memory. Do something - per event / periodical write out (?) """ pass def set_parser_args(parser, fout=True): """Sets some default commonly used arguments, so that I don't repeat them. """ # positional arguments parser.add_argument('fin', nargs='+', help='Input file path or run number (eos case)') if fout: parser.add_argument('fout', help='Output file path.') """ # eos - forget them for now parser.add_argument('--eos', action='store_true', default=False, help='Retrieve run data from EOS.') parser.add_argument('--eos-path', default='/eos/bgv/data/nzs/2016/', help='If EOS enabled, path to look for the run number') """ # More parser.add_argument('--nevts', '-n', action='store', default=1000, type=int, help='Number of events to be processed') parser.add_argument('--sevts', '-s', action='store', default=0, type=int, help='Number of events to be skipped') # verbosity parser.add_argument('-v', '--verbosity', action='count', help='Increase output verbosity') return parser def counts_to_verbosity(counts): """Gets the 'v' count from the cli and translates it to some logging level """ if counts is None: return logging.WARNING elif counts == 1: return logging.INFO else: return logging.DEBUG class UberBGV(object): """ Module containing the implementation of the extraction and analysis of BGV mdf data. 
""" def __init__(self): # parser = ArgumentParser(description=__doc__, usage=USAGE_STR) usage = USAGE_STR + USAGE_CMDS parser = ArgumentParser(description=__doc__, prog='bgvUber.py <command>', usage='%(prog)s ' + usage, formatter_class=RawDescriptionHelpFormatter, epilog=EXAMPLES_CMDS) parser.add_argument('command', help='Subcommand to run') args = parser.parse_args(sys.argv[1:2]) if not hasattr(self, args.command): print('ERROR: Unrecognized command') parser.print_help() exit(1) # use dispatch pattern to invoke method with same name getattr(self, args.command)() def extract(self): parser = ArgumentParser(description=DESCR_EX, prog='bgvUber.py extract', usage='%(prog)s ' + USAGE_STR, formatter_class=RawDescriptionHelpFormatter) parser = set_parser_args(parser, fout=True) # Set common arguments # Function specific args parser.add_argument('--type', '-t', action='store', default='cor', choices=['raw', 'cor'], type=str, help='Type of output data.') args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('Extraction function running...') # Definition of file lists files = set_input_files(args['fin']) args['fout'] = abspath(args['fout']) ####################################################################### # INIT ####################################################################### tick0 = datetime.now() if args['type'] == 'cor': appMgr, evt = initialize_gaudi(files) else: appMgr, evt = initialize_gaudi(files, not_corrected=True) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) ####################################################################### # START ####################################################################### if skipevts > 0: appMgr.run(skipevts) if args['type'] == 'raw': bank = '/Event/Raw/Velo/DecodedADC' else: bank = '/Event/Corrected/SciFi/CorrectedADC' res, bids = [], [] cnt = 0 for i in range(nevts): appMgr.run(1) if not evt['/Event'].__nonzero__(): logger.warning('EMPTY: Event %d' % (i)) cnt += 1 continue tes_bank = evt[bank] odin = evt['DAQ/ODIN'] try: tmp_arr = zeros(NUM_CH*len(KEYIDS)) for j, key in enumerate(KEYIDS): if(tes_bank.containedObject(key)): tmp = tes_bank.containedObject(key).data() l, h = j*NUM_CH, j*NUM_CH + NUM_CH tmp_arr[l:h] = array(tmp) else: logger.warning('EMPTY: Event %d, key %d' % (i, key)) tmp_arr = array([]) cnt += 1 break except AttributeError as e: logger.warning('EMPTY - AttributeError:%s' % e) tmp_arr = array([]) cnt += 1 break # continue - leaving it now - should not happen if tmp_arr.any(): res.append(tmp_arr) bids.append(odin.bunchId()) logger.debug('Evt #%d / BunchID: %d' % (i, bids[-1])) logger.debug('# of empty events: %d' % cnt) ####################################################################### # OUTPUT ####################################################################### tick1 = datetime.now() fp = create_out_file(args['fout'], args['fin'][0]) if args['type'] == 'raw': fp.attrs['file_type'] = 'Raw' grp = fp.create_group('/Raw/') grp.create_dataset('RawADC', data=array(res, dtype=int16)) else: fp.attrs['file_type'] = 'Corrected' grp = fp.create_group('/Corrected/') grp.create_dataset('CorrectedADC', data=array(res, dtype=int16)) grp = fp.create_group('/Info/') grp.create_dataset('BunchIDs', data=array(bids, dtype=int16)) fp.close() logger.info('Dump time: %s' % (datetime.now() - tick1)) 
####################################################################### logger.info('Total analysis time: %s' % (datetime.now() - tick0)) def extract_dev(self): """ Just another version of extract for dev purposes. """ parser = ArgumentParser(description='Extract data to different types', prog='bgvUber.py extract', usage='%(prog)s ' + USAGE_STR, formatter_class=RawDescriptionHelpFormatter) parser = set_parser_args(parser, fout=True) # Set common arguments # Function specific args parser.add_argument('--type', '-t', action='store', default='cor', choices=['raw', 'cor'], type=str, help='Extract h5 file (dev version).') args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.info('Extraction (dev version) function running...') # Definition of file lists files = set_input_files(args['fin']) args['fout'] = abspath(args['fout']) ####################################################################### # INIT ####################################################################### tick0 = datetime.now() if args['type'] == 'cor': appMgr, evt = initialize_gaudi(files) else: appMgr, evt = initialize_gaudi(files, not_corrected=True) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) # Create output file tick1 = datetime.now() fp = create_out_file(args['fout']) if args['type'] == 'raw': fp.attrs['file_type'] = 'Raw' else: fp.attrs['file_type'] = 'Corrected' grp = fp.create_group('/Events/') ####################################################################### # START ####################################################################### if skipevts > 0: appMgr.run(skipevts) if args['type'] == 'raw': bank = '/Event/Raw/Velo/DecodedADC' else: bank = '/Event/Corrected/SciFi/CorrectedADC' cnt = 0 for i in range(nevts): appMgr.run(1) enum = skipevts+i if not evt['/Event'].__nonzero__(): logger.warning('EMPTY: Event %d' % (i)) cnt += 1 continue tes_bank = evt[bank] odin = evt['DAQ/ODIN'] try: tmp_dict = {} for j, key in enumerate(KEYIDS): if(tes_bank.containedObject(key)): tmp = tes_bank.containedObject(key).data() tmp_dict[key] = array(tmp, dtype=int16) else: logger.warning('EMPTY: Event %d, key %d' % (i, key)) tmp_dict = {} cnt += 1 break except AttributeError as e: logger.warning('EMPTY - AttributeError:%s' % e) tmp_dict = {} cnt += 1 break # continue - leaving it now - should not happen if tmp_dict: ge = grp.create_group('%s' % enum) gt = ge.create_group('Tell1') for k, v in tmp_dict.iteritems(): gt.create_dataset('%s' % k, data=array(v, dtype=int16)) info = [odin.bunchId(), odin.orbitNumber(), odin.eventTime()] ge.create_dataset('Info', data=info) logger.debug('Evt #%d / Info: %d' % (enum, info)) logger.debug('# of empty events: %d' % cnt) ####################################################################### # OUTPUT ####################################################################### fp.close() logger.info('Dump time: %s' % (datetime.now() - tick1)) ####################################################################### logger.info('Total analysis time: %s' % (datetime.now() - tick0)) def extract_clu(self): """ Just another version of extract for dev purposes. 
""" parser = ArgumentParser(description='Extract data to different types', prog='bgvUber.py extract', usage='%(prog)s ' + USAGE_STR, formatter_class=RawDescriptionHelpFormatter) parser = set_parser_args(parser, fout=True) # Set common arguments # Function specific args parser.add_argument('--threshold', '-t', action='store', default=70, type=int, help='Threshold for the clusters') args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('Extraction (clu) function running...') # Definition of file lists files = set_input_files(args['fin']) args['fout'] = abspath(args['fout']) ####################################################################### # INIT ####################################################################### tick0 = datetime.now() if args['type'] == 'cor': appMgr, evt = initialize_gaudi(files) else: appMgr, evt = initialize_gaudi(files, not_corrected=True) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) # Create output file tick1 = datetime.now() fp = create_out_file(args['fout']) if args['type'] == 'raw': fp.attrs['file_type'] = 'Raw' else: fp.attrs['file_type'] = 'Corrected' grp = fp.create_group('/Events/') ####################################################################### # START ####################################################################### if skipevts > 0: appMgr.run(skipevts) if args['type'] == 'raw': bank = '/Event/Raw/Velo/DecodedADC' else: bank = '/Event/Corrected/SciFi/CorrectedADC' cnt = 0 for i in range(nevts): appMgr.run(1) enum = skipevts+i if not evt['/Event'].__nonzero__(): logger.warning('EMPTY: Event %d' % (i)) cnt += 1 continue tes_bank = evt[bank] odin = evt['DAQ/ODIN'] try: tmp_dict = {} for j, key in enumerate(KEYIDS): if(tes_bank.containedObject(key)): tmp = tes_bank.containedObject(key).data() tmp_dict[key] = array(tmp, dtype=int16) else: logger.warning('EMPTY: Event %d, key %d' % (i, key)) tmp_dict = {} cnt += 1 break except AttributeError as e: logger.warning('EMPTY - AttributeError:%s' % e) tmp_dict = {} cnt += 1 break # continue - leaving it now - should not happen if tmp_dict: ge = grp.create_group('%s' % enum) gt = ge.create_group('Tell1') for k, v in tmp_dict.iteritems(): gt.create_dataset('%s' % k, data=array(v, dtype=int16)) info = [odin.bunchId(), odin.orbitNumber(), odin.eventTime()] ge.create_dataset('Info', data=info) logger.debug('Evt #%d / Info: %d' % (enum, info)) logger.debug('# of empty events: %d' % cnt) ####################################################################### # OUTPUT ####################################################################### fp.close() logger.info('Dump time: %s' % (datetime.now() - tick1)) ####################################################################### logger.info('Total analysis time: %s' % (datetime.now() - tick0)) def timespan(self): parser = ArgumentParser(description=DESCR_TS, prog='bgvUber.py timespan', usage='%(prog)s ' + USAGE_STR, formatter_class=RawDescriptionHelpFormatter) parser = set_parser_args(parser, fout=False) # Set common arguments # Function specific args """ parser.add_argument('--msevts', '-m', action='store', default=1, type=int, help='Number of events to be skipped in between') """ args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = 
logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('Timespan function running...') # Definition of file lists files = set_input_files(args['fin']) ####################################################################### # INIT ####################################################################### appMgr, evt = initialize_gaudi(files, not_corrected=True) appMgr.removeAlgorithm('PrepareVeloFullRawBuffer') appMgr.removeAlgorithm('DecodeSciFiFullRawBuffer') nevts = int(args['nevts']) skipevts = int(args['sevts']) ####################################################################### # START ####################################################################### cnt = 0 if skipevts > 0: appMgr.run(skipevts) cnt += skipevts appMgr.run(1) enum0 = cnt if not evt['/Event'].__nonzero__(): logger.error('EMPTY: Event %d' % (enum0)) appMgr.stop() return cnt += 1 odin = evt['DAQ/ODIN'] et0 = odin.eventTime() tupl = (et0.year(True), et0.month(True)+1, et0.day(True), et0.hour(True), et0.minute(True), et0.second(True), et0.nsecond()) et0_str = "%04d-%02d-%02d %02d:%02d:%02d.%d" % tupl nevts -= 2 logger.debug('NEVTS: %d' % nevts) appMgr.run(nevts) cnt += nevts appMgr.run(1) enum1 = cnt print(appMgr.evtSel().isValid()) if not evt['/Event'].__nonzero__(): logger.error('EMPTY: Event %d' % (enum1)) appMgr.stop() return cnt += 1 odin = evt['DAQ/ODIN'] et1 = odin.eventTime() tupl = (et1.year(True), et1.month(True)+1, et1.day(True), et1.hour(True), et1.minute(True), et1.second(True), et1.nsecond()) et1_str = "%04d-%02d-%02d %02d:%02d:%02d.%d" % tupl print("\n*** SUMMARY ***") msg = "\tevt: %06d / time:%s\n\tevt: %06d / time:%s\n" print(msg % (enum0, et0_str, enum1, et1_str)) timespan = (et1.ns()-et0.ns())*1e-9 print('\tTimespan: %s sec' % timespan) rate = round(((nevts+2)/timespan), 2) print('\tRate : %s Hz\n' % rate) def pdb(self): parser = ArgumentParser(description=DESCR_PDB, prog='bgvUber.py pdb', usage='%(prog)s ' + USAGE_STR_NOOUT, formatter_class=RawDescriptionHelpFormatter) parser = set_parser_args(parser, fout=False) # Set common arguments args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('Extraction function running...') # Definition of file lists files = set_input_files(args['fin']) ####################################################################### # Gaudi Init ####################################################################### tick0 = datetime.now() appMgr, evt = initialize_gaudi(files) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) ####################################################################### # START ####################################################################### if skipevts > 0: appMgr.run(skipevts) logger.debug('Trace before the loop') pdb.set_trace() raw = [[] for _ in range(len(KEYIDS))] ped = [[] for _ in range(len(KEYIDS))] hdr = [[] for _ in range(len(KEYIDS))] cor = [[] for _ in range(len(KEYIDS))] for i in range(nevts): appMgr.run(1) # Define some banks that I want easily accessible rawADC = evt['/Event/Raw/Velo/DecodedADC'] pedSubADC = evt['/Event/Corrected/SciFi/PedSubCorrectedADC'] headADC = evt['/Event/Corrected/SciFi/HeadCorrectedADC'] correctedADC = evt['/Event/Corrected/SciFi/CorrectedADC'] odin = evt['DAQ/ODIN'] datainfo = {} logger.debug('Evt Retrieved - type:%s' % type(rawADC)) try: for j, key in 
enumerate(KEYIDS): if(rawADC.containedObject(key)): raw[j] = array(rawADC.containedObject(key).data()) ped[j] = array(pedSubADC.containedObject(key).data()) hdr[j] = array(headADC.containedObject(key).data()) tmp = correctedADC.containedObject(key).data() cor[j] = array(tmp) datainfo['bunchID'] = odin.bunchId() else: logger.warning('Event %d: rawADC is empty') except AttributeError as e: logger.warning('Nothing contained - %s' % e) pdb.set_trace() break pdb.set_trace() logger.debug('Trace after the loop') pdb.set_trace() def pedhead(self): parser = ArgumentParser(description=DESCR_PED, prog='bgvUber.py pedhead', usage='%(prog)s ' + USAGE_STR, formatter_class=RawDescriptionHelpFormatter) parser = set_parser_args(parser, fout=True) # Set common arguments parser.add_argument('--hdr_thres', action='store', default=512, type=int, help='The header threshold') args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('PedHead function running...') # Definition of file lists files = set_input_files(args['fin']) args['fout'] = abspath(args['fout']) ####################################################################### # INIT ####################################################################### tick0 = datetime.now() appMgr, evt = initialize_gaudi(files, not_corrected=True) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) ####################################################################### # START ####################################################################### if skipevts > 0: appMgr.run(skipevts) hd_pos = arange(256) % 4 > 1 ch_pos = arange(2048) % 32 < 2 raw = [[] for _ in range(len(KEYIDS))] raw_h = [[] for _ in range(len(KEYIDS))] raw_p = [[] for _ in range(len(KEYIDS))] cnt = 0 for i in range(nevts): appMgr.run(1) dadcs = evt['/Event/Raw/Velo/DecodedADC'] dhdr = evt['/Event/Raw/Velo/DecodedHeaders'] logger.debug('Evt #%d Retrieved' % i) try: for j, key in enumerate(KEYIDS): if(dadcs.containedObject(key)): tmp = array(dadcs.containedObject(key).data()) tmp_h = array(dhdr.containedObject(key).data())[hd_pos] raw_h[j].append(tmp_h) raw_p[j].append(tmp[ch_pos]) raw[j].append(tmp) else: logger.warning('Event #%d: tes_bank is empty') cnt += 1 except AttributeError as e: logger.warning('Nothing contained - %s' % e) break logger.debug('# of empty events: %d' % cnt) ####################################################################### # POST-LOOP PROCESSING ####################################################################### # Pedestal & RMS raw = array(raw) mu = mean(raw, axis=1) mu_int = rint(mu).astype(int16) si = std(raw, axis=1) # Headers head_thres = args['hdr_thres'] evt_num = raw.shape[1] raw_h = array(raw_h) logger.debug("raw_h: %s" % str(raw_h.shape)) logger.debug("raw: %s" % str(raw.shape)) tst = [] for k in range(8): # Loop over TELL1 for c in range(64): # Loop over channels vec00, vec01 = [[], []], [[], []] vec10, vec11 = [[], []], [[], []] print("\n%s-%s" % (k, c)) tmp_raw_hdr_0 = raw_h[k, :, c*2] tmp_raw_hdr_1 = raw_h[k, :, c*2+1] print("tmp: %s, %s" % (tmp_raw_hdr_0.shape, tmp_raw_hdr_0)) tmp_raw_hdr = vstack((tmp_raw_hdr_0, tmp_raw_hdr_1)) tmp_raw_hdr = tmp_raw_hdr.transpose() tmp_raw_hdr_bool = tmp_raw_hdr > head_thres # np.where((tmp_raw_hdr_bool == (False, False)).all(axis=1)) tst.append(tmp_raw_hdr_bool) import pickle as pkl pkl.dump(tst) 
####################################################################### # OUTPUT ####################################################################### tick1 = datetime.now() fp = create_out_file(args['fout'], args['fin'][0]) grp1 = fp.create_group('/Raw/') fp.attrs['file_type'] = 'pedhead' grp1.create_dataset('MeanADC', data=mu_int) grp1.create_dataset('StdADC', data=si) fp.close() logger.info('Dump time: %s' % (datetime.now() - tick1)) ####################################################################### logger.info('Total analysis time: %s' % (datetime.now() - tick0)) def pulseshape(self): def integer_list_tell1(l): try: l = map(int, l.split(',')) return l except ValueError: raise ArgumentTypeError("Should be an integer list e.g 8,9") def integer_list_links(l): try: l = map(int, l.split(',')) if any(t < 0 or t > 63 for t in l): raise ValueError return l except ValueError: raise ArgumentTypeError("Should be an integer list e.g 8,9") def integer_list_channels(l): try: l = map(int, l.split(',')) if any(t < 0 or t > 31 for t in l): raise ValueError return l except ValueError: raise ArgumentTypeError("Should be an integer list e.g 8,9") parser = ArgumentParser(description='Extract the data in the case of \ a pulse shape scan analysis', prog='bgvUber.py pulseshape', usage='%(prog)s ' + USAGE_STR, formatter_class=RawDescriptionHelpFormatter, epilog=EPILOGUE_PS) parser = set_parser_args(parser, fout=True) # Set common arguments # Function specific args parser.add_argument('--tae', '-t', action='store', default=2, type=int, help='Number of TAE events') parser.add_argument('--nsteps', '-e', action='store', default=25, type=int, help='Number of steps. For pulse shape \ scan the total number of processed events is the \ nevts*nsteps') parser.add_argument('--tellkeys', '-k', action='store', default=KEYIDS, type=integer_list_tell1, help='TELL1s to be used %s' % KEYIDS) parser.add_argument('--linklist', '-l', action='store', default=range(5), type=integer_list_links, help='The analogue links [0-63] to be tested') parser.add_argument('--chlist', '-c', action='store', default=range(4, 29), type=integer_list_channels, help='The analogue link channels [0-31] to be\ tested.') parser.add_argument('--pedestal', '-p', action='store', type=str, default='', help='A path of a hdf5 pedestal file to be used\ for removing the noise') args = vars(parser.parse_args(sys.argv[2:])) # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('PulseShape function running...') # Definition of file lists files = set_input_files(args['fin']) args['fout'] = abspath(args['fout']) ####################################################################### # INIT ####################################################################### from ROOT import TFile, TH2F logging.debug('INITIALIZING 1: Gaudi, args') # Define locations in TES where data are to be stored # TAE before and TAE after loc = ['Prev%d/' % i for i in range(args['tae'], 0, -1)] + [''] + \ ['Next%d/' % i for i in range(1, args['tae']+1)] tick0 = datetime.now() appMgr, evt = initialize_gaudi(files, tae=args['tae'], loc=loc) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) nsteps = int(args['nsteps']) ####################################################################### # INIT 2 - create hists etc ####################################################################### 
logging.debug('INITIALIZING 2: set vars - create hists') fp = TFile(args['fout'], 'recreate') tellkeys = args['tellkeys'] chlist = args['chlist'] # range(4, 29) linklist = args['linklist'] # range(64) logging.debug('ChannelList: len=%d, %s' % (len(chlist), chlist)) logging.debug('LinkList : len=%d, %s' % (len(linklist), linklist)) pulsehists, h_cmplpulse, h_avgpulse = {}, {}, {} for key in tellkeys: pulsehists[key] = [] h_cmplpulse[key] = {} h_avgpulse[key] = {} for ll in linklist: logging.debug('Key:%2d | Link:%2d' % (key, ll)) # Define a histogram for each link hname = 'h_PSscan_src%02d_link%03d' % (key, ll) htitle = 'PulseShapeScan: Source %d, Link %d' % (key, ll) nxbins = nsteps*len(loc) xmax = nsteps*len(loc)*0.5 xmin = -xmax hist = TH2F(hname, htitle, nxbins, xmin, xmax, 200, 300.0, 700.0) pulsehists[key].append(hist) h_cmplpulse[key][ll] = {} h_avgpulse[key][ll] = [] for timeslot in range(nxbins): h_cmplpulse[key][ll][xmin + timeslot] = [] logger.debug('Histograms list done: len=%d, %s' % (len(pulsehists), pulsehists)) ####################################################################### # START ####################################################################### if skipevts > 0: logger.debug('Skipping %s events' % skipevts) appMgr.run(skipevts) step = 0 n = 0 while n < nevts*nsteps: skipevent = False n += 1 if n % nevts == 1: logging.info('Event #%d, Step:#%d' % (n, step)) step += 1 else: logging.debug('Event #%d, Step:#%d' % (n, step)) appMgr.run(1) if not evt['/Event'].__nonzero__(): logging.warning('EMPTY Event #%d' % n) continue adcs, headers, x_axis = [], [], [] # evtinfo = [] # for every location (Prev2, Prev1 etc) get the adcs and # check if the banks are not empty; if they are skipevent=True for i, l in enumerate(loc): a = evt[l + 'Raw/Velo/DecodedADC'] h = evt[l + 'Raw/Velo/DecodedHeaders'] # e = evt[l + 'Raw/Velo/EvtInfo'] x_axis.append((step+(i-0.5*len(loc))*nsteps)) try: assert a and h adcs.append(a) headers.append(h) except AssertionError: logger.warning('AssertionError: Empty event #%d, loc %s' % (n, l)) skipevent = True # Some ODIN stuff too - removed # If anything happened from above that I don't want, skip evt. 
if skipevent: logger.debug('Event skipped') continue logger.debug('Event data retrieved') # Now if everything ok with the adcs & headers # for every TELL for key in tellkeys: # logger.debug('Sensor %d' % key) tell_adcs, tell_hdrs = [], [] # for every TAE of the TELL for l in range(len(loc)): l_adcs = adcs[l] l_hdrs = headers[l] for a in l_adcs.containedObjects(): if a.key() == key: tell_adcs.append(a.decodedData()) for h in l_hdrs.containedObjects(): if h.key() == key: tell_hdrs.append(h.decodedData()) for ind_l, l in enumerate(linklist): for ch in chlist: # logger.debug('Link:%02d, Ch:%02d' % (l, ch)) for s in range(len(loc)): pulsehists[key][ind_l].Fill(x_axis[s], tell_adcs[s][l*32+ch]) fp.Write() fp.Close() def datacheck(self): def mcms(data): corr = [0]*NUM_CH for i in range(64): x = 0 for j in range(32): ch = i*32+j x += data[ch] x /= 32 for j in range(32): ch = i*32+j corr[ch] = data[ch] - x return corr def rewind(appMgr): algs = appMgr.algorithms() for i in algs: appMgr.algorithm(i).Enable = False appMgr.evtsel().rewind() for i in algs: appMgr.algorithm(i).Enable = True parser = ArgumentParser(description='Check the data and output to root\ file for basic checking.', prog='bgvUber.py datacheck', usage='%(prog)s ' + USAGE_STR) parser = set_parser_args(parser, fout=True) # Set common arguments # Function specific parser.add_argument('--rewind', '-r', action='store_true', default=False, help='Rewind to the beggining') parser.add_argument('--pevts', '-p', action='store', default=500, type=int, help='Number of events for pedestal definition') parser.add_argument('--gt', action='store', default=None, type=int, help='Cluster threshold in case of gt comparison') parser.add_argument('--lt', action='store', default=None, type=int, help='Cluster threshold in case of lt comparison') parser.add_argument('--corr', action='store_true', default=False, help='Add OfflineADCCorrections algorithm.') args = vars(parser.parse_args(sys.argv[2:])) if args['lt'] is not None and args['gt'] is not None: raise ArgumentTypeError('You can enable only lt or gt') # Setting Logger set_logging(counts_to_verbosity(args['verbosity'])) global logger logger = logging.getLogger(__name__) logger.debug('ARGS: %s' % args) logger.debug('Extraction function running...') # Definition of file lists files = set_input_files(args['fin']) args['fout'] = abspath(args['fout']) ####################################################################### # INIT ####################################################################### tick0 = datetime.now() nc = not args['corr'] appMgr, evt = initialize_gaudi(files, not_corrected=nc) logger.info('Elapsed time [init]: %s' % (datetime.now()-tick0)) nevts = int(args['nevts']) skipevts = int(args['sevts']) pevts = int(args['pevts']) fcomp, sthres = None, None if args['gt'] is not None: fcomp = gt sthres = args['gt'] # seed threshold elif args['lt'] is not None: fcomp = lt sthres = args['lt'] logger.debug('INIT: nc=%s, fcomp=%s, sthres=%s' % (nc, fcomp, sthres)) from math import sqrt from ROOT import (TFile, TH1F, TH2F, TGraph, TMultiGraph, TLegend) f = TFile(args['fout'], 'recreate') ####################################################################### # START ####################################################################### # hists headhists = {} noisehists, rawnoisehists = {}, {} pedsubhists, pedhists = {}, {} # arrays peds = {} sum_vals, sum_vals2 = {}, {} sum_vals_raw, sum_vals2_raw = {}, {} if skipevts > 0: appMgr.run(skipevts) logger.info('Loop for pedestal definition') for i in 
range(pevts): appMgr.run(1) if not evt['/Event'].__nonzero__(): break adcs = evt['Raw/Velo/DecodedADC'] for sen in adcs.containedObjects(): key = sen.key() if i == 0: peds[key] = [0.0]*NUM_CH peds[key] = [(a + b) for a, b in zip(peds[key], sen.decodedData())] headers = evt['Raw/Velo/DecodedHeaders'] for h in headers.containedObjects(): key = h.key() if i == 0: headhists[key] = TH2F("h_header_%03d" % (key), "headers in sensor " + str(key), 256, -0.5, 255.5, 200, 411.5, 611.5) hdata = h.decodedData() for j, d in enumerate(hdata): headhists[key].Fill(j, d) logger.debug('Setting pedestal histograms') for k, v in peds.iteritems(): peds[k] = [(i / pevts) for i in v] pedhists[k] = TH1F("h_pedestal_%03d" % (k), "pedestals in sensor " + str(k), NUM_CH, -0.5, 2047.5) for ch in range(NUM_CH): pedhists[k].Fill(ch, peds[k][ch]) if args['rewind']: logger.debug('Rewinding') rewind(appMgr) logger.info('Looping over events, setting the pedsub histogram') bunchIDs = TH1F("h_bunch_ids", "Bunch IDs Hist", 3564, 0, 3563) cluster_num, cluster_num_sum = {}, {} evts_x, bids = [], [] for e in range(nevts): appMgr.run(1) if not evt['/Event'].__nonzero__(): logger.warning('Evt #%d is EMPTY' % e) break if nc: adcs = evt['Raw/Velo/DecodedADC'] # location in TES else: adcs = evt['Raw/Corrected/SciFi/CorrectedADC'] bid = evt['DAQ/ODIN'].bunchId() bunchIDs.Fill(bid) evts_x.append(e) bids.append(bid) logger.debug('Evt #%d / BunchID: %d' % (e, bid)) for sen in adcs.containedObjects(): key = sen.key() # gives number of Tell1 Sensor # should come from VeloTELL1Data function m_decodedData raw = sen.decodedData() pedsub = [(raw[j] - peds[key][j]) for j in range(NUM_CH)] if fcomp is not None: cnum = len([i for i in pedsub if fcomp(i, sthres)]) try: cluster_num_sum[key] += cnum cluster_num[key].append(cnum) except KeyError: cluster_num_sum[key] = cnum cluster_num[key] = [cnum] corr = mcms(pedsub) # noise if e == 0: sum_vals[key] = [0.0]*NUM_CH sum_vals2[key] = [0.0]*NUM_CH sum_vals_raw[key] = [0.0]*NUM_CH sum_vals2_raw[key] = [0.0]*NUM_CH pedsubhists[key] = TH2F("h_pedsub_adcs_%03d" % (key), "pedestal subbed ADCs in sensor " + str(key), NUM_CH, -0.5, 2047.5, 200, -100.5, 99.5) for j, d in enumerate(pedsub): pedsubhists[key].Fill(j, d) sum_vals[key] = [(x+y) for x, y in zip(sum_vals[key], corr)] sum_vals2[key] = [(x+(y*y)) for x, y in zip(sum_vals2[key], corr)] sum_vals_raw[key] = [(x+y) for x, y in zip(sum_vals[key], pedsub)] sum_vals2_raw[key] = [(x+(y*y)) for x, y in zip(sum_vals2[key], pedsub)] # Further Diagnostics evts_x = array(evts_x).astype('float') if evts_x.size != nevts: logger.warning('Processed %d/%d events' % (evts_x.size, nevts)) ##################################### # AFTER LOOP - Diagnostics # ##################################### if fcomp is not None: TOP_TELLS = [9, 4, 1, 2] BOT_TELLS = [6, 5, 7, 8] if args['lt'] is not None: mg_title = "#Channels < %d" % sthres else: mg_title = "#Channels > %d" % sthres mg_top = TMultiGraph() mg_top_lgd = TLegend(0.1, 0.7, 0.3, 0.9) mg_top_lgd.SetNColumns(2) mg_top_lgd.SetHeader("Test") mg_top_lgd.SetFillColor(0) for i, k in enumerate(TOP_TELLS): clu_y = array(cluster_num[k]).astype('float') tmp_graph = TGraph(evts_x.size, evts_x, clu_y) tmp_graph.SetLineColor(i) mg_top.Add(tmp_graph, "lp") mg_top_lgd.AddEntry(tmp_graph, "%d" % i, 'lol') mg_top_lgd.DrawClone("Same") mg_top.SetName("g_seeds_top") mg_top.SetTitle(mg_title) tmp_graph.GetXaxis().SetTitle("Event #") tmp_graph.GetYaxis().SetTitle("# of channels") mg_top.Write() mg_bot = TMultiGraph() for i, k in 
enumerate(BOT_TELLS): clu_y = array(cluster_num[k]).astype('float') tmp_graph = TGraph(evts_x.size, evts_x, clu_y) tmp_graph.SetLineColor(i) mg_bot.Add(tmp_graph, "lp") mg_bot.SetName("g_seeds_bot") mg_bot.SetTitle(mg_title) #mg_bot.GetXaxis().SetTitle("Event #") #mg_bot.GetYaxis().SetTitle("# of channels") mg_bot.Write() # TGraph for BIDs per event bids = array(bids).astype('float') g_bids = TGraph(evts_x.size, evts_x, bids) g_bids.SetLineColor(4) #graph.Draw("AC*") g_bids.SetName("g_bunch_ids") g_bids.SetTitle("Bunch IDs per event") g_bids.GetXaxis().SetTitle("Event #") g_bids.GetYaxis().SetTitle("Bunch ID") g_bids.Write() # Noise logger.debug('Setting noise histograms') noise = {} rawnoise = {} for key in sum_vals: noise[key] = [sqrt((y)/float(nevts)) for x, y in zip(sum_vals[key], sum_vals2[key])] rawnoise[key] = [sqrt((y)/float(nevts)) for x, y in zip(sum_vals_raw[key], sum_vals2_raw[key])] noisehists[key] = TH1F("h_noise_%03d" % (key), "noise in sensor " + str(key), NUM_CH, -0.5, 2047.5) rawnoisehists[key] = TH1F("h_rawnoise_%03d" % (key), "noise in sensor " + str(key), NUM_CH, -0.5, 2047.5) for j in range(NUM_CH): noisehists[key].SetBinContent(j+1, noise[key][j]) rawnoisehists[key].SetBinContent(j+1, rawnoise[key][j]) f.Write() f.Close() def update_conditions(self): parser = ArgumentParser(description='Update BGVCOND.db', prog='bgvUber.py update_conditions', usage='%(prog)s ' + USAGE_STR) parser.add_argument('fin', nargs='+', help='Input file path for pedestal file (h5).') parser.add_argument('-v', '--verbosity', action='count', help='Increase output verbosity') parser.add_argument('-b', '--basedir', action='store', type=str, default=DIR_BGVDB, help='Set the base directory of the database') args = vars(parser.parse_args(sys.argv[2:])) fpeds = File(args['fin']) if fpeds.attrs['file_type'] != 'Pedestal': logger.error("File type=%s, not 'Pedestal'. Exit.") return peds = fpeds['Raw/MeanADC'] dpedvalues = {} for i, key in enumerate(KEYIDS): vals_str = str(list(peds[i]))[1:-1] vals_str = vals_str.replace(',', '') dpedvalues[key] = vals_str peds_xml_string = xml_cond_template % {"ped_key1": dpedvalues[1], "ped_key2": dpedvalues[2], "ped_key4": dpedvalues[4], "ped_key5": dpedvalues[5], "ped_key6": dpedvalues[6], "ped_key7": dpedvalues[7], "ped_key8": dpedvalues[8], "ped_key9": dpedvalues[9] } logger.info('XML string ready:') print(peds_xml_string) print() print(fpeds.attrs['file_orig']) date_str = fpeds.attrs['file_orig'].split('_')[-1].split('-')[0] tag = 'bgv-'+date_str print(tag) if __name__ == '__main__': UberBGV()
""" Diffusion 2: jump diffusion, stochastic volatility, stochastic time Created on Tue Dec 08 15:03:49 2009 Author: josef-pktd following Meucci License: BSD contains: CIRSubordinatedBrownian Heston IG JumpDiffusionKou JumpDiffusionMerton NIG VG References ---------- Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009 http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102 this is currently mostly a translation from matlab of http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance license BSD: Copyright (c) 2008, Attilio Meucci All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. TODO: * vectorize where possible * which processes are exactly simulated by finite differences ? * include or exclude (now) the initial observation ? * convert to and merge with diffusion.py (part 1 of diffusions) * which processes can be easily estimated ? loglike or characteristic function ? * tests ? 
check for possible index errors (random indices), graphs look ok * adjust notation, variable names, more consistent, more pythonic * delete a few unused lines, cleanup * docstrings random bug (showed up only once, need fuzz-testing to replicate) File "...\diffusion2.py", line 375, in <module> x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl) File "...\diffusion2.py", line 129, in simulate jumps_ts[n] = CumS[Events] IndexError: index out of bounds CumS is empty array, Events == -1 """ import numpy as np #from scipy import stats # currently only uses np.random import matplotlib.pyplot as plt class JumpDiffusionMerton(object): ''' Example ------- mu=.00 # deterministic drift sig=.20 # Gaussian component l=3.45 # Poisson process arrival rate a=0 # drift of log-jump D=.2 # st.dev of log-jump X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl) plt.figure() plt.plot(X.T) plt.title('Merton jump-diffusion') ''' def __init__(self): pass def simulate(self, m,s,lambd,a,D,ts,nrepl): T = ts[-1] # time points # simulate number of jumps n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1)) jumps=[] nobs=len(ts) jumps=np.zeros((nrepl,nobs)) for j in range(nrepl): # simulate jump arrival time t = T*np.random.rand(n_jumps[j])#,1) #uniform t = np.sort(t,0) # simulate jump size S = a + D*np.random.randn(n_jumps[j],1) # put things together CumS = np.cumsum(S) jumps_ts = np.zeros(nobs) for n in range(nobs): Events = np.sum(t<=ts[n])-1 #print n, Events, CumS.shape, jumps_ts.shape jumps_ts[n]=0 if Events > 0: jumps_ts[n] = CumS[Events] #TODO: out of bounds see top #jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl jumps[j,:] = jumps_ts D_Diff = np.zeros((nrepl,nobs)) for k in range(nobs): Dt=ts[k] if k>1: Dt=ts[k]-ts[k-1] D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl) x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps)) return x class JumpDiffusionKou(object): def __init__(self): pass def simulate(self, m,s,lambd,p,e1,e2,ts,nrepl): T=ts[-1] # simulate number of jumps N = np.random.poisson(lambd*T,size =(nrepl,1)) jumps=[] nobs=len(ts) jumps=np.zeros((nrepl,nobs)) for j in range(nrepl): # simulate jump arrival time t=T*np.random.rand(N[j]) t=np.sort(t) # simulate jump size ww = np.random.binomial(1, p, size=(N[j])) S = ww * np.random.exponential(e1, size=(N[j])) - \ (1-ww) * np.random.exponential(e2, N[j]) # put things together CumS = np.cumsum(S) jumps_ts = np.zeros(nobs) for n in range(nobs): Events = sum(t<=ts[n])-1 jumps_ts[n]=0 if Events: jumps_ts[n]=CumS[Events] jumps[j,:] = jumps_ts D_Diff = np.zeros((nrepl,nobs)) for k in range(nobs): Dt=ts[k] if k>1: Dt=ts[k]-ts[k-1] D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl) x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps)) return x class VG(object): '''variance gamma process ''' def __init__(self): pass def simulate(self, m,s,kappa,ts,nrepl): T=len(ts) dXs = np.zeros((nrepl,T)) for t in range(T): dt=ts[1]-0 if t>1: dt = ts[t]-ts[t-1] #print dt/kappa #TODO: check parameterization of gamrnd, checked looks same as np d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl)) #print s*np.sqrt(d_tau) # this raises exception: #dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau))) # np.random.normal requires scale >0 dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau)) dXs[:,t] = dX x = np.cumsum(dXs,1) return x class IG(object): '''inverse-Gaussian ??? 
used by NIG ''' def __init__(self): pass def simulate(self, l,m,nrepl): N = np.random.randn(nrepl,1) Y = N**2 X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2)) U = np.random.rand(nrepl,1) ind = U>m/(X+m) X[ind] = m*m/X[ind] return X.ravel() class NIG(object): '''normal-inverse-Gaussian ''' def __init__(self): pass def simulate(self, th,k,s,ts,nrepl): T = len(ts) DXs = np.zeros((nrepl,T)) for t in range(T): Dt=ts[1]-0 if t>1: Dt=ts[t]-ts[t-1] l = 1/k*(Dt**2) m = Dt DS = IG().simulate(l,m,nrepl) N = np.random.randn(nrepl) DX = s*N*np.sqrt(DS) + th*DS #print DS.shape, DX.shape, DXs.shape DXs[:,t] = DX x = np.cumsum(DXs,1) return x class Heston(object): '''Heston Stochastic Volatility ''' def __init__(self): pass def simulate(self, m, kappa, eta,lambd,r, ts, nrepl,tratio=1.): T = ts[-1] nobs = len(ts) dt = np.zeros(nobs) #/tratio dt[0] = ts[0]-0 dt[1:] = np.diff(ts) DXs = np.zeros((nrepl,nobs)) dB_1 = np.sqrt(dt) * np.random.randn(nrepl,nobs) dB_2u = np.sqrt(dt) * np.random.randn(nrepl,nobs) dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u vt = eta*np.ones(nrepl) v=[] dXs = np.zeros((nrepl,nobs)) vts = np.zeros((nrepl,nobs)) for t in range(nobs): dv = kappa*(eta-vt)*dt[t]+ lambd*np.sqrt(vt)*dB_2[:,t] dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:,t] vt = vt + dv vts[:,t] = vt dXs[:,t] = dX x = np.cumsum(dXs,1) return x, vts class CIRSubordinatedBrownian(object): '''CIR subordinated Brownian Motion ''' def __init__(self): pass def simulate(self, m, kappa, T_dot,lambd,sigma, ts, nrepl): T = ts[-1] nobs = len(ts) dtarr = np.zeros(nobs) #/tratio dtarr[0] = ts[0]-0 dtarr[1:] = np.diff(ts) DXs = np.zeros((nrepl,nobs)) dB = np.sqrt(dtarr) * np.random.randn(nrepl,nobs) yt = 1. dXs = np.zeros((nrepl,nobs)) dtaus = np.zeros((nrepl,nobs)) y = np.zeros((nrepl,nobs)) for t in range(nobs): dt = dtarr[t] dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:,t] yt = np.maximum(yt+dy,1e-10) # keep away from zero ? dtau = np.maximum(yt*dt, 1e-6) dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau)) y[:,t] = yt dtaus[:,t] = dtau dXs[:,t] = dX tau = np.cumsum(dtaus,1) x = np.cumsum(dXs,1) return x, tau, y def schout2contank(a,b,d): th = d*b/np.sqrt(a**2-b**2) k = 1/(d*np.sqrt(a**2-b**2)) s = np.sqrt(d/np.sqrt(a**2-b**2)) return th,k,s if __name__ == '__main__': #Merton Jump Diffusion #^^^^^^^^^^^^^^^^^^^^^ # grid of time values at which the process is evaluated #("0" will be added, too) nobs = 252.#1000 #252. ts = np.linspace(1./nobs, 1., nobs) nrepl=5 # number of simulations mu=.010 # deterministic drift sigma = .020 # Gaussian component lambd = 3.45 *10 # Poisson process arrival rate a=0 # drift of log-jump D=.2 # st.dev of log-jump jd = JumpDiffusionMerton() x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl) plt.figure() plt.plot(x.T) #Todo plt.title('Merton jump-diffusion') sigma = 0.2 lambd = 3.45 x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl) plt.figure() plt.plot(x.T) #Todo plt.title('Merton jump-diffusion') #Kou jump diffusion #^^^^^^^^^^^^^^^^^^ mu=.0 # deterministic drift lambd=4.25 # Poisson process arrival rate p=.5 # prob. of up-jump e1=.2 # parameter of up-jump e2=.3 # parameter of down-jump sig=.2 # Gaussian component x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl) plt.figure() plt.plot(x.T) #Todo plt.title('double exponential (Kou jump diffusion)') #variance-gamma #^^^^^^^^^^^^^^ mu = .1 # deterministic drift in subordinated Brownian motion kappa = 1. #10. 
#1 # inverse for gamma shape parameter sig = 0.5 #.2 # s.dev in subordinated Brownian motion x = VG().simulate(mu,sig,kappa,ts,nrepl) plt.figure() plt.plot(x.T) #Todo plt.title('variance gamma') #normal-inverse-Gaussian #^^^^^^^^^^^^^^^^^^^^^^^ # (Schoutens notation) al = 2.1 be = 0 de = 1 # convert parameters to Cont-Tankov notation th,k,s = schout2contank(al,be,de) x = NIG().simulate(th,k,s,ts,nrepl) plt.figure() plt.plot(x.T) #Todo x-axis plt.title('normal-inverse-Gaussian') #Heston Stochastic Volatility #^^^^^^^^^^^^^^^^^^^^^^^^^^^^ m=.0 kappa = .6 # 2*Kappa*Eta>Lambda^2 eta = .3**2 lambd =.25 r = -.7 T = 20. nobs = 252.*T#1000 #252. tsh = np.linspace(T/nobs, T, nobs) x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.) plt.figure() plt.plot(x.T) plt.title('Heston Stochastic Volatility') plt.figure() plt.plot(np.sqrt(vts).T) plt.title('Heston Stochastic Volatility - CIR Vol.') plt.figure() plt.subplot(2,1,1) plt.plot(x[0]) plt.title('Heston Stochastic Volatility process') plt.subplot(2,1,2) plt.plot(np.sqrt(vts[0])) plt.title('CIR Volatility') #CIR subordinated Brownian #^^^^^^^^^^^^^^^^^^^^^^^^^ m=.1 sigma=.4 kappa=.6 # 2*Kappa*T_dot>Lambda^2 T_dot=1 lambd=1 #T=252*10 #dt=1/252 #nrepl=2 T = 10. nobs = 252.*T#1000 #252. tsh = np.linspace(T/nobs, T, nobs) x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl) plt.figure() plt.plot(tsh, x.T) plt.title('CIRSubordinatedBrownian process') plt.figure() plt.plot(tsh, y.T) plt.title('CIRSubordinatedBrownian - CIR') plt.figure() plt.plot(tsh, tau.T) plt.title('CIRSubordinatedBrownian - stochastic time ') plt.figure() plt.subplot(2,1,1) plt.plot(tsh, x[0]) plt.title('CIRSubordinatedBrownian process') plt.subplot(2,1,2) plt.plot(tsh, y[0], label='CIR') plt.plot(tsh, tau[0], label='stoch. time') plt.legend(loc='upper left') plt.title('CIRSubordinatedBrownian') #plt.show()
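# ---------------------------------------------------------------------------
# Hedged sketch (not in the original module): a defensive version of the jump
# accumulation loop used in JumpDiffusionMerton.simulate and
# JumpDiffusionKou.simulate, guarding against the IndexError noted in the
# module docstring (CumS is an empty array, Events == -1).  Note that Kou's
# `if Events:` test is also true for Events == -1 and, like Merton's
# `if Events > 0:`, false for Events == 0, which drops the first jump;
# checking `Events >= 0` handles both cases.  Function name and signature
# are mine.
import numpy as np


def accumulate_jumps(t, CumS, ts):
    """Cumulative jump size at each observation time.

    t    : sorted jump arrival times (may be empty)
    CumS : cumulative jump sizes, CumS[i] = S[0] + ... + S[i]
    ts   : observation time points
    """
    jumps_ts = np.zeros(len(ts))
    for n, tn in enumerate(ts):
        Events = np.sum(t <= tn) - 1        # index of the last jump before tn
        if Events >= 0 and len(CumS):       # -1 means no jump has occurred yet
            jumps_ts[n] = CumS[Events]
    return jumps_ts
# ---------------------------------------------------------------------------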
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, division # standard library dependencies try: # prefer lxml as it supports XPath from lxml import etree except ImportError: import xml.etree.ElementTree as etree from operator import attrgetter import itertools from petl.compat import string_types, text_type # internal dependencies from petl.util.base import Table from petl.io.sources import read_source_from_arg def fromxml(source, *args, **kwargs): """ Extract data from an XML file. E.g.:: >>> import petl as etl >>> # setup a file to demonstrate with ... d = '''<table> ... <tr> ... <td>foo</td><td>bar</td> ... </tr> ... <tr> ... <td>a</td><td>1</td> ... </tr> ... <tr> ... <td>b</td><td>2</td> ... </tr> ... <tr> ... <td>c</td><td>2</td> ... </tr> ... </table>''' >>> with open('example1.xml', 'w') as f: ... f.write(d) ... 212 >>> table1 = etl.fromxml('example1.xml', 'tr', 'td') >>> table1 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | '1' | +-----+-----+ | 'b' | '2' | +-----+-----+ | 'c' | '2' | +-----+-----+ If the data values are stored in an attribute, provide the attribute name as an extra positional argument:: >>> d = '''<table> ... <tr> ... <td v='foo'/><td v='bar'/> ... </tr> ... <tr> ... <td v='a'/><td v='1'/> ... </tr> ... <tr> ... <td v='b'/><td v='2'/> ... </tr> ... <tr> ... <td v='c'/><td v='2'/> ... </tr> ... </table>''' >>> with open('example2.xml', 'w') as f: ... f.write(d) ... 220 >>> table2 = etl.fromxml('example2.xml', 'tr', 'td', 'v') >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | '1' | +-----+-----+ | 'b' | '2' | +-----+-----+ | 'c' | '2' | +-----+-----+ Data values can also be extracted by providing a mapping of field names to element paths:: >>> d = '''<table> ... <row> ... <foo>a</foo><baz><bar v='1'/><bar v='3'/></baz> ... </row> ... <row> ... <foo>b</foo><baz><bar v='2'/></baz> ... </row> ... <row> ... <foo>c</foo><baz><bar v='2'/></baz> ... </row> ... </table>''' >>> with open('example3.xml', 'w') as f: ... f.write(d) ... 223 >>> table3 = etl.fromxml('example3.xml', 'row', ... {'foo': 'foo', 'bar': ('baz/bar', 'v')}) >>> table3 +------------+-----+ | bar | foo | +============+=====+ | ('1', '3') | 'a' | +------------+-----+ | '2' | 'b' | +------------+-----+ | '2' | 'c' | +------------+-----+ If `lxml <http://lxml.de/>`_ is installed, full XPath expressions can be used. Note that the implementation is currently **not** streaming, i.e., the whole document is loaded into memory. If multiple elements match a given field, all values are reported as a tuple. If there is more than one element name used for row values, a tuple or list of paths can be provided, e.g., ``fromxml('example.html', './/tr', ('th', 'td'))``. 
""" source = read_source_from_arg(source) return XmlView(source, *args, **kwargs) class XmlView(Table): def __init__(self, source, *args, **kwargs): self.source = source self.args = args if len(args) == 2 and isinstance(args[1], (string_types, tuple, list)): self.rmatch = args[0] self.vmatch = args[1] self.vdict = None self.attr = None elif len(args) == 2 and isinstance(args[1], dict): self.rmatch = args[0] self.vmatch = None self.vdict = args[1] self.attr = None elif len(args) == 3: self.rmatch = args[0] self.vmatch = args[1] self.vdict = None self.attr = args[2] else: assert False, 'bad parameters' self.missing = kwargs.get('missing', None) def __iter__(self): vmatch = self.vmatch vdict = self.vdict with self.source.open('rb') as xmlf: tree = etree.parse(xmlf) if not hasattr(tree, 'iterfind'): # Python 2.6 compatibility tree.iterfind = tree.findall if vmatch is not None: # simple case, all value paths are the same for rowelm in tree.iterfind(self.rmatch): if self.attr is None: getv = attrgetter('text') else: getv = lambda e: e.get(self.attr) if isinstance(vmatch, string_types): # match only one path velms = rowelm.findall(vmatch) else: # match multiple paths velms = itertools.chain(*[rowelm.findall(enm) for enm in vmatch]) yield tuple(getv(velm) for velm in velms) else: # difficult case, deal with different paths for each field # determine output header flds = tuple(sorted(map(text_type, vdict.keys()))) yield flds # setup value getters vmatches = dict() vgetters = dict() for f in flds: vmatch = self.vdict[f] if isinstance(vmatch, string_types): # match element path vmatches[f] = vmatch vgetters[f] = element_text_getter(self.missing) else: # match element path and attribute name vmatches[f] = vmatch[0] attr = vmatch[1] vgetters[f] = attribute_text_getter(attr, self.missing) # determine data rows for rowelm in tree.iterfind(self.rmatch): yield tuple(vgetters[f](rowelm.findall(vmatches[f])) for f in flds) def element_text_getter(missing): def _get(v): if len(v) > 1: return tuple(e.text for e in v) elif len(v) == 1: return v[0].text else: return missing return _get def attribute_text_getter(attr, missing): def _get(v): if len(v) > 1: return tuple(e.get(attr) for e in v) elif len(v) == 1: return v[0].get(attr) else: return missing return _get
import pytest from unittest import mock from awx.api.versioning import reverse from awx.main.utils import decrypt_field from awx.main.models.workflow import ( WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate ) from awx.main.models.jobs import JobTemplate from awx.main.tasks import deep_copy_model_obj @pytest.mark.django_db def test_job_template_copy(post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin): job_template_with_survey_passwords.project = project job_template_with_survey_passwords.inventory = inventory job_template_with_survey_passwords.save() job_template_with_survey_passwords.credentials.add(credential) job_template_with_survey_passwords.credentials.add(machine_credential) job_template_with_survey_passwords.credentials.add(vault_credential) job_template_with_survey_passwords.admin_role.members.add(alice) project.admin_role.members.add(alice) inventory.admin_role.members.add(alice) assert get( reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), alice, expect=200 ).data['can_copy'] is False assert get( reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), admin, expect=200 ).data['can_copy'] is True assert post( reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), {'name': 'new jt name'}, alice, expect=403 ).data['detail'] == 'Insufficient access to Job Template credentials.' jt_copy_pk = post( reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), {'name': 'new jt name'}, admin, expect=201 ).data['id'] # give credential access to user 'alice' for c in (credential, machine_credential, vault_credential): c.use_role.members.add(alice) c.save() assert get( reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), alice, expect=200 ).data['can_copy'] is True jt_copy_pk_alice = post( reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), {'name': 'new jt name'}, alice, expect=201 ).data['id'] jt_copy_admin = type(job_template_with_survey_passwords).objects.get(pk=jt_copy_pk) jt_copy_alice = type(job_template_with_survey_passwords).objects.get(pk=jt_copy_pk_alice) assert jt_copy_admin.created_by == admin assert jt_copy_alice.created_by == alice for jt_copy in (jt_copy_admin, jt_copy_alice): assert jt_copy.name == 'new jt name' assert jt_copy.project == project assert jt_copy.inventory == inventory assert jt_copy.playbook == job_template_with_survey_passwords.playbook assert jt_copy.credentials.count() == 3 assert credential in jt_copy.credentials.all() assert vault_credential in jt_copy.credentials.all() assert machine_credential in jt_copy.credentials.all() assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec @pytest.mark.django_db def test_project_copy(post, get, project, organization, scm_credential, alice): project.credential = scm_credential project.save() project.admin_role.members.add(alice) assert get( reverse('api:project_copy', kwargs={'pk': project.pk}), alice, expect=200 ).data['can_copy'] is False project.organization.admin_role.members.add(alice) scm_credential.use_role.members.add(alice) assert get( reverse('api:project_copy', kwargs={'pk': project.pk}), alice, expect=200 ).data['can_copy'] is True project_copy_pk = post( reverse('api:project_copy', kwargs={'pk': project.pk}), {'name': 'copied project'}, alice, expect=201 ).data['id'] project_copy = 
type(project).objects.get(pk=project_copy_pk) assert project_copy.created_by == alice assert project_copy.name == 'copied project' assert project_copy.organization == organization assert project_copy.credential == scm_credential @pytest.mark.django_db def test_inventory_copy(inventory, group_factory, post, get, alice, organization): group_1_1 = group_factory('g_1_1') group_2_1 = group_factory('g_2_1') group_2_2 = group_factory('g_2_2') group_2_1.parents.add(group_1_1) group_2_2.parents.add(group_1_1) group_2_2.parents.add(group_2_1) host = group_1_1.hosts.create(name='host', inventory=inventory) group_2_1.hosts.add(host) inventory.admin_role.members.add(alice) assert get( reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200 ).data['can_copy'] is False inventory.organization.admin_role.members.add(alice) assert get( reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200 ).data['can_copy'] is True with mock.patch('awx.api.generics.trigger_delayed_deep_copy') as deep_copy_mock: inv_copy_pk = post( reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), {'name': 'new inv name'}, alice, expect=201 ).data['id'] inventory_copy = type(inventory).objects.get(pk=inv_copy_pk) args, kwargs = deep_copy_mock.call_args deep_copy_model_obj(*args, **kwargs) group_1_1_copy = inventory_copy.groups.get(name='g_1_1') group_2_1_copy = inventory_copy.groups.get(name='g_2_1') group_2_2_copy = inventory_copy.groups.get(name='g_2_2') host_copy = inventory_copy.hosts.get(name='host') assert inventory_copy.organization == organization assert inventory_copy.created_by == alice assert inventory_copy.name == 'new inv name' assert set(group_1_1_copy.parents.all()) == set() assert set(group_2_1_copy.parents.all()) == set([group_1_1_copy]) assert set(group_2_2_copy.parents.all()) == set([group_1_1_copy, group_2_1_copy]) assert set(group_1_1_copy.hosts.all()) == set([host_copy]) assert set(group_2_1_copy.hosts.all()) == set([host_copy]) assert set(group_2_2_copy.hosts.all()) == set() @pytest.mark.django_db def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization): workflow_job_template.organization = organization workflow_job_template.save() jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)] nodes = [ WorkflowJobTemplateNode.objects.create( workflow_job_template=workflow_job_template, unified_job_template=jts[i] ) for i in range(0, 5) ] nodes[0].success_nodes.add(nodes[1]) nodes[1].success_nodes.add(nodes[2]) nodes[0].failure_nodes.add(nodes[3]) nodes[3].failure_nodes.add(nodes[4]) with mock.patch('awx.api.generics.trigger_delayed_deep_copy') as deep_copy_mock: wfjt_copy_id = post( reverse('api:workflow_job_template_copy', kwargs={'pk': workflow_job_template.pk}), {'name': 'new wfjt name'}, admin, expect=201 ).data['id'] wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id) args, kwargs = deep_copy_mock.call_args deep_copy_model_obj(*args, **kwargs) assert wfjt_copy.organization == organization assert wfjt_copy.created_by == admin assert wfjt_copy.name == 'new wfjt name' copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()] copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1])) for node, success_count, failure_count, always_count in zip( copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0] ): assert node.success_nodes.count() == success_count assert node.failure_nodes.count() == failure_count assert node.always_nodes.count() == 
always_count assert copied_node_list[1] in copied_node_list[0].success_nodes.all() assert copied_node_list[2] in copied_node_list[1].success_nodes.all() assert copied_node_list[3] in copied_node_list[0].failure_nodes.all() assert copied_node_list[4] in copied_node_list[3].failure_nodes.all() @pytest.mark.django_db def test_workflow_approval_node_copy(workflow_job_template, post, get, admin, organization): workflow_job_template.organization = organization workflow_job_template.save() ajts = [ WorkflowApprovalTemplate.objects.create( name='test-approval-{}'.format(i), description='description-{}'.format(i), timeout=30 ) for i in range(0, 5) ] nodes = [ WorkflowJobTemplateNode.objects.create( workflow_job_template=workflow_job_template, unified_job_template=ajts[i] ) for i in range(0, 5) ] nodes[0].success_nodes.add(nodes[1]) nodes[1].success_nodes.add(nodes[2]) nodes[0].failure_nodes.add(nodes[3]) nodes[3].failure_nodes.add(nodes[4]) assert WorkflowJobTemplate.objects.count() == 1 assert WorkflowJobTemplateNode.objects.count() == 5 assert WorkflowApprovalTemplate.objects.count() == 5 with mock.patch('awx.api.generics.trigger_delayed_deep_copy') as deep_copy_mock: wfjt_copy_id = post( reverse('api:workflow_job_template_copy', kwargs={'pk': workflow_job_template.pk}), {'name': 'new wfjt name'}, admin, expect=201 ).data['id'] wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id) args, kwargs = deep_copy_mock.call_args deep_copy_model_obj(*args, **kwargs) assert wfjt_copy.organization == organization assert wfjt_copy.created_by == admin assert wfjt_copy.name == 'new wfjt name' assert WorkflowJobTemplate.objects.count() == 2 assert WorkflowJobTemplateNode.objects.count() == 10 assert WorkflowApprovalTemplate.objects.count() == 10 original_templates = [ x.unified_job_template for x in workflow_job_template.workflow_job_template_nodes.all() ] copied_templates = [ x.unified_job_template for x in wfjt_copy.workflow_job_template_nodes.all() ] # make sure shallow fields like `timeout` are copied properly for i, t in enumerate(original_templates): assert t.timeout == 30 assert t.description == 'description-{}'.format(i) for i, t in enumerate(copied_templates): assert t.timeout == 30 assert t.description == 'description-{}'.format(i) # the Approval Template IDs on the *original* WFJT should not match *any* # of the Approval Template IDs on the *copied* WFJT assert not set([x.id for x in original_templates]).intersection( set([x.id for x in copied_templates]) ) # if you remove the " copy" suffix from the copied template names, they # should match the original templates assert ( set([x.name for x in original_templates]) == set([x.name.replace(' copy', '') for x in copied_templates]) ) @pytest.mark.django_db def test_credential_copy(post, get, machine_credential, credentialtype_ssh, admin): assert get( reverse('api:credential_copy', kwargs={'pk': machine_credential.pk}), admin, expect=200 ).data['can_copy'] is True credential_copy_pk = post( reverse('api:credential_copy', kwargs={'pk': machine_credential.pk}), {'name': 'copied credential'}, admin, expect=201 ).data['id'] credential_copy = type(machine_credential).objects.get(pk=credential_copy_pk) assert credential_copy.created_by == admin assert credential_copy.name == 'copied credential' assert credential_copy.credential_type == credentialtype_ssh assert credential_copy.inputs['username'] == machine_credential.inputs['username'] assert (decrypt_field(credential_copy, 'password') == decrypt_field(machine_credential, 'password')) 
@pytest.mark.django_db def test_notification_template_copy(post, get, notification_template_with_encrypt, organization, alice): notification_template_with_encrypt.organization.auditor_role.members.add(alice) assert get( reverse( 'api:notification_template_copy', kwargs={'pk': notification_template_with_encrypt.pk} ), alice, expect=200 ).data['can_copy'] is False notification_template_with_encrypt.organization.admin_role.members.add(alice) assert get( reverse( 'api:notification_template_copy', kwargs={'pk': notification_template_with_encrypt.pk} ), alice, expect=200 ).data['can_copy'] is True nt_copy_pk = post( reverse( 'api:notification_template_copy', kwargs={'pk': notification_template_with_encrypt.pk} ), {'name': 'copied nt'}, alice, expect=201 ).data['id'] notification_template_copy = type(notification_template_with_encrypt).objects.get(pk=nt_copy_pk) assert notification_template_copy.created_by == alice assert notification_template_copy.name == 'copied nt' assert notification_template_copy.organization == organization assert (decrypt_field(notification_template_with_encrypt, 'notification_configuration', 'token') == decrypt_field(notification_template_copy, 'notification_configuration', 'token')) @pytest.mark.django_db def test_inventory_script_copy(post, get, inventory_script, organization, alice): inventory_script.organization.auditor_role.members.add(alice) assert get( reverse('api:inventory_script_copy', kwargs={'pk': inventory_script.pk}), alice, expect=200 ).data['can_copy'] is False inventory_script.organization.admin_role.members.add(alice) assert get( reverse('api:inventory_script_copy', kwargs={'pk': inventory_script.pk}), alice, expect=200 ).data['can_copy'] is True is_copy_pk = post( reverse('api:inventory_script_copy', kwargs={'pk': inventory_script.pk}), {'name': 'copied inv script'}, alice, expect=201 ).data['id'] inventory_script_copy = type(inventory_script).objects.get(pk=is_copy_pk) assert inventory_script_copy.created_by == alice assert inventory_script_copy.name == 'copied inv script' assert inventory_script_copy.organization == organization
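# A minimal sketch of the copy-endpoint pattern exercised above, assuming the
# AWX test suite's `job_template` fixture alongside the same `post`/`get`
# request fixtures already used in this module: GET reports whether the
# requesting user may copy at all, POST with only a new name creates the clone.
@pytest.mark.django_db
def test_copy_endpoint_sketch(post, get, job_template, admin):
    url = reverse('api:job_template_copy', kwargs={'pk': job_template.pk})
    assert get(url, admin, expect=200).data['can_copy'] is True
    copy_pk = post(url, {'name': 'cloned jt'}, admin, expect=201).data['id']
    assert type(job_template).objects.get(pk=copy_pk).name == 'cloned jt'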
# -*- coding: utf-8 -*- """OSF mailing utilities. Email templates go in website/templates/emails Templates must end in ``.txt.mako`` for plaintext emails or``.html.mako`` for html emails. You can then create a `Mail` object given the basename of the template and the email subject. :: CONFIRM_EMAIL = Mail(tpl_prefix='confirm', subject="Confirm your email address") You can then use ``send_mail`` to send the email. Usage: :: from website import mails ... mails.send_mail('[email protected]', mails.CONFIRM_EMAIL, user=user) """ import os import logging import waffle from mako.lookup import TemplateLookup, Template from framework.email import tasks from osf import features from website import settings logger = logging.getLogger(__name__) EMAIL_TEMPLATES_DIR = os.path.join(settings.TEMPLATES_PATH, 'emails') _tpl_lookup = TemplateLookup( directories=[EMAIL_TEMPLATES_DIR], ) HTML_EXT = '.html.mako' DISABLED_MAILS = [ 'welcome', 'welcome_osf4i' ] class Mail(object): """An email object. :param str tpl_prefix: The template name prefix. :param str subject: The subject of the email. :param iterable categories: Categories to add to the email using SendGrid's SMTPAPI. Used for email analytics. See https://sendgrid.com/docs/User_Guide/Statistics/categories.html :param: bool engagement: Whether this is an engagement email that can be disabled with the disable_engagement_emails waffle flag """ def __init__(self, tpl_prefix, subject, categories=None, engagement=False): self.tpl_prefix = tpl_prefix self._subject = subject self.categories = categories self.engagement = engagement def html(self, **context): """Render the HTML email message.""" tpl_name = self.tpl_prefix + HTML_EXT return render_message(tpl_name, **context) def subject(self, **context): return Template(self._subject).render(**context) def render_message(tpl_name, **context): """Render an email message.""" tpl = _tpl_lookup.get_template(tpl_name) return tpl.render(**context) def send_mail( to_addr, mail, mimetype='html', from_addr=None, mailer=None, celery=True, username=None, password=None, callback=None, attachment_name=None, attachment_content=None, **context): """Send an email from the OSF. Example: :: from website import mails mails.send_email('[email protected]', mails.TEST, name="Foo") :param str to_addr: The recipient's email address :param Mail mail: The mail object :param str mimetype: Either 'plain' or 'html' :param function callback: celery task to execute after send_mail completes :param **context: Context vars for the message template .. 
note: Uses celery if available """ if waffle.switch_is_active(features.DISABLE_ENGAGEMENT_EMAILS) and mail.engagement: return False from_addr = from_addr or settings.FROM_EMAIL mailer = mailer or tasks.send_email subject = mail.subject(**context) message = mail.html(**context) # Don't use ttls and login in DEBUG_MODE ttls = login = not settings.DEBUG_MODE logger.debug('Sending email...') logger.debug(u'To: {to_addr}\nFrom: {from_addr}\nSubject: {subject}\nMessage: {message}'.format(**locals())) kwargs = dict( from_addr=from_addr, to_addr=to_addr, subject=subject, message=message, mimetype=mimetype, ttls=ttls, login=login, username=username, password=password, categories=mail.categories, attachment_name=attachment_name, attachment_content=attachment_content, ) logger.debug('Preparing to send...') if settings.USE_EMAIL: if settings.USE_CELERY and celery: logger.debug('Sending via celery...') return mailer.apply_async(kwargs=kwargs, link=callback) else: logger.debug('Sending without celery') ret = mailer(**kwargs) if callback: callback() return ret def get_english_article(word): """ Decide whether to use 'a' or 'an' for a given English word. :param word: the word immediately after the article :return: 'a' or 'an' """ return 'a' + ('n' if word[0].lower() in 'aeiou' else '') # Predefined Emails TEST = Mail('test', subject='A test email to ${name}', categories=['test']) # Emails for first-time login through external identity providers. EXTERNAL_LOGIN_CONFIRM_EMAIL_CREATE = Mail( 'external_confirm_create', subject='OSF Account Verification' ) FORK_COMPLETED = Mail( 'fork_completed', subject='Your fork has completed' ) FORK_FAILED = Mail( 'fork_failed', subject='Your fork has failed' ) EXTERNAL_LOGIN_CONFIRM_EMAIL_LINK = Mail( 'external_confirm_link', subject='OSF Account Verification' ) EXTERNAL_LOGIN_LINK_SUCCESS = Mail( 'external_confirm_success', subject='OSF Account Verification Success' ) # Sign up confirmation emails for OSF, native campaigns and branded campaigns INITIAL_CONFIRM_EMAIL = Mail( 'initial_confirm', subject='OSF Account Verification' ) CONFIRM_EMAIL = Mail( 'confirm', subject='Add a new email to your OSF account' ) CONFIRM_EMAIL_PREREG = Mail( 'confirm_prereg', subject='OSF Account Verification, Preregistration Challenge' ) CONFIRM_EMAIL_ERPC = Mail( 'confirm_erpc', subject='OSF Account Verification, Election Research Preacceptance Competition' ) CONFIRM_EMAIL_PREPRINTS = lambda name, provider: Mail( 'confirm_preprints_{}'.format(name), subject='OSF Account Verification, {}'.format(provider) ) CONFIRM_EMAIL_REGISTRIES_OSF = Mail( 'confirm_registries_osf', subject='OSF Account Verification, OSF Registries' ) CONFIRM_EMAIL_MODERATION = lambda provider: Mail( 'confirm_moderation', subject='OSF Account Verification, {}'.format(provider.name) ) # Merge account, add or remove email confirmation emails. CONFIRM_MERGE = Mail('confirm_merge', subject='Confirm account merge') PRIMARY_EMAIL_CHANGED = Mail('primary_email_changed', subject='Primary email changed') # Contributor added confirmation emails INVITE_DEFAULT = Mail( 'invite_default', subject='You have been added as a contributor to an OSF project.' ) INVITE_PREPRINT = lambda template, provider: Mail( 'invite_preprints_{}'.format(template), subject='You have been added as a contributor to {} {} {}.'.format(get_english_article(provider.name), provider.name, provider.preprint_word) ) CONTRIBUTOR_ADDED_DEFAULT = Mail( 'contributor_added_default', subject='You have been added as a contributor to an OSF project.' 
) CONTRIBUTOR_ADDED_PREPRINT = lambda template, provider: Mail( 'contributor_added_preprints_{}'.format(template), subject='You have been added as a contributor to {} {} {}.'.format(get_english_article(provider.name), provider.name, provider.preprint_word) ) CONTRIBUTOR_ADDED_PREPRINT_NODE_FROM_OSF = Mail( 'contributor_added_preprint_node_from_osf', subject='You have been added as a contributor to an OSF project.' ) MODERATOR_ADDED = lambda provider: Mail( 'moderator_added', subject='You have been added as a moderator for {}'.format(provider.name) ) CONTRIBUTOR_ADDED_ACCESS_REQUEST = Mail( 'contributor_added_access_request', subject='Your access request to an OSF project has been approved' ) FORWARD_INVITE = Mail('forward_invite', subject='Please forward to ${fullname}') FORWARD_INVITE_REGISTERED = Mail('forward_invite_registered', subject='Please forward to ${fullname}') FORGOT_PASSWORD = Mail('forgot_password', subject='Reset Password') PASSWORD_RESET = Mail('password_reset', subject='Your OSF password has been reset') PENDING_VERIFICATION = Mail('pending_invite', subject='Your account is almost ready!') PENDING_VERIFICATION_REGISTERED = Mail('pending_registered', subject='Received request to be a contributor') REQUEST_EXPORT = Mail('support_request', subject='[via OSF] Export Request') REQUEST_DEACTIVATION = Mail('support_request', subject='[via OSF] Deactivation Request') SPAM_USER_BANNED = Mail('spam_user_banned', subject='[OSF] Account flagged as spam') CONFERENCE_SUBMITTED = Mail( 'conference_submitted', subject='Project created on OSF', ) CONFERENCE_INACTIVE = Mail( 'conference_inactive', subject='OSF Error: Conference inactive', ) CONFERENCE_FAILED = Mail( 'conference_failed', subject='OSF Error: No files attached', ) DIGEST = Mail( 'digest', subject='OSF Notifications', categories=['notifications', 'notifications-digest'] ) DIGEST_REVIEWS_MODERATORS = Mail( 'digest_reviews_moderators', subject='Recent submissions to ${provider_name}', ) TRANSACTIONAL = Mail( 'transactional', subject='OSF: ${subject}', categories=['notifications', 'notifications-transactional'] ) # Retraction related Mail objects PENDING_RETRACTION_ADMIN = Mail( 'pending_retraction_admin', subject='Withdrawal pending for one of your projects.' ) PENDING_RETRACTION_NON_ADMIN = Mail( 'pending_retraction_non_admin', subject='Withdrawal pending for one of your projects.' ) # Embargo related Mail objects PENDING_EMBARGO_ADMIN = Mail( 'pending_embargo_admin', subject='Registration pending for one of your projects.' ) PENDING_EMBARGO_NON_ADMIN = Mail( 'pending_embargo_non_admin', subject='Registration pending for one of your projects.' ) # Registration related Mail Objects PENDING_REGISTRATION_ADMIN = Mail( 'pending_registration_admin', subject='Registration pending for one of your projects.' ) PENDING_REGISTRATION_NON_ADMIN = Mail( 'pending_registration_non_admin', subject='Registration pending for one of your projects.' ) PENDING_EMBARGO_TERMINATION_ADMIN = Mail( 'pending_embargo_termination_admin', subject='Request to end an embargo early for one of your projects.' ) PENDING_EMBARGO_TERMINATION_NON_ADMIN = Mail( 'pending_embargo_termination_non_admin', subject='Request to end an embargo early for one of your projects.' 
) FILE_OPERATION_SUCCESS = Mail( 'file_operation_success', subject='Your ${action} has finished', ) FILE_OPERATION_FAILED = Mail( 'file_operation_failed', subject='Your ${action} has failed', ) UNESCAPE = '<% from osf.utils.sanitize import unescape_entities %> ${unescape_entities(src.title)}' PROBLEM_REGISTERING = 'Problem registering ' + UNESCAPE ARCHIVE_SIZE_EXCEEDED_DESK = Mail( 'archive_size_exceeded_desk', subject=PROBLEM_REGISTERING ) ARCHIVE_SIZE_EXCEEDED_USER = Mail( 'archive_size_exceeded_user', subject=PROBLEM_REGISTERING ) ARCHIVE_COPY_ERROR_DESK = Mail( 'archive_copy_error_desk', subject=PROBLEM_REGISTERING ) ARCHIVE_COPY_ERROR_USER = Mail( 'archive_copy_error_user', subject=PROBLEM_REGISTERING ) ARCHIVE_FILE_NOT_FOUND_DESK = Mail( 'archive_file_not_found_desk', subject=PROBLEM_REGISTERING ) ARCHIVE_FILE_NOT_FOUND_USER = Mail( 'archive_file_not_found_user', subject='Registration failed because of altered files' ) ARCHIVE_UNCAUGHT_ERROR_DESK = Mail( 'archive_uncaught_error_desk', subject=PROBLEM_REGISTERING ) ARCHIVE_REGISTRATION_STUCK_DESK = Mail( 'archive_registration_stuck_desk', subject='[auto] Stuck registrations audit' ) ARCHIVE_UNCAUGHT_ERROR_USER = Mail( 'archive_uncaught_error_user', subject=PROBLEM_REGISTERING ) ARCHIVE_SUCCESS = Mail( 'archive_success', subject='Registration of ' + UNESCAPE + ' complete' ) WELCOME = Mail( 'welcome', subject='Welcome to OSF', engagement=True ) WELCOME_OSF4I = Mail( 'welcome_osf4i', subject='Welcome to OSF', engagement=True ) PREREG_CHALLENGE_REJECTED = Mail( 'prereg_challenge_rejected', subject='Revisions required, your submission for the Preregistration Challenge is not yet registered' ) PREREG_CHALLENGE_ACCEPTED = Mail( 'prereg_challenge_accepted', subject='Your research plan has been registered and accepted for the Preregistration Challenge' ) PREREG_CSV = Mail( 'prereg_csv', subject='[auto] Updated Prereg CSV' ) EMPTY = Mail('empty', subject='${subject}') SHARE_ERROR_DESK = Mail( 'send_data_share_error_desk', subject='Share Error' ) SHARE_PREPRINT_ERROR_DESK = Mail( 'send_data_share_preprint_error_desk', subject='Share Error' ) REVIEWS_SUBMISSION_CONFIRMATION = Mail( 'reviews_submission_confirmation', subject='Confirmation of your submission to ${provider_name}' ) ACCESS_REQUEST_SUBMITTED = Mail( 'access_request_submitted', subject='An OSF user has requested access to your ${node.project_or_component}' ) ACCESS_REQUEST_DENIED = Mail( 'access_request_rejected', subject='Your access request to an OSF project has been declined' ) CROSSREF_ERROR = Mail( 'crossref_doi_error', subject='There was an error creating a DOI for preprint(s). batch_id: ${batch_id}' ) PREPRINT_WITHDRAWAL_REQUEST_GRANTED = Mail( 'preprint_withdrawal_request_granted', subject='Your ${reviewable.provider.preprint_word} has been withdrawn', ) PREPRINT_WITHDRAWAL_REQUEST_DECLINED = Mail( 'preprint_withdrawal_request_declined', subject='Your withdrawal request has been declined', )
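# A minimal usage sketch for the Mail/send_mail API defined above, assuming a
# hypothetical ``reminder.txt.mako``/``reminder.html.mako`` template pair in
# website/templates/emails. The send itself is kept inside a helper so that
# importing this module still has no side effects.
REMINDER_SKETCH = Mail(
    'reminder',                       # template prefix -> reminder.html.mako
    subject='Reminder for ${name}',   # subjects are Mako templates too
    categories=['notifications'],     # SendGrid analytics categories
    engagement=True,                  # suppressed by the engagement waffle switch
)

def _send_reminder_sketch(to_addr, name):
    # Context kwargs are forwarded to both the subject and body templates;
    # celery=False forces a synchronous send through framework.email.tasks.
    return send_mail(to_addr, REMINDER_SKETCH, name=name, celery=False)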
import logbook from .log import Log, RedisPub log = Log(__name__) def guard(f, *args, **kargs): with logbook.NullHandler().applicationbound(): with RedisPub(): try: return f(*args, **kargs) except: log.exception('task {} failed', f.__name__) def block(f, *args, **kargs): return block_call(f, args, kargs) def block_call(f, args=[], kargs={}, timeout=None, bound='cpu'): impl = { 'cpu': cpu_bound_block_call, 'io': io_bound_block_call, }.get(bound) assert impl, 'unknown bound type: %s' % bound return impl(f, args, kargs, timeout) def nonblock(f, *args, **kargs): return nonblock_call(f, args, kargs) def nonblock_call(f, args=[], kargs={}, timeout=None, bound='cpu', group=None): if group is None: impl = { 'cpu': cpu_bound_nonblock_call, 'io': io_bound_nonblock_call, }.get(bound) assert impl, 'unknown bound type: %s' % bound return impl(f, args, kargs, timeout) if bound == 'cpu': log.warning( 'task assigned to group "{}", bound type fall back to "io"', group ) assert timeout is not None, 'group task must have timeout setting' from .local import core from flask import current_app from redis import Redis redis = Redis() run_group_app_task( redis, group_app_task_lock(group), group, current_app.kargs, timeout ) import pickle redis.rpush( ':'.join([core.group_app_task_key, group]), pickle.dumps((f, args, kargs)) ) def group_app_task_lock(group): from .local import core return ':'.join([core.group_app_task_key, group, 'lock']) def cpu_bound_block_call(f, args, kargs, timeout): try: from .rq import q from time import sleep job = q.enqueue_call( guard, args=[f] + list(args), kwargs=kargs, timeout=timeout, ) while job.result is None: sleep(0.5) ret = job.result job.cancel() return ret except: from concurrent.futures import ProcessPoolExecutor as Ex with Ex() as ex: future = ex.submit(guard, f, *args, **kargs) return future.result() def io_bound_block_call(f, args, kargs, timeout): from .local import thread_slave return thread_slave.submit(guard, f, *args, **kargs).result(timeout) def io_bound_nonblock_call(f, args, kargs, timeout): assert timeout is None, "thread based non-block doesn't support timeout" from .local import thread_slave return thread_slave.submit(guard, f, *args, **kargs) def cpu_bound_nonblock_call(f, args, kargs, timeout): try: from .rq import q q.enqueue_call( guard, args=[f] + list(args), kwargs=kargs, timeout=timeout, ) except: from multiprocessing import Process Process( target=guard, args=[f] + list(args), kwargs=kargs, daemon=False, ).start() def _thread_main(f, done): done(f()) def callback(f, done, *args, **kargs): from threading import Thread from functools import partial Thread( target=_thread_main, args=(partial(block, guard, f, *args, **kargs), done), daemon=False, ).start() def refresh_group_app_task_lock(redis, lock, name): from time import time import pickle redis.set(lock, pickle.dumps(time())) def group_app_task(redis, lock, name, appops, timeout): log.debug('group app task {} start', name) import pickle from flask import copy_current_request_context from . 
import make_slave_app from .local import core app = make_slave_app(appops) with app.app_context(): while True: refresh_group_app_task_lock(redis, lock, name) message = redis.blpop( ':'.join([core.group_app_task_key, name]), timeout=timeout ) if message is None: break task, args, kargs = pickle.loads(message[1]) try: with app.test_request_context(): nonblock_call( copy_current_request_context(task), args=args, kargs=kargs, bound='io', ) except: log.exception('group task {} failed', task.__name__) log.debug('group app task {} done', name) def group_app_task_out(lock, name, appops, timeout): from redis import Redis redis = Redis() try: group_app_task(redis, lock, name, appops, timeout) finally: redis.delete(lock) def run_group_app_task(redis, lock, name, appops, timeout): from .local import core from time import time import pickle now = time() if not redis.setnx(lock, pickle.dumps(now)): dead = now - pickle.loads(redis.get(lock)) > timeout + 13 if dead: redis.set(lock, pickle.dumps(now)) else: return try: nonblock_call( group_app_task_out, kargs=dict( lock=lock, name=name, appops=appops, timeout=timeout, ), bound='cpu', timeout=core.group_app_task_timeout, ) except: redis.delete(lock) raise
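# A minimal usage sketch for the helpers defined above, kept in a private
# function so nothing runs at import time. It assumes the backends described
# above are available: the 'cpu' path needs the rq/Redis queue (or falls back
# to a process pool), the 'io' path needs the thread slave from .local, and
# group tasks additionally require a Flask app context plus an explicit timeout.
def _task_usage_sketch():
    def crunch(n):
        return sum(i * i for i in range(n))

    # Block the caller until the result is available.
    result = block_call(crunch, args=[10000], bound='cpu', timeout=30)

    # Fire and forget on the shared thread pool.
    nonblock_call(crunch, args=[10000], bound='io')

    # Run the work in a separate thread and hand the result to a callback.
    callback(crunch, lambda value: log.debug('crunched {}', value), 10000)
    return result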
#this program corresponds to special.py from decimal import Decimal from numpy.testing import * import scipy.signal as signal from scipy.signal import lfilter from numpy import array, arange import numpy as np class TestConvolve(TestCase): def test_basic(self): a = [3,4,5,6,5,4] b = [1,2,3] c = signal.convolve(a,b) assert_array_equal(c,array([3,10,22,28,32,32,23,12])) class TestFFTConvolve(TestCase): def test_real(self): x = array([1,2,3]) assert_array_almost_equal(signal.fftconvolve(x,x), [1,4,10,12,9.]) def test_complex(self): x = array([1+1j,2+2j,3+3j]) assert_array_almost_equal(signal.fftconvolve(x,x), [0+2.0j, 0+8j, 0+20j, 0+24j, 0+18j]) class TestMedFilt(TestCase): def test_basic(self): f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [ 3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [ 7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] d = signal.medfilt(f, [7, 3]) e = signal.medfilt2d(np.array(f, np.float), [7, 3]) assert_array_equal(d, [[ 0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [ 0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [ 7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [ 0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [ 0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) assert_array_equal(d, e) class TestWiener(TestCase): def test_basic(self): g = array([[5,6,4,3],[3,5,6,2],[2,3,5,6],[1,6,9,7]],'d') correct = array([[2.16374269,3.2222222222, 2.8888888889, 1.6666666667],[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) h = signal.wiener(g) assert_array_almost_equal(h,correct,decimal=6) class TestCSpline1DEval(TestCase): def test_basic(self): y=array([1,2,3,4,3,2,1,2,3.0]) x=arange(len(y)) dx=x[1]-x[0] cj = signal.cspline1d(y) x2=arange(len(y)*10.0)/10.0 y2=signal.cspline1d_eval(cj, x2, dx=dx,x0=x[0]) # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) class TestOrderFilt(TestCase): def test_basic(self): assert_array_equal(signal.order_filter([1,2,3],[1,0,1],1), [2,3,2]) class TestChebWin: def test_cheb_odd(self): cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, 0.198891, 0.235450, 0.274846, 0.316836, 0.361119, 0.407338, 0.455079, 0.503883, 0.553248, 0.602637, 0.651489, 0.699227, 0.745266, 0.789028, 0.829947, 0.867485, 0.901138, 0.930448, 0.955010, 0.974482, 0.988591, 0.997138, 1.000000, 0.997138, 0.988591, 0.974482, 0.955010, 0.930448, 0.901138, 0.867485, 0.829947, 0.789028, 0.745266, 0.699227, 0.651489, 0.602637, 0.553248, 0.503883, 0.455079, 0.407338, 0.361119, 0.316836, 0.274846, 0.235450, 0.198891, 0.165348, 0.134941, 0.107729, 0.200938]) cheb_odd = signal.chebwin(53, at=-40) assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) def test_cheb_even(self): cheb_even_true = array([0.203894, 0.107279, 0.133904, 0.163608, 0.196338, 0.231986, 0.270385, 0.311313, 0.354493, 0.399594, 0.446233, 0.493983, 0.542378, 0.590916, 0.639071, 0.686302, 0.732055, 0.775783, 0.816944, 0.855021, 0.889525, 0.920006, 0.946060, 0.967339, 0.983557, 0.994494, 1.000000, 
1.000000, 0.994494, 0.983557, 0.967339, 0.946060, 0.920006, 0.889525, 0.855021, 0.816944, 0.775783, 0.732055, 0.686302, 0.639071, 0.590916, 0.542378, 0.493983, 0.446233, 0.399594, 0.354493, 0.311313, 0.270385, 0.231986, 0.196338, 0.163608, 0.133904, 0.107279, 0.203894]) cheb_even = signal.chebwin(54, at=-40) assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) class _TestLinearFilter(TestCase): dt = None def test_rank1(self): x = np.linspace(0, 5, 6).astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, -0.5]).astype(self.dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) assert_array_almost_equal(lfilter(b, a, x), y_r) # Test simple FIR b = np.array([1, 1]).astype(self.dt) a = np.array([1]).astype(self.dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) assert_array_almost_equal(lfilter(b, a, x), y_r) # Test IIR with initial conditions b = np.array([1, 1]).astype(self.dt) a = np.array([1]).astype(self.dt) zi = np.array([1]).astype(self.dt) y_r = np.array([1, 1, 3, 5, 7, 9.]).astype(self.dt) zf_r = np.array([5]).astype(self.dt) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) b = np.array([1, 1, 1]).astype(self.dt) a = np.array([1]).astype(self.dt) zi = np.array([1, 1]).astype(self.dt) y_r = np.array([1, 2, 3, 6, 9, 12.]).astype(self.dt) zf_r = np.array([9, 5]).astype(self.dt) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank2(self): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6 ,4 ,2]], dtype=self.dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=self.dt) y = lfilter(b, a, x, axis = 0) assert_array_almost_equal(y_r2_a0, y) y = lfilter(b, a, x, axis = 1) assert_array_almost_equal(y_r2_a1, y) def test_rank2_init_cond_a1(self): # Test initial condition handling along axis 1 shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0_1 = np.array([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]], dtype=self.dt) zf_r = np.array([-5, -17, -29, -41])[:, np.newaxis].astype(self.dt) y, zf = lfilter(b, a, x, axis = 1, zi = np.ones((4, 1))) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank2_init_cond_a0(self): # Test initial condition handling along axis 0 shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0_0 = np.array([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5 ,3 ,1]], dtype=self.dt) zf_r = np.array([[-23, -23, -23]], dtype=self.dt) y, zf = lfilter(b, a, x, axis = 0, zi = np.ones((1, 3))) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank3(self): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) # Test last axis y = lfilter(b, a, x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_empty_zi(self): """Regression test for #880: empty array for zi crashes.""" a = 
np.ones(1).astype(self.dt)
        b = np.ones(1).astype(self.dt)
        x = np.arange(5).astype(self.dt)
        zi = np.ones(0).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, x)
        self.failUnless(zf.dtype == self.dt)
        self.failUnless(zf.size == 0)

class TestLinearFilterFloat32(_TestLinearFilter):
    dt = np.float32

class TestLinearFilterFloat64(_TestLinearFilter):
    dt = np.float64

class TestLinearFilterFloatExtended(_TestLinearFilter):
    dt = np.longdouble

class TestLinearFilterComplex64(_TestLinearFilter):
    dt = np.complex64

class TestLinearFilterComplex128(_TestLinearFilter):
    dt = np.complex128

class TestLinearFilterComplexExtended(_TestLinearFilter):
    dt = np.longcomplex

class TestLinearFilterDecimal(_TestLinearFilter):
    dt = np.dtype(Decimal)

class TestFiltFilt:
    def test_basic(self):
        out = signal.filtfilt([1,2,3], [1,2,3], np.arange(12))
        assert_equal(out, arange(12))

class TestDecimate:
    def test_basic(self):
        x = np.arange(6)
        assert_array_equal(signal.decimate(x, 2, n=1).round(), x[::2])

if __name__ == "__main__":
    run_module_suite()
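# A small standalone illustration (not part of the suite above) of the zi/zf
# round-trip that _TestLinearFilter exercises: carrying the final filter state
# of one chunk into the next chunk reproduces filtering the signal in one call.
def _chunked_lfilter_demo():
    b = np.array([1.0, 1.0])
    a = np.array([1.0])
    x = np.arange(10.0)

    whole = lfilter(b, a, x)

    zi = np.zeros(max(len(a), len(b)) - 1)     # initial state, all zeros
    first, zi = lfilter(b, a, x[:5], zi=zi)    # zf of chunk 1 ...
    second, _ = lfilter(b, a, x[5:], zi=zi)    # ... seeds chunk 2

    assert_array_almost_equal(whole, np.concatenate([first, second]))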
import Tkinter as tk from versionControl import greeting import AppKit import subprocess import threading import time import os import functions start_bt_ms = "Welcome! Think about the how you want to label tweets." count = -1 Mflag = False class App(tk.Frame): def __init__(self, master): tk.Frame.__init__(self, master) self.pack() self.master.title("") self.master.resizable(False, False) self.master.tk_setPalette(background='#ececec') self.master.protocol('WM_DELETE_WINDOW', self.click_cancel) self.master.bind('<Return>', self.click_ok) self.master.bind('<Escape>', self.click_cancel) x = (self.master.winfo_screenwidth() - self.master.winfo_reqwidth()) / 2 y = (self.master.winfo_screenheight() - self.master.winfo_reqheight()) / 3 self.master.geometry("+{}+{}".format(x, y)) self.master.config(menu=tk.Menu(self)) tk.Message(self, text= greeting(), font='System 18 bold', justify='left', aspect=800).pack(pady=(5, 0)) tk.Message(self, text= "Step 2. Labeling", font='System 14 bold', justify='left', aspect=800).pack(pady=(5, 0)) ## frame 1 f1 = tk.Frame(self) f1.pack(padx=60, pady=15, anchor='w') self.f1l1 = tk.Label(f1, text='The keywords to label tweets as class 1 (positive.txt):') self.f1l1.grid(row=0,column=0,columnspan=2,sticky='w') self.f1l1L = tk.Label(f1, text='Keyword:') self.f1l1L.grid(row=1, column=0, sticky='w') self.user_input = tk.Entry(f1, background='white', width=30) self.user_input.grid(row=1, column=1, sticky='w') tk.Label(f1, text=' ').grid(row=2, column=0, sticky='w') self.f1l2 = tk.Label(f1, text='The keywords to label tweets as class 2 (negative.txt):') self.f1l2.grid(row=3,column=0,columnspan=2,sticky='w') self.f1l2L = tk.Label(f1, text='Keyword:') self.f1l2L.grid(row=4, column=0, sticky='w') self.user_input2 = tk.Entry(f1, background='white', width=30) self.user_input2.grid(row=4, column=1, sticky='w') tk.Label(f1, text=' ').grid(row=5, column=0, sticky='w') self.f1l3 = tk.Label(f1, text='The file containing fetched tweets (default in ./output):') self.f1l3.grid(row=6,column=0,columnspan=2,sticky='w') self.f1l3L = tk.Label(f1, text='Path:') self.f1l3L.grid(row=7, column=0, sticky='w') self.pass_input = tk.Entry(f1, background='white', width=30) #self.pass_input.insert(0,"./output/stream_(step1Tag).txt") self.pass_input.insert(0,"./output/stream_China.txt") self.pass_input.grid(row=7, column=1, sticky='w') ##frame middle 1.5 f1_5 = tk.Frame(self) f1_5.pack(padx=60, pady=(5,0), anchor='w') self.ctl_tx = tk.Label(f1_5, anchor="w",fg='black',state='disabled', text="Manually label each tweet displayed by following buttons",width=45) self.ctl_tx.pack() self.ctl_1 = tk.Button(f1_5, text='Class1', height=1, width=6, state='disabled', command=self.click_1) self.ctl_1.bind('<Enter>', self.hover_1) self.ctl_1.bind('<Leave>', self.hover_off) self.ctl_1.pack(side='right') self.ctl_2 = tk.Button(f1_5, text='Class2', height=1, width=6, state='disabled', command=self.click_2) self.ctl_2.bind('<Enter>', self.hover_2) self.ctl_2.bind('<Leave>', self.hover_off) self.ctl_2.pack(side='right') self.ctl_3 = tk.Button(f1_5, text='Skip', height=1, width=6, state='disabled', command=self.click_3) self.ctl_3.bind('<Enter>', self.hover_3) self.ctl_3.bind('<Leave>', self.hover_off) self.ctl_3.pack(side='right') ##frame middle 1.7 f1_7 = tk.Frame(self) f1_7.pack(padx=30, anchor='w') self.dis = tk.Message(f1_7, text="", justify='left', width=450) self.dis.pack() ##frame 2 f2 = tk.Frame(self) f2.pack(padx=60, anchor='w') self.label = tk.Label(f2, anchor="w",fg="white",bg="blue", text=start_bt_ms, 
width=45) self.label.pack() tk.Label(f2, anchor="w",text=" ", width=45).pack() ## frame last fb = tk.Frame(self) fb.pack(padx=60, pady=(0, 15), anchor='e') self.stb = tk.Button(fb, text='Keywords', height=1, width=6, default='active', command=self.click_ok) self.stb.pack(side='right') self.stb.bind("<Enter>", self.hover_on) self.stb.bind("<Leave>", self.hover_off) self.stb2 = tk.Button(fb, text='Manual', height=1, width=6, command=self.click_ok_manual) self.stb2.pack(side='right') self.stb2.bind("<Enter>", self.hover_on_manual) self.stb2.bind("<Leave>", self.hover_off) self.stb3 = tk.Button(fb, text='Quit...', height=1, width=6, command=self.click_cancel) self.stb3.pack(side='right') def hover_1(self, event=None): self.label.config(text="Label this tweet as group 1") def hover_2(self, event=None): self.label.config(text="Label this tweet as group 2") def hover_3(self, event=None): self.label.config(text="Skip this tweet") def hover_on_manual(self, event=None): self.label.config(text="Click to label manually, leaving keywords entries blank") def hover_on(self, event=None): self.label.config(text="Click to label by keywords") def hover_off(self, event=None): self.label.config(text=start_bt_ms) def click_ok(self, event=None): if not os.path.isfile(self.pass_input.get()): self.label.config(text="File "+self.pass_input.get()+" doesn't exist!") return 0 print "keyword1: " + self.user_input.get() print "keyword2: " + self.user_input2.get() print "file: " + self.pass_input.get() def click_ok_manual(self, event=None): if not os.path.isfile(self.pass_input.get()): self.label.config(text="File "+self.pass_input.get()+" doesn't exist!") return 0 print "file: " + self.pass_input.get() self.label.config(text="Label tweets manually") global start_bt_ms start_bt_ms = "Label tweets manually" self.stb.config(state='disabled') self.stb2.config(state='disabled') self.ctl_1.config(state='active') self.ctl_2.config(state='active') self.ctl_3.config(state='active') self.ctl_tx.config(state='normal') self.user_input.config(state='disabled') self.user_input2.config(state='disabled') self.pass_input.config(state='disabled') self.f1l1.config(state='disabled') self.f1l1L.config(state='disabled') self.f1l2.config(state='disabled') self.f1l2L.config(state='disabled') self.f1l3.config(state='disabled') self.f1l3L.config(state='disabled') self.stb3.config(text="Save&Quit",default='active',width=7) self.tweets = functions.readManyStrings(self.pass_input.get()) self.next_twi() self.class1=[] self.class2=[] global Mflag Mflag = True def click_cancel(self, event=None): if Mflag: tmpPath = os.path.dirname(self.pass_input.get()) functions.writeList(self.class1,tmpPath + "/positive.txt") functions.writeList(self.class2,tmpPath + "/negative.txt") print("The user clicked 'Cancel'") self.master.destroy() def click_1(self, event=None): self.next_twi() self.class1.append(self.display) self.disp_twi() def click_2(self, event=None): self.next_twi() self.class2.append(self.display) self.disp_twi() def click_3(self, event=None): self.next_twi() self.disp_twi() def next_twi(self, event=None): global count count += 1 self.display = self.tweets[count].strip() #\n at end of self.display self.dis.config(text="\nTweet No."+str(count)+": "+self.display+"\n") def disp_twi(self, event=None): self.label.config(text=str(len(self.class1))+" tweets in class1 / "+str(len(self.class2))+" tweets in class2.") global start_bt_ms start_bt_ms = str(len(self.class1))+" tweets in class1 / "+str(len(self.class2))+" tweets in class2." 
def labeling():
    info = AppKit.NSBundle.mainBundle().infoDictionary()
    info['LSUIElement'] = True
    root = tk.Tk()
    app = App(root)
    AppKit.NSApplication.sharedApplication().activateIgnoringOtherApps_(True)
    app.mainloop()
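# A stripped-down sketch (plain Tkinter only, no AppKit/functions dependencies)
# of the <Enter>/<Leave> hover-hint pattern the buttons above rely on: hovering
# a button swaps the status label text, leaving restores the idle message.
def hover_hint_demo():
    idle_text = 'Ready.'
    root = tk.Tk()
    status = tk.Label(root, text=idle_text, width=45, anchor='w')
    status.pack()
    button = tk.Button(root, text='Class1')
    button.pack()
    button.bind('<Enter>', lambda event: status.config(text='Label this tweet as group 1'))
    button.bind('<Leave>', lambda event: status.config(text=idle_text))
    root.mainloop()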
# Copyright (c) The PyAMF Project. # See LICENSE.txt for details. """ General gateway tests. @since: 0.1.0 """ import unittest import sys import pyamf from pyamf import remoting from pyamf.remoting import gateway, amf0 class TestService(object): def spam(self): return 'spam' def echo(self, x): return x class FaultTestCase(unittest.TestCase): def test_create(self): x = remoting.ErrorFault() self.assertEqual(x.code, '') self.assertEqual(x.details, '') self.assertEqual(x.description, '') x = remoting.ErrorFault(code=404, details='Not Found', description='Spam eggs') self.assertEqual(x.code, 404) self.assertEqual(x.details, 'Not Found') self.assertEqual(x.description, 'Spam eggs') def test_build(self): fault = None try: raise TypeError("Unknown type") except TypeError: fault = amf0.build_fault(*sys.exc_info()) self.assertTrue(isinstance(fault, remoting.ErrorFault)) self.assertEqual(fault.level, 'error') self.assertEqual(fault.code, 'TypeError') self.assertEqual(fault.details, None) def test_build_traceback(self): fault = None try: raise TypeError("Unknown type") except TypeError: fault = amf0.build_fault(include_traceback=True, *sys.exc_info()) self.assertTrue(isinstance(fault, remoting.ErrorFault)) self.assertEqual(fault.level, 'error') self.assertEqual(fault.code, 'TypeError') self.assertTrue("\\n" not in fault.details) def test_encode(self): encoder = pyamf.get_encoder(pyamf.AMF0) decoder = pyamf.get_decoder(pyamf.AMF0) decoder.stream = encoder.stream try: raise TypeError("Unknown type") except TypeError: encoder.writeElement(amf0.build_fault(*sys.exc_info())) buffer = encoder.stream buffer.seek(0, 0) fault = decoder.readElement() old_fault = amf0.build_fault(*sys.exc_info()) self.assertEqual(fault.level, old_fault.level) self.assertEqual(fault.type, old_fault.type) self.assertEqual(fault.code, old_fault.code) self.assertEqual(fault.details, old_fault.details) self.assertEqual(fault.description, old_fault.description) def test_explicit_code(self): class X(Exception): _amf_code = 'Server.UnknownResource' try: raise X() except X: fault = amf0.build_fault(*sys.exc_info()) self.assertEqual(fault.code, 'Server.UnknownResource') class ServiceWrapperTestCase(unittest.TestCase): def test_create(self): x = gateway.ServiceWrapper('blah') self.assertEqual(x.service, 'blah') def test_create_preprocessor(self): x = gateway.ServiceWrapper('blah', preprocessor=ord) self.assertEqual(x.preprocessor, ord) def test_cmp(self): x = gateway.ServiceWrapper('blah') y = gateway.ServiceWrapper('blah') z = gateway.ServiceWrapper('bleh') self.assertEqual(x, y) self.assertNotEquals(y, z) def test_call(self): def add(x, y): self.assertEqual(x, 1) self.assertEqual(y, 2) return x + y x = gateway.ServiceWrapper(add) self.assertTrue(callable(x)) self.assertEqual(x(None, [1, 2]), 3) x = gateway.ServiceWrapper('blah') self.assertRaises(gateway.UnknownServiceMethodError, x, None, []) x = gateway.ServiceWrapper(TestService) self.assertRaises(gateway.UnknownServiceMethodError, x, None, []) self.assertEqual(x('spam', []), 'spam') self.assertRaises(gateway.UnknownServiceMethodError, x, 'xyx', []) self.assertRaises(gateway.InvalidServiceMethodError, x, '_private', []) self.assertEqual(x('echo', [x]), x) class ServiceRequestTestCase(unittest.TestCase): def test_create(self): sw = gateway.ServiceWrapper(TestService) request = remoting.Envelope() x = gateway.ServiceRequest(request, sw, None) self.assertEqual(x.request, request) self.assertEqual(x.service, sw) self.assertEqual(x.method, None) def test_call(self): sw = 
gateway.ServiceWrapper(TestService) request = remoting.Envelope() x = gateway.ServiceRequest(request, sw, None) self.assertRaises(gateway.UnknownServiceMethodError, x) x = gateway.ServiceRequest(request, sw, 'spam') self.assertEqual(x(), 'spam') x = gateway.ServiceRequest(request, sw, 'echo') self.assertEqual(x(x), x) class ServiceCollectionTestCase(unittest.TestCase): def test_contains(self): x = gateway.ServiceCollection() self.assertFalse(TestService in x) self.assertFalse('spam.eggs' in x) x['spam.eggs'] = gateway.ServiceWrapper(TestService) self.assertTrue(TestService in x) self.assertTrue('spam.eggs' in x) class BaseGatewayTestCase(unittest.TestCase): def test_create(self): x = gateway.BaseGateway() self.assertEqual(x.services, {}) x = gateway.BaseGateway({}) self.assertEqual(x.services, {}) x = gateway.BaseGateway({}) self.assertEqual(x.services, {}) x = gateway.BaseGateway({'x': TestService}) self.assertEqual(x.services, {'x': TestService}) x = gateway.BaseGateway({}, timezone_offset=-180) self.assertEqual(x.timezone_offset, -180) self.assertRaises(TypeError, gateway.BaseGateway, []) self.assertRaises(TypeError, gateway.BaseGateway, foo='bar') def test_add_service(self): gw = gateway.BaseGateway() self.assertEqual(gw.services, {}) gw.addService(TestService) self.assertTrue(TestService in gw.services) self.assertTrue('TestService' in gw.services) del gw.services['TestService'] gw.addService(TestService, 'spam.eggs') self.assertTrue(TestService in gw.services) self.assertTrue('spam.eggs' in gw.services) del gw.services['spam.eggs'] class SpamService(object): def __str__(self): return 'spam' def __call__(*args, **kwargs): pass x = SpamService() gw.addService(x) self.assertTrue(x in gw.services) self.assertTrue('spam' in gw.services) del gw.services['spam'] self.assertEqual(gw.services, {}) self.assertRaises(TypeError, gw.addService, 1) import new temp = new.module('temp') gw.addService(temp) self.assertTrue(temp in gw.services) self.assertTrue('temp' in gw.services) del gw.services['temp'] self.assertEqual(gw.services, {}) def test_remove_service(self): gw = gateway.BaseGateway({'test': TestService}) self.assertTrue('test' in gw.services) wrapper = gw.services['test'] gw.removeService('test') self.assertFalse('test' in gw.services) self.assertFalse(TestService in gw.services) self.assertFalse(wrapper in gw.services) self.assertEqual(gw.services, {}) gw = gateway.BaseGateway({'test': TestService}) self.assertTrue(TestService in gw.services) wrapper = gw.services['test'] gw.removeService(TestService) self.assertFalse('test' in gw.services) self.assertFalse(TestService in gw.services) self.assertFalse(wrapper in gw.services) self.assertEqual(gw.services, {}) gw = gateway.BaseGateway({'test': TestService}) self.assertTrue(TestService in gw.services) wrapper = gw.services['test'] gw.removeService(wrapper) self.assertFalse('test' in gw.services) self.assertFalse(TestService in gw.services) self.assertFalse(wrapper in gw.services) self.assertEqual(gw.services, {}) x = TestService() gw = gateway.BaseGateway({'test': x}) gw.removeService(x) self.assertFalse('test' in gw.services) self.assertEqual(gw.services, {}) self.assertRaises(NameError, gw.removeService, 'test') self.assertRaises(NameError, gw.removeService, TestService) self.assertRaises(NameError, gw.removeService, wrapper) def test_service_request(self): gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() message = remoting.Request('spam', [], envelope=envelope) self.assertRaises(gateway.UnknownServiceError, 
gw.getServiceRequest, message, 'spam') message = remoting.Request('test.spam', [], envelope=envelope) sr = gw.getServiceRequest(message, 'test.spam') self.assertTrue(isinstance(sr, gateway.ServiceRequest)) self.assertEqual(sr.request, envelope) self.assertEqual(sr.service, TestService) self.assertEqual(sr.method, 'spam') message = remoting.Request('test') sr = gw.getServiceRequest(message, 'test') self.assertTrue(isinstance(sr, gateway.ServiceRequest)) self.assertEqual(sr.request, None) self.assertEqual(sr.service, TestService) self.assertEqual(sr.method, None) gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() message = remoting.Request('test') sr = gw.getServiceRequest(message, 'test') self.assertTrue(isinstance(sr, gateway.ServiceRequest)) self.assertEqual(sr.request, None) self.assertEqual(sr.service, TestService) self.assertEqual(sr.method, None) # try to access an unknown service message = remoting.Request('spam') self.assertRaises(gateway.UnknownServiceError, gw.getServiceRequest, message, 'spam') # check x.x calls message = remoting.Request('test.test') sr = gw.getServiceRequest(message, 'test.test') self.assertTrue(isinstance(sr, gateway.ServiceRequest)) self.assertEqual(sr.request, None) self.assertEqual(sr.service, TestService) self.assertEqual(sr.method, 'test') def test_long_service_name(self): gw = gateway.BaseGateway({'a.c.b.d': TestService}) envelope = remoting.Envelope() message = remoting.Request('a.c.b.d', [], envelope=envelope) sr = gw.getServiceRequest(message, 'a.c.b.d.spam') self.assertTrue(isinstance(sr, gateway.ServiceRequest)) self.assertEqual(sr.request, envelope) self.assertEqual(sr.service, TestService) self.assertEqual(sr.method, 'spam') def test_get_response(self): gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() self.assertRaises(NotImplementedError, gw.getResponse, envelope) def test_process_request(self): gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() request = remoting.Request('test.spam', envelope=envelope) processor = gw.getProcessor(request) response = processor(request) self.assertTrue(isinstance(response, remoting.Response)) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') def test_unknown_service(self): # Test a non existant service call gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() request = remoting.Request('nope', envelope=envelope) processor = gw.getProcessor(request) response = processor(request) self.assertFalse(gw.debug) self.assertTrue(isinstance(response, remoting.Message)) self.assertEqual(response.status, remoting.STATUS_ERROR) self.assertTrue(isinstance(response.body, remoting.ErrorFault)) self.assertEqual(response.body.code, 'Service.ResourceNotFound') self.assertEqual(response.body.description, 'Unknown service nope') self.assertEqual(response.body.details, None) def test_debug_traceback(self): # Test a non existant service call gw = gateway.BaseGateway({'test': TestService}, debug=True) envelope = remoting.Envelope() # Test a non existant service call request = remoting.Request('nope', envelope=envelope) processor = gw.getProcessor(request) response = processor(request) self.assertTrue(isinstance(response, remoting.Message)) self.assertEqual(response.status, remoting.STATUS_ERROR) self.assertTrue(isinstance(response.body, remoting.ErrorFault)) self.assertEqual(response.body.code, 'Service.ResourceNotFound') self.assertEqual(response.body.description, 'Unknown service 
nope') self.assertNotEquals(response.body.details, None) def test_malformed_credentials_header(self): gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() request = remoting.Request('test.spam', envelope=envelope) request.headers['Credentials'] = {'spam': 'eggs'} processor = gw.getProcessor(request) response = processor(request) self.assertTrue(isinstance(response, remoting.Response)) self.assertEqual(response.status, remoting.STATUS_ERROR) self.assertTrue(isinstance(response.body, remoting.ErrorFault)) self.assertEqual(response.body.code, 'KeyError') def test_authenticate(self): gw = gateway.BaseGateway({'test': TestService}) sr = gateway.ServiceRequest(None, gw.services['test'], None) self.assertTrue(gw.authenticateRequest(sr, None, None)) def auth(u, p): if u == 'spam' and p == 'eggs': return True return False gw = gateway.BaseGateway({'test': TestService}, authenticator=auth) self.assertFalse(gw.authenticateRequest(sr, None, None)) self.assertTrue(gw.authenticateRequest(sr, 'spam', 'eggs')) def test_null_target(self): gw = gateway.BaseGateway({}) request = remoting.Request(None) processor = gw.getProcessor(request) from pyamf.remoting import amf3 self.assertTrue(isinstance(processor, amf3.RequestProcessor)) def test_empty_target(self): gw = gateway.BaseGateway({}) request = remoting.Request('') processor = gw.getProcessor(request) from pyamf.remoting import amf3 self.assertTrue(isinstance(processor, amf3.RequestProcessor)) class QueryBrowserTestCase(unittest.TestCase): def test_request(self): gw = gateway.BaseGateway() echo = lambda x: x gw.addService(echo, 'echo', description='This is a test') envelope = remoting.Envelope() request = remoting.Request('echo') envelope['/1'] = request request.headers['DescribeService'] = None processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'This is a test') class AuthenticatorTestCase(unittest.TestCase): def setUp(self): self.called = False def tearDown(self): if self.called is False: self.fail("authenticator not called") def _auth(self, username, password): self.called = True if username == 'fred' and password == 'wilma': return True return False def test_gateway(self): gw = gateway.BaseGateway(authenticator=self._auth) echo = lambda x: x gw.addService(echo, 'echo') envelope = remoting.Envelope() request = remoting.Request('echo', body=['spam']) envelope.headers['Credentials'] = dict(userid='fred', password='wilma') envelope['/1'] = request processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') def test_service(self): gw = gateway.BaseGateway() echo = lambda x: x gw.addService(echo, 'echo', authenticator=self._auth) envelope = remoting.Envelope() request = remoting.Request('echo', body=['spam']) envelope.headers['Credentials'] = dict(userid='fred', password='wilma') envelope['/1'] = request processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') def test_class_decorator(self): class TestService: def echo(self, x): return x TestService.echo = gateway.authenticate(TestService.echo, self._auth) gw = gateway.BaseGateway({'test': TestService}) envelope = remoting.Envelope() request = remoting.Request('test.echo', body=['spam']) envelope.headers['Credentials'] = dict(userid='fred', password='wilma') envelope['/1'] = 
request processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') def test_func_decorator(self): def echo(x): return x echo = gateway.authenticate(echo, self._auth) gw = gateway.BaseGateway({'echo': echo}) envelope = remoting.Envelope() request = remoting.Request('echo', body=['spam']) envelope.headers['Credentials'] = dict(userid='fred', password='wilma') envelope['/1'] = request processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') def test_expose_request_decorator(self): def echo(x): return x def exposed_auth(request, username, password): return self._auth(username, password) exposed_auth = gateway.expose_request(exposed_auth) echo = gateway.authenticate(echo, exposed_auth) gw = gateway.BaseGateway({'echo': echo}) envelope = remoting.Envelope() request = remoting.Request('echo', body=['spam']) envelope.headers['Credentials'] = dict(userid='fred', password='wilma') envelope['/1'] = request processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') def test_expose_request_keyword(self): def echo(x): return x def exposed_auth(request, username, password): return self._auth(username, password) echo = gateway.authenticate(echo, exposed_auth, expose_request=True) gw = gateway.BaseGateway({'echo': echo}) envelope = remoting.Envelope() request = remoting.Request('echo', body=['spam']) envelope.headers['Credentials'] = dict(userid='fred', password='wilma') envelope['/1'] = request processor = gw.getProcessor(request) response = processor(request) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') class ExposeRequestTestCase(unittest.TestCase): def test_default(self): gw = gateway.BaseGateway() gw.addService(lambda x: x, 'test') envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertFalse(gw.mustExposeRequest(service_request)) def test_gateway(self): gw = gateway.BaseGateway(expose_request=True) gw.addService(lambda x: x, 'test') envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertTrue(gw.mustExposeRequest(service_request)) def test_service(self): gw = gateway.BaseGateway() gw.addService(lambda x: x, 'test', expose_request=True) envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertTrue(gw.mustExposeRequest(service_request)) def test_decorator(self): def echo(x): return x gateway.expose_request(echo) gw = gateway.BaseGateway() gw.addService(echo, 'test') envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertTrue(gw.mustExposeRequest(service_request)) class PreProcessingTestCase(unittest.TestCase): def _preproc(self): pass def test_default(self): gw = gateway.BaseGateway() gw.addService(lambda x: x, 'test') envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, 
gw.services['test'], None) self.assertEqual(gw.getPreprocessor(service_request), None) def test_global(self): gw = gateway.BaseGateway(preprocessor=self._preproc) gw.addService(lambda x: x, 'test') envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertEqual(gw.getPreprocessor(service_request), self._preproc) def test_service(self): gw = gateway.BaseGateway() gw.addService(lambda x: x, 'test', preprocessor=self._preproc) envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertEqual(gw.getPreprocessor(service_request), self._preproc) def test_decorator(self): def echo(x): return x gateway.preprocess(echo, self._preproc) gw = gateway.BaseGateway() gw.addService(echo, 'test') envelope = remoting.Envelope() request = remoting.Request('test') envelope['/1'] = request service_request = gateway.ServiceRequest(envelope, gw.services['test'], None) self.assertEqual(gw.getPreprocessor(service_request), self._preproc) def test_call(self): def preproc(sr, *args): self.called = True self.assertEqual(args, tuple()) self.assertTrue(isinstance(sr, gateway.ServiceRequest)) gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc) envelope = remoting.Envelope() request = remoting.Request('test.spam', envelope=envelope) processor = gw.getProcessor(request) response = processor(request) self.assertTrue(isinstance(response, remoting.Response)) self.assertEqual(response.status, remoting.STATUS_OK) self.assertEqual(response.body, 'spam') self.assertTrue(self.called) def test_fail(self): def preproc(sr, *args): raise IndexError gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc) envelope = remoting.Envelope() request = remoting.Request('test.spam', envelope=envelope) processor = gw.getProcessor(request) response = processor(request) self.assertTrue(isinstance(response, remoting.Response)) self.assertEqual(response.status, remoting.STATUS_ERROR)
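# A compact end-to-end sketch of the request flow the cases above exercise:
# build a gateway with an authenticator, wrap a call in an Envelope/Request,
# and push it through the processor. Only the gateway/remoting API already
# used in this module appears here.
def _gateway_roundtrip_sketch():
    def auth(username, password):
        return username == 'fred' and password == 'wilma'

    gw = gateway.BaseGateway({'echo': lambda x: x}, authenticator=auth)

    envelope = remoting.Envelope()
    envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
    request = remoting.Request('echo', body=['spam'])
    envelope['/1'] = request

    response = gw.getProcessor(request)(request)
    assert response.status == remoting.STATUS_OK
    assert response.body == 'spam'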
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import paddle.fluid as fluid import paddle import unittest import numpy from paddle.fluid.layers.control_flow import lod_rank_table from paddle.fluid.layers.control_flow import max_sequence_len from paddle.fluid.layers.control_flow import lod_tensor_to_array from paddle.fluid.layers.control_flow import array_to_lod_tensor from paddle.fluid.layers.control_flow import shrink_memory class TestDynRNN(unittest.TestCase): def setUp(self): self.word_dict = paddle.dataset.imdb.word_dict() self.BATCH_SIZE = 2 self.train_data = paddle.batch( paddle.dataset.imdb.train(self.word_dict), batch_size=self.BATCH_SIZE) def test_plain_while_op(self): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): sentence = fluid.layers.data( name='word', shape=[1], dtype='int64', lod_level=1) sent_emb = fluid.layers.embedding( input=sentence, size=[len(self.word_dict), 32], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='float32') rank_table = lod_rank_table(x=sent_emb) sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table) seq_len = max_sequence_len(rank_table=rank_table) i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) i.stop_gradient = False boot_mem = fluid.layers.fill_constant_batch_size_like( input=fluid.layers.array_read( array=sent_emb_array, i=i), value=0, shape=[-1, 100], dtype='float32') boot_mem.stop_gradient = False mem_array = fluid.layers.array_write(x=boot_mem, i=i) cond = fluid.layers.less_than(x=i, y=seq_len) cond.stop_gradient = False while_op = fluid.layers.While(cond=cond) out = fluid.layers.create_array(dtype='float32') with while_op.block(): mem = fluid.layers.array_read(array=mem_array, i=i) ipt = fluid.layers.array_read(array=sent_emb_array, i=i) mem = shrink_memory(x=mem, i=i, table=rank_table) hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh') fluid.layers.array_write(x=hidden, i=i, array=out) fluid.layers.increment(x=i, in_place=True) fluid.layers.array_write(x=hidden, i=i, array=mem_array) fluid.layers.less_than(x=i, y=seq_len, cond=cond) all_timesteps = array_to_lod_tensor(x=out, table=rank_table) last = fluid.layers.sequence_last_step(input=all_timesteps) logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) loss = fluid.layers.mean(loss) sgd = fluid.optimizer.SGD(1e-4) sgd.minimize(loss=loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) exe.run(startup_program) feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu) data = next(self.train_data()) val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] self.assertEqual((1, ), val.shape) print(val) self.assertFalse(numpy.isnan(val)) def test_train_dyn_rnn(self): main_program = fluid.Program() startup_program = fluid.Program() with 
fluid.program_guard(main_program, startup_program): sentence = fluid.layers.data( name='word', shape=[1], dtype='int64', lod_level=1) sent_emb = fluid.layers.embedding( input=sentence, size=[len(self.word_dict), 32], dtype='float32') rnn = fluid.layers.DynamicRNN() with rnn.block(): in_ = rnn.step_input(sent_emb) mem = rnn.memory(shape=[100], dtype='float32') out_ = fluid.layers.fc(input=[in_, mem], size=100, act='tanh') rnn.update_memory(mem, out_) rnn.output(out_) last = fluid.layers.sequence_last_step(input=rnn()) logits = fluid.layers.fc(input=last, size=1, act=None) label = fluid.layers.data(name='label', shape=[1], dtype='float32') loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) loss = fluid.layers.mean(loss) sgd = fluid.optimizer.Adam(1e-3) sgd.minimize(loss=loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) exe.run(startup_program) feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu) data = next(self.train_data()) loss_0 = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] for _ in range(100): val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] # loss should be small after 100 mini-batch self.assertLess(val[0], loss_0[0]) # this unit test is just used to the two layer nested dyn_rnn. def test_train_nested_dyn_rnn(self): word_dict = [i for i in range(30)] def fake_reader(): seq_len, label = [[2, 2]], [0, 1] data = [] for ele in seq_len: for j in ele: data.append([numpy.random.randint(30) \ for _ in range(j)]) while True: yield data, label train_data = paddle.batch(fake_reader, batch_size=2) main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): sentence = fluid.layers.data( name='word', shape=[1], dtype='int64', lod_level=2) label = fluid.layers.data( name='label', shape=[1], dtype='float32', lod_level=1) rnn = fluid.layers.DynamicRNN() with rnn.block(): in_ = rnn.step_input(sentence) sent_emb = fluid.layers.embedding( input=in_, size=[len(word_dict), 32], dtype='float32') out_ = fluid.layers.fc(input=sent_emb, size=100, act='tanh') rnn1 = fluid.layers.DynamicRNN() with rnn1.block(): in_1 = rnn1.step_input(out_) out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh') rnn1.output(out_1) last = fluid.layers.sequence_last_step(input=rnn1()) rnn.output(last) last = rnn() logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) loss = fluid.layers.mean(loss) sgd = fluid.optimizer.SGD(1e-3) #sgd = fluid.optimizer.Adam(1e-3) sgd.minimize(loss=loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) exe.run(startup_program) feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu) data = next(train_data()) val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] for _ in range(100): val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] print(val) # this unit test is just used to the two layer nested dyn_rnn. 
def test_train_nested_dyn_rnn2(self): word_dict = [i for i in range(30)] def fake_reader(): seq_len, label = [[2, 2]], [0, 1] data = [] for ele in seq_len: for j in ele: data.append([numpy.random.randint(30) \ for _ in range(j)]) while True: yield data, label train_data = paddle.batch(fake_reader, batch_size=2) hidden_size = 32 main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): sentence = fluid.layers.data( name='word', shape=[1], dtype='int64', lod_level=2) label = fluid.layers.data( name='label', shape=[1], dtype='float32', lod_level=1) rnn = fluid.layers.DynamicRNN() with rnn.block(): in_ = rnn.step_input(sentence) sent_emb = fluid.layers.embedding( input=in_, size=[len(word_dict), hidden_size], dtype='float32') input_forward_proj = fluid.layers.fc(input=sent_emb, size=hidden_size * 4, act=None, bias_attr=False) forward, _ = fluid.layers.dynamic_lstm( input=input_forward_proj, size=hidden_size * 4, use_peepholes=False) rnn1 = fluid.layers.DynamicRNN() with rnn1.block(): in_1 = rnn1.step_input(forward) out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh') rnn1.output(out_1) last = fluid.layers.sequence_last_step(input=rnn1()) rnn.output(last) last = rnn() logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) loss = fluid.layers.mean(loss) sgd = fluid.optimizer.SGD(1e-3) #sgd = fluid.optimizer.Adam(1e-3) sgd.minimize(loss=loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) exe.run(startup_program) feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu) data = next(train_data()) val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] for _ in range(100): val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] if __name__ == '__main__': unittest.main()
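# A condensed sketch of the DynamicRNN pattern the tests above build on:
# step_input() slices the LoD sequence one time step at a time, memory() carries
# hidden state across steps, and rnn() returns the full output sequence.
# Layer sizes and the vocabulary size are illustrative; the calls mirror
# test_train_dyn_rnn.
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    words = fluid.layers.data(name='word', shape=[1], dtype='int64', lod_level=1)
    emb = fluid.layers.embedding(input=words, size=[30, 32], dtype='float32')

    rnn = fluid.layers.DynamicRNN()
    with rnn.block():
        step = rnn.step_input(emb)                        # current time step
        prev = rnn.memory(shape=[100], dtype='float32')   # state from previous step
        hidden = fluid.layers.fc(input=[step, prev], size=100, act='tanh')
        rnn.update_memory(prev, hidden)                   # carry new state forward
        rnn.output(hidden)                                # emit this step's output

    # The tests feed this into sequence_last_step, an fc head and a loss.
    last = fluid.layers.sequence_last_step(input=rnn())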
import datetime from urlparse import urlparse from utils import log as logging from django.shortcuts import get_object_or_404, render_to_response from django.views.decorators.http import condition from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse, Http404 from django.conf import settings from django.contrib.auth.decorators import login_required from django.template import RequestContext # from django.db import IntegrityError from apps.rss_feeds.models import Feed, merge_feeds from apps.rss_feeds.models import MFetchHistory from apps.rss_feeds.models import MFeedIcon from apps.push.models import PushSubscription from apps.analyzer.models import get_classifiers_for_user from apps.reader.models import UserSubscription from apps.rss_feeds.models import MStory from utils.user_functions import ajax_login_required from utils import json_functions as json, feedfinder from utils.feed_functions import relative_timeuntil, relative_timesince from utils.user_functions import get_user from utils.view_functions import get_argument_or_404 from utils.view_functions import required_params from vendor.timezones.utilities import localtime_for_timezone from utils.ratelimit import ratelimit IGNORE_AUTOCOMPLETE = [ "facebook.com/feeds/notifications.php", "inbox", "secret", "password", "latitude", ] @json.json_view def search_feed(request): address = request.REQUEST.get('address') offset = int(request.REQUEST.get('offset', 0)) if not address: return dict(code=-1, message="Please provide a URL/address.") feed = Feed.get_feed_from_url(address, create=False, aggressive=True, offset=offset) if feed: return feed.canonical() else: return dict(code=-1, message="No feed found matching that XML or website address.") @json.json_view def load_single_feed(request, feed_id): user = get_user(request) feed = get_object_or_404(Feed, pk=feed_id) classifiers = get_classifiers_for_user(user, feed_id=feed.pk) payload = feed.canonical(full=True) payload['classifiers'] = classifiers return payload def feed_favicon_etag(request, feed_id): try: feed_icon = MFeedIcon.objects.get(feed_id=feed_id) except MFeedIcon.DoesNotExist: return return feed_icon.color @condition(etag_func=feed_favicon_etag) def load_feed_favicon(request, feed_id): not_found = False try: feed_icon = MFeedIcon.objects.get(feed_id=feed_id) except MFeedIcon.DoesNotExist: not_found = True if not_found or not feed_icon.data: return HttpResponseRedirect(settings.MEDIA_URL + 'img/icons/circular/world.png') icon_data = feed_icon.data.decode('base64') return HttpResponse(icon_data, mimetype='image/png') @json.json_view def feed_autocomplete(request): query = request.GET.get('term') or request.GET.get('query') version = int(request.GET.get('v', 1)) format = request.GET.get('format', 'autocomplete') # user = get_user(request) # if True or not user.profile.is_premium: # return dict(code=-1, message="Overloaded, no autocomplete results.", feeds=[], term=query) if not query: return dict(code=-1, message="Specify a search 'term'.", feeds=[], term=query) if '.' 
in query: try: parts = urlparse(query) if not parts.hostname and not query.startswith('http'): parts = urlparse('http://%s' % query) if parts.hostname: query = [parts.hostname] query.extend([p for p in parts.path.split('/') if p]) query = ' '.join(query) except: logging.user(request, "~FGAdd search, could not parse url in ~FR%s" % query) query_params = query.split(' ') tries_left = 5 while len(query_params) and tries_left: tries_left -= 1 feed_ids = Feed.autocomplete(' '.join(query_params)) if feed_ids: break else: query_params = query_params[:-1] feeds = list(set([Feed.get_by_id(feed_id) for feed_id in feed_ids])) feeds = [feed for feed in feeds if feed and not feed.branch_from_feed] feeds = [feed for feed in feeds if all([x not in feed.feed_address for x in IGNORE_AUTOCOMPLETE])] if format == 'autocomplete': feeds = [{ 'id': feed.pk, 'value': feed.feed_address, 'label': feed.feed_title, 'tagline': feed.data and feed.data.feed_tagline, 'num_subscribers': feed.num_subscribers, } for feed in feeds] else: feeds = [feed.canonical(full=True) for feed in feeds] feeds = sorted(feeds, key=lambda f: -1 * f['num_subscribers']) feed_ids = [f['id'] for f in feeds] feed_icons = dict((icon.feed_id, icon) for icon in MFeedIcon.objects.filter(feed_id__in=feed_ids)) for feed in feeds: if feed['id'] in feed_icons: feed_icon = feed_icons[feed['id']] if feed_icon.data: feed['favicon_color'] = feed_icon.color feed['favicon'] = feed_icon.data logging.user(request, "~FGAdd Search: ~SB%s ~SN(%s matches)" % (query, len(feeds),)) if version > 1: return { 'feeds': feeds, 'term': query, } else: return feeds @ratelimit(minutes=1, requests=10) @json.json_view def load_feed_statistics(request, feed_id): user = get_user(request) timezone = user.profile.timezone stats = dict() feed = get_object_or_404(Feed, pk=feed_id) feed.update_all_statistics() feed.set_next_scheduled_update(verbose=True, skip_scheduling=True) feed.save_feed_story_history_statistics() feed.save_classifier_counts() # Dates of last and next update stats['active'] = feed.active stats['last_update'] = relative_timesince(feed.last_update) stats['next_update'] = relative_timeuntil(feed.next_scheduled_update) stats['push'] = feed.is_push if feed.is_push: try: stats['push_expires'] = localtime_for_timezone(feed.push.lease_expires, timezone).strftime("%Y-%m-%d %H:%M:%S") except PushSubscription.DoesNotExist: stats['push_expires'] = 'Missing push' feed.is_push = False feed.save() # Minutes between updates update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False) stats['update_interval_minutes'] = update_interval_minutes original_active_premium_subscribers = feed.active_premium_subscribers original_premium_subscribers = feed.premium_subscribers feed.active_premium_subscribers = max(feed.active_premium_subscribers+1, 1) feed.premium_subscribers += 1 premium_update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False, premium_speed=True) feed.active_premium_subscribers = original_active_premium_subscribers feed.premium_subscribers = original_premium_subscribers stats['premium_update_interval_minutes'] = premium_update_interval_minutes stats['errors_since_good'] = feed.errors_since_good # Stories per month - average and month-by-month breakout average_stories_per_month, story_count_history = feed.average_stories_per_month, feed.data.story_count_history stats['average_stories_per_month'] = average_stories_per_month stats['story_count_history'] = story_count_history and json.decode(story_count_history) # Subscribers 
stats['subscriber_count'] = feed.num_subscribers stats['num_subscribers'] = feed.num_subscribers stats['stories_last_month'] = feed.stories_last_month stats['last_load_time'] = feed.last_load_time stats['premium_subscribers'] = feed.premium_subscribers stats['active_subscribers'] = feed.active_subscribers stats['active_premium_subscribers'] = feed.active_premium_subscribers # Classifier counts stats['classifier_counts'] = json.decode(feed.data.feed_classifier_counts) # Fetch histories fetch_history = MFetchHistory.feed(feed_id, timezone=timezone) stats['feed_fetch_history'] = fetch_history['feed_fetch_history'] stats['page_fetch_history'] = fetch_history['page_fetch_history'] stats['feed_push_history'] = fetch_history['push_history'] logging.user(request, "~FBStatistics: ~SB%s" % (feed)) return stats @json.json_view def load_feed_settings(request, feed_id): stats = dict() feed = get_object_or_404(Feed, pk=feed_id) user = get_user(request) timezone = user.profile.timezone fetch_history = MFetchHistory.feed(feed_id, timezone=timezone) stats['feed_fetch_history'] = fetch_history['feed_fetch_history'] stats['page_fetch_history'] = fetch_history['page_fetch_history'] stats['feed_push_history'] = fetch_history['push_history'] stats['duplicate_addresses'] = feed.duplicate_addresses.all() return stats @json.json_view def exception_retry(request): user = get_user(request) feed_id = get_argument_or_404(request, 'feed_id') reset_fetch = json.decode(request.POST['reset_fetch']) feed = Feed.get_by_id(feed_id) original_feed = feed if not feed: raise Http404 feed.schedule_feed_fetch_immediately() changed = False if feed.has_page_exception: changed = True feed.has_page_exception = False if feed.has_feed_exception: changed = True feed.has_feed_exception = False if not feed.active: changed = True feed.active = True if changed: feed.save(update_fields=['has_page_exception', 'has_feed_exception', 'active']) original_fetched_once = feed.fetched_once if reset_fetch: logging.user(request, "~FRRefreshing exception feed: ~SB%s" % (feed)) feed.fetched_once = False else: logging.user(request, "~FRForcing refreshing feed: ~SB%s" % (feed)) feed.fetched_once = True if feed.fetched_once != original_fetched_once: feed.save(update_fields=['fetched_once']) feed = feed.update(force=True, compute_scores=False, verbose=True) feed = Feed.get_by_id(feed.pk) try: usersub = UserSubscription.objects.get(user=user, feed=feed) except UserSubscription.DoesNotExist: usersubs = UserSubscription.objects.filter(user=user, feed=original_feed) if usersubs: usersub = usersubs[0] usersub.switch_feed(feed, original_feed) else: return {'code': -1} usersub.calculate_feed_scores(silent=False) feeds = {feed.pk: usersub and usersub.canonical(full=True), feed_id: usersub.canonical(full=True)} return {'code': 1, 'feeds': feeds} @ajax_login_required @json.json_view def exception_change_feed_address(request): feed_id = request.POST['feed_id'] feed = get_object_or_404(Feed, pk=feed_id) original_feed = feed feed_address = request.POST['feed_address'] timezone = request.user.profile.timezone code = -1 if not feed.known_good and (feed.has_page_exception or feed.has_feed_exception): # Fix broken feed logging.user(request, "~FRFixing feed exception by address: ~SB%s~SN to ~SB%s" % (feed.feed_address, feed_address)) feed.has_feed_exception = False feed.active = True feed.fetched_once = False feed.feed_address = feed_address duplicate_feed = feed.schedule_feed_fetch_immediately() code = 1 if duplicate_feed: new_feed = Feed.objects.get(pk=duplicate_feed.pk) 
feed = new_feed new_feed.schedule_feed_fetch_immediately() new_feed.has_feed_exception = False new_feed.active = True new_feed.save() merge_feeds(new_feed.pk, feed.pk) else: # Branch good feed logging.user(request, "~FRBranching feed by address: ~SB%s~SN to ~SB%s" % (feed.feed_address, feed_address)) feed, _ = Feed.objects.get_or_create(feed_address=feed_address, feed_link=feed.feed_link) code = 1 if feed.pk != original_feed.pk: try: feed.branch_from_feed = original_feed.branch_from_feed or original_feed except Feed.DoesNotExist: feed.branch_from_feed = original_feed feed.feed_address_locked = True feed.save() feed = feed.update() feed = Feed.get_by_id(feed.pk) try: usersub = UserSubscription.objects.get(user=request.user, feed=feed) except UserSubscription.DoesNotExist: usersubs = UserSubscription.objects.filter(user=request.user, feed=original_feed) if usersubs: usersub = usersubs[0] usersub.switch_feed(feed, original_feed) else: fetch_history = MFetchHistory.feed(feed_id, timezone=timezone) return { 'code': -1, 'feed_fetch_history': fetch_history['feed_fetch_history'], 'page_fetch_history': fetch_history['page_fetch_history'], 'push_history': fetch_history['push_history'], } usersub.calculate_feed_scores(silent=False) feed.update_all_statistics() classifiers = get_classifiers_for_user(usersub.user, feed_id=usersub.feed_id) feeds = { original_feed.pk: usersub and usersub.canonical(full=True, classifiers=classifiers), } if feed and feed.has_feed_exception: code = -1 fetch_history = MFetchHistory.feed(feed_id, timezone=timezone) return { 'code': code, 'feeds': feeds, 'new_feed_id': usersub.feed_id, 'feed_fetch_history': fetch_history['feed_fetch_history'], 'page_fetch_history': fetch_history['page_fetch_history'], 'push_history': fetch_history['push_history'], } @ajax_login_required @json.json_view def exception_change_feed_link(request): feed_id = request.POST['feed_id'] feed = get_object_or_404(Feed, pk=feed_id) original_feed = feed feed_link = request.POST['feed_link'] timezone = request.user.profile.timezone code = -1 if not feed.known_good and (feed.has_page_exception or feed.has_feed_exception): # Fix broken feed logging.user(request, "~FRFixing feed exception by link: ~SB%s~SN to ~SB%s" % (feed.feed_link, feed_link)) feed_address = feedfinder.feed(feed_link) if feed_address: code = 1 feed.has_page_exception = False feed.active = True feed.fetched_once = False feed.feed_link = feed_link feed.feed_address = feed_address duplicate_feed = feed.schedule_feed_fetch_immediately() if duplicate_feed: new_feed = Feed.objects.get(pk=duplicate_feed.pk) feed = new_feed new_feed.schedule_feed_fetch_immediately() new_feed.has_page_exception = False new_feed.active = True new_feed.save() else: # Branch good feed logging.user(request, "~FRBranching feed by link: ~SB%s~SN to ~SB%s" % (feed.feed_link, feed_link)) feed, _ = Feed.objects.get_or_create(feed_address=feed.feed_address, feed_link=feed_link) code = 1 if feed.pk != original_feed.pk: try: feed.branch_from_feed = original_feed.branch_from_feed or original_feed except Feed.DoesNotExist: feed.branch_from_feed = original_feed feed.feed_link_locked = True feed.save() feed = feed.update() feed = Feed.get_by_id(feed.pk) try: usersub = UserSubscription.objects.get(user=request.user, feed=feed) except UserSubscription.DoesNotExist: usersubs = UserSubscription.objects.filter(user=request.user, feed=original_feed) if usersubs: usersub = usersubs[0] usersub.switch_feed(feed, original_feed) else: fetch_history = MFetchHistory.feed(feed_id, 
timezone=timezone) return { 'code': -1, 'feed_fetch_history': fetch_history['feed_fetch_history'], 'page_fetch_history': fetch_history['page_fetch_history'], 'push_history': fetch_history['push_history'], } usersub.calculate_feed_scores(silent=False) feed.update_all_statistics() classifiers = get_classifiers_for_user(usersub.user, feed_id=usersub.feed_id) if feed and feed.has_feed_exception: code = -1 feeds = { original_feed.pk: usersub.canonical(full=True, classifiers=classifiers), } fetch_history = MFetchHistory.feed(feed_id, timezone=timezone) return { 'code': code, 'feeds': feeds, 'new_feed_id': usersub.feed_id, 'feed_fetch_history': fetch_history['feed_fetch_history'], 'page_fetch_history': fetch_history['page_fetch_history'], 'push_history': fetch_history['push_history'], } @login_required def status(request): if not request.user.is_staff: logging.user(request, "~SKNON-STAFF VIEWING RSS FEEDS STATUS!") assert False return HttpResponseForbidden() minutes = int(request.GET.get('minutes', 10)) now = datetime.datetime.now() hour_ago = now - datetime.timedelta(minutes=minutes) feeds = Feed.objects.filter(last_update__gte=hour_ago).order_by('-last_update') return render_to_response('rss_feeds/status.xhtml', { 'feeds': feeds }, context_instance=RequestContext(request)) @required_params('story_id', feed_id=int) @json.json_view def original_text(request): story_id = request.REQUEST.get('story_id') feed_id = request.REQUEST.get('feed_id') story_hash = request.REQUEST.get('story_hash', None) force = request.REQUEST.get('force', False) debug = request.REQUEST.get('debug', False) if story_hash: story, _ = MStory.find_story(story_hash=story_hash) else: story, _ = MStory.find_story(story_id=story_id, story_feed_id=feed_id) if not story: logging.user(request, "~FYFetching ~FGoriginal~FY story text: ~FRstory not found") return {'code': -1, 'message': 'Story not found.', 'original_text': None, 'failed': True} original_text = story.fetch_original_text(force=force, request=request, debug=debug) return { 'feed_id': feed_id, 'story_id': story_id, 'original_text': original_text, 'failed': not original_text or len(original_text) < 100, } @required_params('story_hash') def original_story(request): story_hash = request.REQUEST.get('story_hash') force = request.REQUEST.get('force', False) debug = request.REQUEST.get('debug', False) story, _ = MStory.find_story(story_hash=story_hash) if not story: logging.user(request, "~FYFetching ~FGoriginal~FY story page: ~FRstory not found") return {'code': -1, 'message': 'Story not found.', 'original_page': None, 'failed': True} original_page = story.fetch_original_page(force=force, request=request, debug=debug) return HttpResponse(original_page or "")
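# The feed_autocomplete view above relaxes the search query one trailing token
# at a time until Feed.autocomplete() returns matches (at most five attempts).
# The self-contained sketch below shows the same back-off loop with a stubbed
# lookup in place of the Feed model; the helper names are illustrative.
def autocomplete_ids(query, lookup, max_tries=5):
    """Drop trailing tokens until `lookup` yields ids or the tries run out."""
    params = query.split(' ')
    tries_left = max_tries
    while params and tries_left:
        tries_left -= 1
        ids = lookup(' '.join(params))
        if ids:
            return ids
        params = params[:-1]
    return []

# A fake lookup that only recognises the single term "daring":
print(autocomplete_ids("daring fireball blog", lambda q: [42] if q == "daring" else []))
# -> [42]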
""" Nagios notification module. Uses the curl protocol to push SFT notifications to a nagios server. On the nagios server site we recommend to install the NSCAweb (http://wiki.smetj.net/wiki/Nscaweb) module, as we will submit multi-line notifications, which will are fed to the message to the nagios.cmd pipe. The messages are received there as passive tests. """ __author__ = "Placi Flury [email protected]" __date__ = "16.02.2012" __version__ = "0.2.0" import time import logging import Queue from subprocess import Popen, PIPE from errors.nagios import NagiosNotifierError class NagiosNotification(object): """ Notification Object for Nagios """ STATUS = {'OK': 0, 'WARNING': 1, 'CRITICAL': 2, 'UNKNOWN': -1} def __init__(self, host, service ): """ host - host attributed to nofication service - service attributed to notification """ self.host = host self.service = service self.message = None self.perf_data = None self.status = 'UNKNOWN' def get_host(self): """ returns host attributed to notication""" return self.host def get_service(self): """ returns service attributed to notification """ return self.service def set_message(self, msg): """ Sets (status) message of notification. """ self.message = msg def get_message(self): """ Returns status message of notifiation. """ return self.message def set_perf_data(self, perfdata): """ Adding (service) performance data. perfdata - string with performance data """ self.perf_data = '%r' % perfdata def get_perf_data(self): """ Returns performance data of service (if any) """ return self.perf_data def has_perf_data(self): """ returns true if performance data is available """ if self.perf_data: return True def set_status(self, status = 'UNKNOWN'): """ Sets status of service. Valid values are: 'OK', 'WARNING','CRITICAL', 'UNKNOWN'. status - status of service. default= UNKNOWN, also set if wrong values is passed """ if status not in NagiosNotification.STATUS.keys(): self.status = 'UNKNOWN' else: self.status = status def get_status(self, nsca_coded = True): """ returns service status of notification. nsca_coded - if set True (default) returned values have been converted to the standard nagios return codes (i.e. OK = 0, WARNING = 1 CRITICAL=2, UNKNOWN= -1) if set False, you get back the strings instead. """ if nsca_coded: return NagiosNotification.STATUS[self.status] else: return self.status class NagiosNotifier(object): """ Nagios notification class. Collects various status messages and notifies Nagios server via the curl protocol. """ def __init__(self, config): """ config - global config object """ self.log = logging.getLogger(__name__) self.curl_bin = config.curl_bin self.nscaweb_endpoint = config.nscaweb_host + ':' + \ str(config.nscaweb_port) +'/queue/' + \ config.nscaweb_queue self.nscaweb_port = config.nscaweb_port self.nscaweb_user = config.nscaweb_user self.nscaweb_pwd = config.nscaweb_pwd self.queue = Queue.LifoQueue(0) # notifications queue def add_notification(self, notification): """ Adds notification to notification queue. notification - NagiosNotfication object """ self.queue.put(notification) def notify(self, trace=True): """ Push notifications to nscaweb host (usually running nagios server). params: trace - if set true (default), the notifications for the same host/service pairs will be sent as chronological traces (like stack traces). if set false, only the most recent notification for a host/service pair will be sent (older notifications are masked out). 
""" # curl_msg: [TIMESTAMP] COMMAND_NAME;argument1;argument2;...;argumentN if not trace: pass else: hs_msg_stack = {} hs_perf_data = {} hs_fin_status = {} while not self.queue.empty(): _note = self.queue.get() _status = _note.get_status() key = (_note.get_host(), _note.get_service()) if not hs_msg_stack.has_key(key): hs_msg_stack[key] = [] hs_fin_status[key] = _status if _note.has_perf_data(): hs_perf_data[key] = _note.get_perf_data() _msg = _note.get_status(nsca_coded=False) + ': ' + _note.get_message() hs_msg_stack[key].append(_msg) # fin status changes from OK to WARN for any non-OK sub-test that # had any problem if hs_fin_status[key] == 0 and _status != 0: hs_fin_status[key] = 1 for k in hs_msg_stack.keys(): timestamp = int(time.time()) curl_msg = ('[%d] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % \ (timestamp, k[0], k[1], hs_fin_status[k], hs_msg_stack[k])) if hs_perf_data.has_key(k): curl_msg += ("|%s" % hs_perf_data[k]) self.log.debug("username=%s'" % self.nscaweb_user) Popen([self.curl_bin, '-d', 'username=%s' % self.nscaweb_user, '-d', 'password=%s' % self.nscaweb_pwd, '--data-urlencode', "input=%s" % curl_msg , self.nscaweb_endpoint], stdout=PIPE) self.log.debug('Sent notification >%s<.' % curl_msg) if __name__ == '__main__': # just some quick test LOG_FILENAME = 'nagios_notifier.log' logging.basicConfig(filename = LOG_FILENAME, level = logging.DEBUG) notifier = NagiosNotifier('nagios.smscg.ch') note = NagiosNotification('laren.switch.ch','sft_daemon') note.set_message("Everything alright") note.set_status('OK') notifier.add_notification(note) notifier.notify()
import sys import pandas as pd from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import svm from sklearn.metrics import classification_report from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.metrics import f1_score from sklearn import tree from collections import OrderedDict from sklearn.ensemble import RandomForestClassifier def loadCategoryDataFile(categoryDataPath): """ Module to load aspect category dataset Args: categoryDataPath: aspect category data set Returns: train: training data test: testing data """ categoryDF = pd.read_csv(categoryDataPath,delimiter='#',encoding = 'utf-8') train, test = train_test_split(categoryDF, test_size = 0.2) train = train.reset_index(drop='True') test = test.reset_index(drop='True') return train,test def processCategoryData(train,test): """ Module to create training and test data feature inputs and labels Args: train: training data test: testing data Returns: trainData: relevant training data trainLabels: train data labels testData: relevant test data testLabels: test data labels sentiTrainLabels: category sentiment training labels sentiTestLabels: category sentiment testing labels """ trainData=[] testData = [] trainLabels = [] testLabels = [] sentiTrainLabels=[] sentiTestLabels=[] for id in train.index: trainData.append(train.iloc[id,2]) trainLabels.append(train.iloc[id,3]) sentiTrainLabels.append(train.iloc[id,1]) for id in test.index: testData.append(test.iloc[id,2]) testLabels.append(test.iloc[id,3]) sentiTestLabels.append(test.iloc[id,1]) trainLabels = ['missing' if str(x)== 'nan' else x for x in trainLabels] testLabels = ['missing' if str(x) == 'nan' else x for x in testLabels] return trainData,trainLabels,testData,testLabels,sentiTrainLabels,sentiTestLabels def loadHindiStopWords(stopWordsDataPath): """ Module to load hindi stop words Args: stopWordsDataPath: hindi stop words dataset Returns: stopWords: stop word list """ with open(stopWordsDataPath) as sw: stopWords=[x.strip("\n") for x in sw.readlines()] return stopWords def createVectorizer(stopWords,trainData,testData): """ Module to create tfidf feature vectors Args: stopWords: stop word list trainData: relevant training data testData: relevant test data Returns: train_vectors: training feature vector test_vectors: testing feature vector """ vectorizer = TfidfVectorizer(stop_words=stopWords,min_df=5,max_df = 0.8, sublinear_tf=True,use_idf=True) train_vectors = vectorizer.fit_transform(trainData) test_vectors = vectorizer.transform(testData) return train_vectors,test_vectors def loadSVMRbfClassifier(train_vectors,trainLabels,sentiTrainLabels,test_vectors): """ RBF SVM Classifier module Args: train_vectors: training feature vector trainLabels: train data labels test_vectors: testing feature vector sentiTrainLabels: category sentiment training labels Returns: prediction_rbf: rbf predicted categories senti_prediction_rbf: rbf predicted sentiments """ classifier_rbf = svm.SVC() classifier_rbf.fit(train_vectors, trainLabels) prediction_rbf = classifier_rbf.predict(test_vectors) senti_classifier_rbf=svm.SVC() senti_classifier_rbf.fit(train_vectors,sentiTrainLabels) senti_prediction_rbf = senti_classifier_rbf.predict(test_vectors) return prediction_rbf,senti_prediction_rbf def loadSVMLinearClassifier(train_vectors,trainLabels,test_vectors,sentiTrainLabels): """ Linear SVM Classifier module Args: train_vectors: training feature vector trainLabels: train data labels test_vectors: 
testing feature vector sentiTrainLabels: category sentiment training labels Returns: prediction_linear: linear predicted categories senti_prediction_linear: linear predicted sentiments """ classifier_linear = OneVsRestClassifier(SVC(kernel='linear')) classifier_linear.fit(train_vectors, trainLabels) prediction_linear = classifier_linear.predict(test_vectors) senti_classifier_linear=svm.SVC(kernel='linear') senti_classifier_linear.fit(train_vectors,sentiTrainLabels) senti_prediction_linear = senti_classifier_linear.predict(test_vectors) return prediction_linear,senti_prediction_linear def loadSVMLibLinearClassifier(train_vectors,trainLabels,test_vectors,sentiTrainLabels): """ Lib Linear SVM Classifier module Args: train_vectors: training feature vector trainLabels: train data labels test_vectors: testing feature vector sentiTrainLabels: category sentiment training labels Returns: prediction_liblinear: lib linear predicted categories senti_prediction_liblinear: lib linear predicted sentiments """ classifier_liblinear = svm.LinearSVC() classifier_liblinear.fit(train_vectors, trainLabels) prediction_liblinear = classifier_liblinear.predict(test_vectors) senti_classifier_liblinear=svm.LinearSVC() senti_classifier_liblinear.fit(train_vectors,sentiTrainLabels) senti_prediction_liblinear = senti_classifier_liblinear.predict(test_vectors) return prediction_liblinear,senti_prediction_liblinear def getFScores(y_true,y_pred): """ Module to find the f_score of predicted results Args: y_true: true labels y_pred: predicted labels Returns: None """ print f1_score(y_true, y_pred, average='micro') def getClassificationReport(y_train,y_true,y_pred): """ Module to find class-wise classification report Args: y_train: training data labels y_true: true labels y_pred: predicted labels Returns: None """ target_names = set(y_train) target_list = list(target_names) print(classification_report(y_true, y_pred, target_names=target_list)) def loadDecisionTree(trainLabels,train_vectors,test_vectors): """ Decision Tree classification module Args: trainLabels: training data labels train_vectors: training feature vectors test_vectors: testing feature vectors Returns: resultData: Decision Tree classifier prediction trainMacros: Decision Tree label to integer map reverseMap: Decision Tree integer to label map """ trainSet = set(trainLabels) i=0 macros=OrderedDict() reverseMap=OrderedDict() for item in trainSet: macros[item]=i reverseMap[i]=item i += 1 trainMacros=[] for item in trainLabels: trainMacros.append(macros[item]) clf = OneVsRestClassifier(tree.DecisionTreeClassifier()) clf = clf.fit(train_vectors, trainMacros) result = clf.predict(test_vectors) resultList= result.tolist() resultData=[] for item in resultList: resultData.append(reverseMap[item]) return resultData,trainMacros,reverseMap def loadRandomForest(train_vectors,trainMacros,test_vectors,reverseMap): """ Random Forest classifier module Args: train_vectors: training feature vectors trainMacros: label to integer map test_vectors: testing feature vectors reverseMap: integer to label map Returns: rfresultData: Random Forest prediction results """ rfClf = OneVsRestClassifier(RandomForestClassifier()) rfClf.fit(train_vectors,trainMacros) rfresult = rfClf.predict(test_vectors) rfresultList = rfresult.tolist() rfresultData=[] for item in rfresultList: rfresultData.append(reverseMap[item]) return rfresultData def main(categoryDataPath,stopWordsDataPath): """ This module performs Aspect Category Detection and Aspect Category Sentiment Analysis based on Multi-Label 
    Machine Learning models. It uses RBF SVM, Linear SVM, LibLinear SVM,
    Decision Tree and Random Forest classifiers for Aspect Category Detection
    and compares their results.
    Args:
        categoryDataPath: aspect category dataset
        stopWordsDataPath: stop words dataset
    Returns:
        None
    """
    train,test = loadCategoryDataFile(categoryDataPath)
    trainData,trainLabels,testData,testLabels,sentiTrainLabels,sentiTestLabels = processCategoryData(train,test)
    stopWords = loadHindiStopWords(stopWordsDataPath)
    train_vectors,test_vectors = createVectorizer(stopWords,trainData,testData)
    prediction_rbf,senti_prediction_rbf = loadSVMRbfClassifier(train_vectors,trainLabels,sentiTrainLabels,test_vectors)
    prediction_linear,senti_prediction_linear = loadSVMLinearClassifier(train_vectors,trainLabels,test_vectors,sentiTrainLabels)
    prediction_liblinear,senti_prediction_liblinear = loadSVMLibLinearClassifier(train_vectors,trainLabels,test_vectors,sentiTrainLabels)
    getFScores(testLabels, prediction_rbf)
    getFScores(testLabels, prediction_linear)
    getFScores(testLabels, prediction_liblinear)
    getFScores(sentiTestLabels, senti_prediction_rbf)
    getFScores(sentiTestLabels, senti_prediction_linear)
    getFScores(sentiTestLabels, senti_prediction_liblinear)
    # getClassificationReport expects the training labels first (to build the
    # class list), then the true and the predicted test labels.
    getClassificationReport(trainLabels, testLabels, prediction_rbf)
    getClassificationReport(trainLabels, testLabels, prediction_linear)
    getClassificationReport(trainLabels, testLabels, prediction_liblinear)
    getClassificationReport(sentiTrainLabels, sentiTestLabels, senti_prediction_rbf)
    getClassificationReport(sentiTrainLabels, sentiTestLabels, senti_prediction_linear)
    getClassificationReport(sentiTrainLabels, sentiTestLabels, senti_prediction_liblinear)
    resultData,trainMacros,reverseMap = loadDecisionTree(trainLabels,train_vectors,test_vectors)
    getFScores(testLabels, resultData)
    rfresultData = loadRandomForest(train_vectors,trainMacros,test_vectors,reverseMap)
    getFScores(testLabels, rfresultData)


if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2])
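# The script above is invoked as: python <script> <categoryDataPath> <stopWordsDataPath>
# (see the sys.argv call). Stripped of the file handling, each classifier run is
# the same vectorise/train/predict/score loop; the toy data below is made up and
# the TfidfVectorizer pruning options (min_df, max_df) are omitted so it runs on
# a handful of sentences.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.metrics import f1_score

train_texts = ["the food was great", "service was slow", "lovely ambience", "food arrived cold"]
train_labels = ["food", "service", "ambience", "food"]
test_texts = ["really slow service", "tasty food"]
test_labels = ["service", "food"]

vectorizer = TfidfVectorizer()
train_vectors = vectorizer.fit_transform(train_texts)
test_vectors = vectorizer.transform(test_texts)

clf = svm.LinearSVC()                      # same estimator as loadSVMLibLinearClassifier
clf.fit(train_vectors, train_labels)
pred = clf.predict(test_vectors)
print(f1_score(test_labels, pred, average='micro'))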
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This sphinx extension adds two directives for summarizing the public members of a module or package. These directives are primarily for use with the `automodapi` extension, but can be used independently. ======================= `automodsumm` directive ======================= This directive will produce an "autosummary"-style table for public attributes of a specified module. See the `sphinx.ext.autosummary` extension for details on this process. The main difference from the `autosummary` directive is that `autosummary` requires manually inputting all attributes that appear in the table, while this captures the entries automatically. This directive requires a single argument that must be a module or package. It also accepts any options supported by the `autosummary` directive- see `sphinx.ext.autosummary` for details. It also accepts two additional options: * ``:classes-only:`` If present, the autosummary table will only contain entries for classes. This cannot be used at the same time with ``:functions-only:`` . * ``:functions-only:`` If present, the autosummary table will only contain entries for functions. This cannot be used at the same time with ``:classes-only:`` . * ``:skip: obj1, [obj2, obj3, ...]`` If present, specifies that the listed objects should be skipped and not have their documentation generated, nor be includded in the summary table. =========================== `automod-diagram` directive =========================== This directive will produce an inheritance diagram like that of the `sphinx.ext.inheritance_diagram` extension. This directive requires a single argument that must be a module or package. It accepts no options. .. note:: Like 'inheritance-diagram', 'automod-diagram' requires `graphviz <http://www.graphviz.org/>`_ to generate the inheritance diagram. """ import inspect import os import re from sphinx.ext.autosummary import Autosummary from sphinx.ext.inheritance_diagram import InheritanceDiagram from docutils.parsers.rst.directives import flag from ...utils import find_mod_objs from .astropyautosummary import AstropyAutosummary def _str_list_converter(argument): """ A directive option conversion function that converts the option into a list of strings. Used for 'skip' option. """ if argument is None: return [] else: return [s.strip() for s in argument.split(',')] class Automodsumm(AstropyAutosummary): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False has_content = False option_spec = dict(Autosummary.option_spec) option_spec['functions-only'] = flag option_spec['classes-only'] = flag option_spec['skip'] = _str_list_converter def run(self): self.warnings = [] nodelist = [] try: localnames, fqns, objs = find_mod_objs(self.arguments[0]) except ImportError: self.warnings = [] self.warn("Couldn't import module " + self.arguments[0]) return self.warnings try: # set self.content to trick the Autosummary internals. # Be sure to respect functions-only and classes-only. funconly = 'functions-only' in self.options clsonly = 'classes-only' in self.options skipmap = {} if 'skip' in self.options: skipnames = set(self.options['skip']) for lnm, fqnm in zip(localnames, fqns): if lnm in skipnames: skipnames.remove(lnm) skipmap[fqnm] = lnm if len(skipnames) > 0: self.warn('Tried to skip objects {objs} in module {mod}, ' 'but they were not present. 
Ignoring.'.format( objs=skipnames, mod=self.arguments[0])) if funconly and not clsonly: cont = [] for nm, obj in zip(fqns, objs): if nm not in skipmap and inspect.isfunction(obj): cont.append('~' + nm) elif clsonly: cont = [] for nm, obj in zip(fqns, objs): if nm not in skipmap and inspect.isclass(obj): cont.append('~' + nm) else: if clsonly and funconly: self.warning('functions-only and classes-only both ' 'defined. Skipping.') cont = ['~' + nm for nm in fqns if nm not in skipmap] self.content = cont #can't use super because Sphinx/docutils has trouble #return super(Autosummary,self).run() nodelist.extend(Autosummary.run(self)) return self.warnings + nodelist finally: # has_content = False for the Automodsumm self.content = [] #<-------------------automod-diagram stuff------------------------------------> class Automoddiagram(InheritanceDiagram): def run(self): try: nms, objs = find_mod_objs(self.arguments[0], onlylocals=True)[1:] except ImportError: self.warnings = [] self.warn("Couldn't import module " + self.arguments[0]) return self.warnings clsnms = [] for n, o in zip(nms, objs): if inspect.isclass(o): clsnms.append(n) oldargs = self.arguments try: if len(clsnms) > 0: self.arguments = [u' '.join(clsnms)] return InheritanceDiagram.run(self) finally: self.arguments = oldargs #<---------------------automodsumm generation stuff---------------------------> def process_automodsumm_generation(app): env = app.builder.env ext = app.config.source_suffix filestosearch = [x + ext for x in env.found_docs if os.path.isfile(env.doc2path(x))]\ liness = [automodsumm_to_autosummary_lines(sfn, app) for sfn in filestosearch] for sfn, lines in zip(filestosearch, liness): if len(lines) > 0: generate_automodsumm_docs(lines, sfn, builder=app.builder, warn=app.warn, info=app.info, suffix=app.config.source_suffix, base_path=app.srcdir) #_automodsummrex = re.compile(r'^(\s*)\.\. automodsumm::\s*([A-Za-z0-9_.]+)\s*' # r'\n\1(\s*)(\S|$)', re.MULTILINE) _lineendrex = r'(?:\n|$)' _hdrex = r'^\n?(\s*)\.\. automodsumm::\s*(\S+)\s*' + _lineendrex _oprex1 = r'(?:\1(\s+)\S.*' + _lineendrex + ')' _oprex2 = r'(?:\1\4\S.*' + _lineendrex + ')' _automodsummrex = re.compile(_hdrex + '(' + _oprex1 + '?' + _oprex2 + '*)', re.MULTILINE) def automodsumm_to_autosummary_lines(fn, app): """ Generates lines from a file with an "automodsumm" entry suitable for feeding into "autosummary". Searches the provided file for `automodsumm` directives and returns a list of lines specifying the `autosummary` commands for the modules requested. This does *not* return the whole file contents - just an autosummary section in place of any :automodsumm: entries. Note that any options given for `automodsumm` are also included in the generated `autosummary` section. Parameters ---------- fn : str The name of the file to search for `automodsumm` entries. app : sphinx.application.Application The sphinx Application object Return ------ lines : list of str Lines for all `automodsumm` entries with the entries replaced by `autosummary` and the module's members added. 
""" fullfn = os.path.join(app.builder.env.srcdir, fn) with open(fullfn) as fr: if 'astropy_helpers.sphinx.ext.automodapi' in app._extensions: from astropy_helpers.sphinx.ext.automodapi import automodapi_replace # Must do the automodapi on the source to get the automodsumm # that might be in there filestr = automodapi_replace(fr.read(), app, True, fn, False) else: filestr = fr.read() spl = _automodsummrex.split(filestr) #0th entry is the stuff before the first automodsumm line indent1s = spl[1::5] mods = spl[2::5] opssecs = spl[3::5] indent2s = spl[4::5] remainders = spl[5::5] # only grab automodsumm sections and convert them to autosummary with the # entries for all the public objects newlines = [] #loop over all automodsumms in this document for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods, opssecs, remainders)): allindent = i1 + i2 #filter out functions-only and classes-only options if present oplines = ops.split('\n') toskip = [] funcsonly = clssonly = False for i, ln in reversed(list(enumerate(oplines))): if ':functions-only:' in ln: funcsonly = True del oplines[i] if ':classes-only:' in ln: clssonly = True del oplines[i] if ':skip:' in ln: toskip.extend(_str_list_converter(ln.replace(':skip:', ''))) del oplines[i] if funcsonly and clssonly: msg = ('Defined both functions-only and classes-only options. ' 'Skipping this directive.') lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)]) app.warn('[automodsumm]' + msg, (fn, lnnum)) continue newlines.append(i1 + '.. autosummary::') newlines.extend(oplines) for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=True)): if nm in toskip: continue if funcsonly and not inspect.isfunction(obj): continue if clssonly and not inspect.isclass(obj): continue newlines.append(allindent + '~' + fqn) return newlines def generate_automodsumm_docs(lines, srcfn, suffix='.rst', warn=None, info=None, base_path=None, builder=None, template_dir=None): """ This function is adapted from `sphinx.ext.autosummary.generate.generate_autosummmary_docs` to generate source for the automodsumm directives that should be autosummarized. Unlike generate_autosummary_docs, this function is called one file at a time. """ from sphinx.jinja2glue import BuiltinTemplateLoader from sphinx.ext.autosummary import import_by_name, get_documenter from sphinx.ext.autosummary.generate import (find_autosummary_in_lines, _simple_info, _simple_warn) from sphinx.util.osutil import ensuredir from sphinx.util.inspect import safe_getattr from jinja2 import FileSystemLoader, TemplateNotFound from jinja2.sandbox import SandboxedEnvironment if info is None: info = _simple_info if warn is None: warn = _simple_warn #info('[automodsumm] generating automodsumm for: ' + srcfn) # Create our own templating environment - here we use Astropy's # templates rather than the default autosummary templates, in order to # allow docstrings to be shown for methods. 
template_dirs = [os.path.join(os.path.dirname(__file__), 'templates'), os.path.join(base_path, '_templates')] if builder is not None: # allow the user to override the templates template_loader = BuiltinTemplateLoader() template_loader.init(builder, dirs=template_dirs) else: if template_dir: template_dirs.insert(0, template_dir) template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) # read #items = find_autosummary_in_files(sources) items = find_autosummary_in_lines(lines, filename=srcfn) if len(items) > 0: msg = '[automodsumm] {1}: found {0} automodsumm entries to generate' info(msg.format(len(items), srcfn)) # gennms = [item[0] for item in items] # if len(gennms) > 20: # gennms = gennms[:10] + ['...'] + gennms[-10:] # info('[automodsumm] generating autosummary for: ' + ', '.join(gennms)) # remove possible duplicates items = dict([(item, True) for item in items]).keys() # keep track of new files new_files = [] # write for name, path, template_name in sorted(items): if path is None: # The corresponding autosummary:: directive did not have # a :toctree: option continue path = os.path.abspath(path) ensuredir(path) try: name, obj, parent = import_by_name(name) except ImportError, e: warn('[automodapi] failed to import %r: %s' % (name, e)) continue fn = os.path.join(path, name + suffix) # skip it if it exists if os.path.isfile(fn): continue new_files.append(fn) f = open(fn, 'w') try: doc = get_documenter(obj, parent) if template_name is not None: template = template_env.get_template(template_name) else: tmplstr = 'autosummary/%s.rst' try: template = template_env.get_template(tmplstr % doc.objtype) except TemplateNotFound: template = template_env.get_template(tmplstr % 'base') def get_members_mod(obj, typ, include_public=[]): """ typ = None -> all """ items = [] for name in dir(obj): try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if typ is None or documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items def get_members_class(obj, typ, include_public=[], include_base=False): """ typ = None -> all include_base -> include attrs that are from a base class """ items = [] # using dir gets all of the attributes, including the elements # from the base class, otherwise use __slots__ or __dict__ if include_base: names = dir(obj) else: if hasattr(obj, '__slots__'): names = tuple(getattr(obj, '__slots__')) else: names = getattr(obj, '__dict__').keys() for name in names: try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if typ is None or documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items ns = {} if doc.objtype == 'module': ns['members'] = get_members_mod(obj, None) ns['functions'], ns['all_functions'] = \ get_members_mod(obj, 'function') ns['classes'], ns['all_classes'] = \ get_members_mod(obj, 'class') ns['exceptions'], ns['all_exceptions'] = \ get_members_mod(obj, 'exception') elif doc.objtype == 'class': ns['members'] = get_members_class(obj, None) ns['methods'], ns['all_methods'] = \ get_members_class(obj, 'method', ['__init__']) ns['attributes'], ns['all_attributes'] = \ get_members_class(obj, 'attribute') ns['methods'].sort() ns['attributes'].sort() parts = name.split('.') if doc.objtype in ('method', 'attribute'): mod_name = '.'.join(parts[:-2]) cls_name = parts[-2] obj_name = 
'.'.join(parts[-2:]) ns['class'] = cls_name else: mod_name, obj_name = '.'.join(parts[:-1]), parts[-1] ns['fullname'] = name ns['module'] = mod_name ns['objname'] = obj_name ns['name'] = parts[-1] ns['objtype'] = doc.objtype ns['underline'] = len(name) * '=' # We now check whether a file for reference footnotes exists for # the module being documented. We first check if the # current module is a file or a directory, as this will give a # different path for the reference file. For example, if # documenting astropy.wcs then the reference file is at # ../wcs/references.txt, while if we are documenting # astropy.config.logging_helper (which is at # astropy/config/logging_helper.py) then the reference file is set # to ../config/references.txt if '.' in mod_name: mod_name_dir = mod_name.replace('.', '/').split('/', 1)[1] else: mod_name_dir = mod_name if not os.path.isdir(os.path.join(base_path, mod_name_dir)) \ and os.path.isdir(os.path.join(base_path, mod_name_dir.rsplit('/', 1)[0])): mod_name_dir = mod_name_dir.rsplit('/', 1)[0] # We then have to check whether it exists, and if so, we pass it # to the template. if os.path.exists(os.path.join(base_path, mod_name_dir, 'references.txt')): # An important subtlety here is that the path we pass in has # to be relative to the file being generated, so we have to # figure out the right number of '..'s ndirsback = path.replace(base_path, '').count('/') ref_file_rel_segments = ['..'] * ndirsback ref_file_rel_segments.append(mod_name_dir) ref_file_rel_segments.append('references.txt') ns['referencefile'] = os.path.join(*ref_file_rel_segments) rendered = template.render(**ns) f.write(rendered) finally: f.close() def setup(app): # need our autosummary app.setup_extension('astropy_helpers.sphinx.ext.astropyautosummary') # need inheritance-diagram for automod-diagram app.setup_extension('sphinx.ext.inheritance_diagram') app.add_directive('automod-diagram', Automoddiagram) app.add_directive('automodsumm', Automodsumm) app.connect('builder-inited', process_automodsumm_generation)
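# A standalone sketch of the splitting step inside automodsumm_to_autosummary_lines():
# the pattern below is assembled the same way as _automodsummrex above, and re.split()
# then yields, per directive, its indent, target module, option block, inner indent and
# the remaining text. The sample document is made up.
import re

_lineend = r'(?:\n|$)'
demo_rex = re.compile(r'^\n?(\s*)\.\. automodsumm::\s*(\S+)\s*' + _lineend +
                      r'((?:\1(\s+)\S.*' + _lineend + r')?(?:\1\4\S.*' + _lineend + r')*)',
                      re.MULTILINE)

sample = (".. automodsumm:: mypackage.mymodule\n"
          "    :functions-only:\n"
          "    :skip: helper_a, helper_b\n"
          "\n"
          "Trailing text.\n")

parts = demo_rex.split(sample)
print(parts[2])  # target module -> mypackage.mymodule
print(parts[3])  # option block  -> the ':functions-only:' and ':skip:' lines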
""" Test basics of linux core file debugging. """ from __future__ import print_function import shutil import struct import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class LinuxCoreTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) _i386_pid = 32306 _x86_64_pid = 32259 _s390x_pid = 1045 _mips64_n64_pid = 25619 _mips64_n32_pid = 3670 _mips_o32_pid = 3532 _ppc64le_pid = 28147 _i386_regions = 4 _x86_64_regions = 5 _s390x_regions = 2 _mips_regions = 5 _ppc64le_regions = 2 def setUp(self): super(LinuxCoreTestCase, self).setUp() self._initial_platform = lldb.DBG.GetSelectedPlatform() def tearDown(self): lldb.DBG.SetSelectedPlatform(self._initial_platform) super(LinuxCoreTestCase, self).tearDown() @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("X86") def test_i386(self): """Test that lldb can read the process information from an i386 linux core file.""" self.do_test("linux-i386", self._i386_pid, self._i386_regions, "a.out") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIfLLVMTargetMissing("Mips") def test_mips_o32(self): """Test that lldb can read the process information from an MIPS O32 linux core file.""" self.do_test("linux-mipsel-gnuabio32", self._mips_o32_pid, self._mips_regions, "linux-mipsel-gn") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIfLLVMTargetMissing("Mips") def test_mips_n32(self): """Test that lldb can read the process information from an MIPS N32 linux core file """ self.do_test("linux-mips64el-gnuabin32", self._mips64_n32_pid, self._mips_regions, "linux-mips64el-") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIfLLVMTargetMissing("Mips") def test_mips_n64(self): """Test that lldb can read the process information from an MIPS N64 linux core file """ self.do_test("linux-mips64el-gnuabi64", self._mips64_n64_pid, self._mips_regions, "linux-mips64el-") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("PowerPC") def test_ppc64le(self): """Test that lldb can read the process information from an ppc64le linux core file.""" self.do_test("linux-ppc64le", self._ppc64le_pid, self._ppc64le_regions, "linux-ppc64le.ou") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("X86") def test_x86_64(self): """Test that lldb can read the process information from an x86_64 linux core file.""" self.do_test("linux-x86_64", self._x86_64_pid, self._x86_64_regions, "a.out") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("SystemZ") def test_s390x(self): """Test that lldb can read the process information from an s390x linux core file.""" self.do_test("linux-s390x", self._s390x_pid, self._s390x_regions, "a.out") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("X86") def test_same_pid_running(self): """Test that we read the information from the core correctly even if we have a running process with the same PID around""" exe_file = self.getBuildArtifact("linux-x86_64-pid.out") core_file = self.getBuildArtifact("linux-x86_64-pid.core") shutil.copyfile("linux-x86_64.out", exe_file) shutil.copyfile("linux-x86_64.core", core_file) with 
open(core_file, "r+b") as f: # These are offsets into the NT_PRSTATUS and NT_PRPSINFO structures in the note # segment of the core file. If you update the file, these offsets may need updating # as well. (Notes can be viewed with readelf --notes.) for pid_offset in [0x1c4, 0x320]: f.seek(pid_offset) self.assertEqual( struct.unpack( "<I", f.read(4))[0], self._x86_64_pid) # We insert our own pid, and make sure the test still # works. f.seek(pid_offset) f.write(struct.pack("<I", os.getpid())) self.do_test(self.getBuildArtifact("linux-x86_64-pid"), os.getpid(), self._x86_64_regions, "a.out") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("X86") def test_two_cores_same_pid(self): """Test that we handle the situation if we have two core files with the same PID around""" alttarget = self.dbg.CreateTarget("altmain.out") altprocess = alttarget.LoadCore("altmain.core") self.assertTrue(altprocess, PROCESS_IS_VALID) self.assertEqual(altprocess.GetNumThreads(), 1) self.assertEqual(altprocess.GetProcessID(), self._x86_64_pid) altframe = altprocess.GetSelectedThread().GetFrameAtIndex(0) self.assertEqual(altframe.GetFunctionName(), "_start") self.assertEqual( altframe.GetLineEntry().GetLine(), line_number( "altmain.c", "Frame _start")) error = lldb.SBError() F = altprocess.ReadCStringFromMemory( altframe.FindVariable("F").GetValueAsUnsigned(), 256, error) self.assertTrue(error.Success()) self.assertEqual(F, "_start") # without destroying this process, run the test which opens another core file with the # same pid self.do_test("linux-x86_64", self._x86_64_pid, self._x86_64_regions, "a.out") @expectedFailureAll(bugnumber="llvm.org/pr37371", hostoslist=["windows"]) @skipIf(triple='^mips') @skipIfLLVMTargetMissing("X86") def test_FPR_SSE(self): # check x86_64 core file target = self.dbg.CreateTarget(None) self.assertTrue(target, VALID_TARGET) process = target.LoadCore("linux-fpr_sse_x86_64.core") values = {} values["fctrl"] = "0x037f" values["fstat"] = "0x0000" values["ftag"] = "0x00ff" values["fop"] = "0x0000" values["fiseg"] = "0x00000000" values["fioff"] = "0x0040011e" values["foseg"] = "0x00000000" values["fooff"] = "0x00000000" values["mxcsr"] = "0x00001f80" values["mxcsrmask"] = "0x0000ffff" values["st0"] = "{0x99 0xf7 0xcf 0xfb 0x84 0x9a 0x20 0x9a 0xfd 0x3f}" values["st1"] = "{0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x80 0xff 0x3f}" values["st2"] = "{0xfe 0x8a 0x1b 0xcd 0x4b 0x78 0x9a 0xd4 0x00 0x40}" values["st3"] = "{0xac 0x79 0xcf 0xd1 0xf7 0x17 0x72 0xb1 0xfe 0x3f}" values["st4"] = "{0xbc 0xf0 0x17 0x5c 0x29 0x3b 0xaa 0xb8 0xff 0x3f}" values["st5"] = "{0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x80 0xff 0x3f}" values["st6"] = "{0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00}" values["st7"] = "{0x35 0xc2 0x68 0x21 0xa2 0xda 0x0f 0xc9 0x00 0x40}" values["xmm0"] = "{0x29 0x31 0x64 0x46 0x29 0x31 0x64 0x46 0x29 0x31 0x64 0x46 0x29 0x31 0x64 0x46}" values["xmm1"] = "{0x9c 0xed 0x86 0x64 0x9c 0xed 0x86 0x64 0x9c 0xed 0x86 0x64 0x9c 0xed 0x86 0x64}" values["xmm2"] = "{0x07 0xc2 0x1f 0xd7 0x07 0xc2 0x1f 0xd7 0x07 0xc2 0x1f 0xd7 0x07 0xc2 0x1f 0xd7}" values["xmm3"] = "{0xa2 0x20 0x48 0x25 0xa2 0x20 0x48 0x25 0xa2 0x20 0x48 0x25 0xa2 0x20 0x48 0x25}" values["xmm4"] = "{0xeb 0x5a 0xa8 0xc4 0xeb 0x5a 0xa8 0xc4 0xeb 0x5a 0xa8 0xc4 0xeb 0x5a 0xa8 0xc4}" values["xmm5"] = "{0x49 0x41 0x20 0x0b 0x49 0x41 0x20 0x0b 0x49 0x41 0x20 0x0b 0x49 0x41 0x20 0x0b}" values["xmm6"] = "{0xf8 0xf1 0x8b 0x4f 0xf8 0xf1 0x8b 0x4f 0xf8 0xf1 0x8b 0x4f 0xf8 0xf1 0x8b 
0x4f}" values["xmm7"] = "{0x13 0xf1 0x30 0xcd 0x13 0xf1 0x30 0xcd 0x13 0xf1 0x30 0xcd 0x13 0xf1 0x30 0xcd}" for regname, value in values.iteritems(): self.expect("register read {}".format(regname), substrs=["{} = {}".format(regname, value)]) # now check i386 core file target = self.dbg.CreateTarget(None) self.assertTrue(target, VALID_TARGET) process = target.LoadCore("linux-fpr_sse_i386.core") values["fioff"] = "0x080480cc" for regname, value in values.iteritems(): self.expect("register read {}".format(regname), substrs=["{} = {}".format(regname, value)]) def check_memory_regions(self, process, region_count): region_list = process.GetMemoryRegions() self.assertEqual(region_list.GetSize(), region_count) region = lldb.SBMemoryRegionInfo() # Check we have the right number of regions. self.assertEqual(region_list.GetSize(), region_count) # Check that getting a region beyond the last in the list fails. self.assertFalse( region_list.GetMemoryRegionAtIndex( region_count, region)) # Check each region is valid. for i in range(region_list.GetSize()): # Check we can actually get this region. self.assertTrue(region_list.GetMemoryRegionAtIndex(i, region)) # Every region in the list should be mapped. self.assertTrue(region.IsMapped()) # Test the address at the start of a region returns it's enclosing # region. begin_address = region.GetRegionBase() region_at_begin = lldb.SBMemoryRegionInfo() error = process.GetMemoryRegionInfo(begin_address, region_at_begin) self.assertEqual(region, region_at_begin) # Test an address in the middle of a region returns it's enclosing # region. middle_address = (region.GetRegionBase() + region.GetRegionEnd()) / 2 region_at_middle = lldb.SBMemoryRegionInfo() error = process.GetMemoryRegionInfo( middle_address, region_at_middle) self.assertEqual(region, region_at_middle) # Test the address at the end of a region returns it's enclosing # region. end_address = region.GetRegionEnd() - 1 region_at_end = lldb.SBMemoryRegionInfo() error = process.GetMemoryRegionInfo(end_address, region_at_end) self.assertEqual(region, region_at_end) # Check that quering the end address does not return this region but # the next one. 
next_region = lldb.SBMemoryRegionInfo() error = process.GetMemoryRegionInfo( region.GetRegionEnd(), next_region) self.assertNotEqual(region, next_region) self.assertEqual( region.GetRegionEnd(), next_region.GetRegionBase()) # Check that query beyond the last region returns an unmapped region # that ends at LLDB_INVALID_ADDRESS last_region = lldb.SBMemoryRegionInfo() region_list.GetMemoryRegionAtIndex(region_count - 1, last_region) end_region = lldb.SBMemoryRegionInfo() error = process.GetMemoryRegionInfo( last_region.GetRegionEnd(), end_region) self.assertFalse(end_region.IsMapped()) self.assertEqual( last_region.GetRegionEnd(), end_region.GetRegionBase()) self.assertEqual(end_region.GetRegionEnd(), lldb.LLDB_INVALID_ADDRESS) def check_state(self, process): with open(os.devnull) as devnul: # sanitize test output self.dbg.SetOutputFileHandle(devnul, False) self.dbg.SetErrorFileHandle(devnul, False) self.assertTrue(process.is_stopped) # Process.Continue error = process.Continue() self.assertFalse(error.Success()) self.assertTrue(process.is_stopped) # Thread.StepOut thread = process.GetSelectedThread() thread.StepOut() self.assertTrue(process.is_stopped) # command line self.dbg.HandleCommand('s') self.assertTrue(process.is_stopped) self.dbg.HandleCommand('c') self.assertTrue(process.is_stopped) # restore file handles self.dbg.SetOutputFileHandle(None, False) self.dbg.SetErrorFileHandle(None, False) def do_test(self, filename, pid, region_count, thread_name): target = self.dbg.CreateTarget(filename + ".out") process = target.LoadCore(filename + ".core") self.assertTrue(process, PROCESS_IS_VALID) self.assertEqual(process.GetNumThreads(), 1) self.assertEqual(process.GetProcessID(), pid) self.check_state(process) thread = process.GetSelectedThread() self.assertTrue(thread) self.assertEqual(thread.GetThreadID(), pid) self.assertEqual(thread.GetName(), thread_name) backtrace = ["bar", "foo", "_start"] self.assertEqual(thread.GetNumFrames(), len(backtrace)) for i in range(len(backtrace)): frame = thread.GetFrameAtIndex(i) self.assertTrue(frame) self.assertEqual(frame.GetFunctionName(), backtrace[i]) self.assertEqual(frame.GetLineEntry().GetLine(), line_number("main.c", "Frame " + backtrace[i])) self.assertEqual( frame.FindVariable("F").GetValueAsUnsigned(), ord( backtrace[i][0])) self.check_memory_regions(process, region_count) self.dbg.DeleteTarget(target)
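# --- Illustrative sketch (not part of the test suite) -----------------------
# A minimal, stand-alone use of the same SB API surface exercised above:
# create a debugger, load a core/executable pair and walk its memory regions.
# The file names are assumptions borrowed from the tests
# (linux-x86_64.out / linux-x86_64.core); substitute a real pair when running.
import lldb

debugger = lldb.SBDebugger.Create()
core_target = debugger.CreateTarget("linux-x86_64.out")
core_process = core_target.LoadCore("linux-x86_64.core")

print("pid:", core_process.GetProcessID())
thread = core_process.GetSelectedThread()
for i in range(thread.GetNumFrames()):
    print("frame:", thread.GetFrameAtIndex(i).GetFunctionName())

regions = core_process.GetMemoryRegions()
region_info = lldb.SBMemoryRegionInfo()
for idx in range(regions.GetSize()):
    if regions.GetMemoryRegionAtIndex(idx, region_info):
        print(hex(region_info.GetRegionBase()),
              hex(region_info.GetRegionEnd()),
              region_info.IsMapped())

lldb.SBDebugger.Destroy(debugger)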
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') JSONType = Any ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_unregister_request( resource_provider_namespace: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister') path_format_arguments = { "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="POST", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_register_at_management_group_scope_request( resource_provider_namespace: str, group_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/providers/Microsoft.Management/managementGroups/{groupId}/providers/{resourceProviderNamespace}/register') path_format_arguments = { "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'), "groupId": _SERIALIZER.url("group_id", group_id, 'str', max_length=90, min_length=1), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="POST", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_provider_permissions_request( resource_provider_namespace: str, subscription_id: str, **kwargs: Any ) -> 
HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/providerPermissions') path_format_arguments = { "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_register_request( resource_provider_namespace: str, subscription_id: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register') path_format_arguments = { "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="POST", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_list_request( subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any ) -> HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] if expand is not None: query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str') query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_at_tenant_scope_request( *, expand: Optional[str] = None, **kwargs: Any ) -> HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/providers') # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] if expand is not None: query_parameters['$expand'] = 
_SERIALIZER.query("expand", expand, 'str') query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_request( resource_provider_namespace: str, subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any ) -> HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}') path_format_arguments = { "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] if expand is not None: query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str') query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_at_tenant_scope_request( resource_provider_namespace: str, *, expand: Optional[str] = None, **kwargs: Any ) -> HttpRequest: api_version = "2021-04-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/providers/{resourceProviderNamespace}') path_format_arguments = { "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] if expand is not None: query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str') query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) class ProvidersOperations(object): """ProvidersOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.resource.resources.v2021_04_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def unregister( self, resource_provider_namespace: str, **kwargs: Any ) -> "_models.Provider": """Unregisters a subscription from a resource provider. :param resource_provider_namespace: The namespace of the resource provider to unregister. 
:type resource_provider_namespace: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Provider, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_04_01.models.Provider :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_unregister_request( resource_provider_namespace=resource_provider_namespace, subscription_id=self._config.subscription_id, template_url=self.unregister.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Provider', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized unregister.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister'} # type: ignore @distributed_trace def register_at_management_group_scope( self, resource_provider_namespace: str, group_id: str, **kwargs: Any ) -> None: """Registers a management group with a resource provider. :param resource_provider_namespace: The namespace of the resource provider to register. :type resource_provider_namespace: str :param group_id: The management group ID. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_register_at_management_group_scope_request( resource_provider_namespace=resource_provider_namespace, group_id=group_id, template_url=self.register_at_management_group_scope.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) register_at_management_group_scope.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{groupId}/providers/{resourceProviderNamespace}/register'} # type: ignore @distributed_trace def provider_permissions( self, resource_provider_namespace: str, **kwargs: Any ) -> "_models.ProviderPermissionListResult": """Get the provider permissions. :param resource_provider_namespace: The namespace of the resource provider. 
:type resource_provider_namespace: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ProviderPermissionListResult, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_04_01.models.ProviderPermissionListResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderPermissionListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_provider_permissions_request( resource_provider_namespace=resource_provider_namespace, subscription_id=self._config.subscription_id, template_url=self.provider_permissions.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ProviderPermissionListResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized provider_permissions.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/providerPermissions'} # type: ignore @distributed_trace def register( self, resource_provider_namespace: str, properties: Optional["_models.ProviderRegistrationRequest"] = None, **kwargs: Any ) -> "_models.Provider": """Registers a subscription with a resource provider. :param resource_provider_namespace: The namespace of the resource provider to register. :type resource_provider_namespace: str :param properties: The third party consent for S2S. 
:type properties: ~azure.mgmt.resource.resources.v2021_04_01.models.ProviderRegistrationRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: Provider, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_04_01.models.Provider :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] if properties is not None: _json = self._serialize.body(properties, 'ProviderRegistrationRequest') else: _json = None request = build_register_request( resource_provider_namespace=resource_provider_namespace, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self.register.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Provider', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized register.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register'} # type: ignore @distributed_trace def list( self, expand: Optional[str] = None, **kwargs: Any ) -> Iterable["_models.ProviderListResult"]: """Gets all resource providers for a subscription. :param expand: The properties to include in the results. For example, use &$expand=metadata in the query string to retrieve resource provider metadata. To include property aliases in response, use $expand=resourceTypes/aliases. 
:type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ProviderListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2021_04_01.models.ProviderListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, expand=expand, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( subscription_id=self._config.subscription_id, expand=expand, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("ProviderListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers'} # type: ignore @distributed_trace def list_at_tenant_scope( self, expand: Optional[str] = None, **kwargs: Any ) -> Iterable["_models.ProviderListResult"]: """Gets all resource providers for the tenant. :param expand: The properties to include in the results. For example, use &$expand=metadata in the query string to retrieve resource provider metadata. To include property aliases in response, use $expand=resourceTypes/aliases. 
:type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ProviderListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2021_04_01.models.ProviderListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_at_tenant_scope_request( expand=expand, template_url=self.list_at_tenant_scope.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_at_tenant_scope_request( expand=expand, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("ProviderListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_at_tenant_scope.metadata = {'url': '/providers'} # type: ignore @distributed_trace def get( self, resource_provider_namespace: str, expand: Optional[str] = None, **kwargs: Any ) -> "_models.Provider": """Gets the specified resource provider. :param resource_provider_namespace: The namespace of the resource provider. :type resource_provider_namespace: str :param expand: The $expand query parameter. For example, to include property aliases in response, use $expand=resourceTypes/aliases. 
:type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Provider, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_04_01.models.Provider :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( resource_provider_namespace=resource_provider_namespace, subscription_id=self._config.subscription_id, expand=expand, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Provider', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}'} # type: ignore @distributed_trace def get_at_tenant_scope( self, resource_provider_namespace: str, expand: Optional[str] = None, **kwargs: Any ) -> "_models.Provider": """Gets the specified resource provider at the tenant level. :param resource_provider_namespace: The namespace of the resource provider. :type resource_provider_namespace: str :param expand: The $expand query parameter. For example, to include property aliases in response, use $expand=resourceTypes/aliases. :type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Provider, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_04_01.models.Provider :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_at_tenant_scope_request( resource_provider_namespace=resource_provider_namespace, expand=expand, template_url=self.get_at_tenant_scope.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Provider', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_at_tenant_scope.metadata = {'url': '/providers/{resourceProviderNamespace}'} # type: ignore
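# --- Illustrative sketch (not generated code) --------------------------------
# ProvidersOperations is normally reached through the generated
# ResourceManagementClient rather than instantiated directly. The credential,
# subscription id and provider namespace below are placeholders, not values
# taken from this module.
from azure.identity import DefaultAzureCredential
from azure.mgmt.resource import ResourceManagementClient

client = ResourceManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# POST .../providers/{namespace}/register, deserialized into a Provider model.
provider = client.providers.register("Microsoft.Compute")
print(provider.namespace, provider.registration_state)

# GET .../providers with $expand, paged transparently via ItemPaged.
for p in client.providers.list(expand="resourceTypes/aliases"):
    print(p.namespace)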
"""Hierarchical Agglomerative Clustering These routines perform some hierachical agglomerative clustering of some input data. Currently, only Ward's algorithm is implemented. Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort, Gael Varoquaux License: BSD 3 clause """ from heapq import heapify, heappop, heappush, heappushpop import itertools import warnings import numpy as np from scipy import sparse from scipy.cluster import hierarchy from ..base import BaseEstimator from ..utils._csgraph import cs_graph_components from ..externals.joblib import Memory from . import _hierarchical from ._feature_agglomeration import AgglomerationTransform ############################################################################### # Ward's algorithm def ward_tree(X, connectivity=None, n_components=None, copy=True): """Ward clustering based on a Feature matrix. The inertia matrix uses a Heapq-based representation. This is the structured version, that takes into account a some topological structure between samples. Parameters ---------- X : array of shape (n_samples, n_features) feature matrix representing n_samples samples to be clustered connectivity : sparse matrix. connectivity matrix. Defines for each sample the neigbhoring samples following a given structure of the data. The matrix is assumed to be symmetric and only the upper triangular half is used. Default is None, i.e, the Ward algorithm is unstructured. n_components : int (optional) Number of connected components. If None the number of connected components is estimated from the connectivity matrix. copy : bool (optional) Make a copy of connectivity or work inplace. If connectivity is not of LIL type there will be a copy in any case. Returns ------- children : list of pairs. Lenght of n_nodes list of the children of each nodes. Leaves of the tree have empty list of children. n_components : sparse matrix. The number of connected components in the graph. n_leaves : int The number of leaves in the tree """ X = np.asarray(X) n_samples, n_features = X.shape if X.ndim == 1: X = np.reshape(X, (-1, 1)) # Compute the number of nodes if connectivity is not None: if n_components is None: n_components, _ = cs_graph_components(connectivity) if n_components > 1: warnings.warn("the number of connected components of the" " connectivity matrix is %d > 1. The tree will be stopped early." 
% n_components) else: out = hierarchy.ward(X) children_ = out[:, :2].astype(np.int) return children_, 1, n_samples n_nodes = 2 * n_samples - n_components if (connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples): raise ValueError('Wrong shape for connectivity matrix: %s ' 'when X is %s' % (connectivity.shape, X.shape)) # convert connectivity matrix to LIL eventually with a copy if sparse.isspmatrix_lil(connectivity) and copy: connectivity = connectivity.copy() else: connectivity = connectivity.tolil() # Remove diagonal from connectivity matrix connectivity.setdiag(np.zeros(connectivity.shape[0])) # create inertia matrix coord_row = [] coord_col = [] A = [] for ind, row in enumerate(connectivity.rows): A.append(row) # We keep only the upper triangular for the moments # Generator expressions are faster than arrays on the following row = [i for i in row if i < ind] coord_row.extend(len(row) * [ind, ]) coord_col.extend(row) coord_row = np.array(coord_row, dtype=np.int) coord_col = np.array(coord_col, dtype=np.int) # build moments as a list moments_1 = np.zeros(n_nodes) moments_1[:n_samples] = 1 moments_2 = np.zeros((n_nodes, n_features)) moments_2[:n_samples] = X inertia = np.empty(len(coord_row), dtype=np.float) _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia) inertia = zip(inertia, coord_row, coord_col) heapify(inertia) # prepare the main fields parent = np.arange(n_nodes, dtype=np.int) heights = np.zeros(n_nodes) used_node = np.ones(n_nodes, dtype=bool) children = [] visited = np.empty(n_nodes, dtype=bool) # recursive merge loop for k in xrange(n_samples, n_nodes): # identify the merge while True: inert, i, j = heappop(inertia) if used_node[i] and used_node[j]: break parent[i], parent[j], heights[k] = k, k, inert children.append([i, j]) used_node[i] = used_node[j] = False # update the moments moments_1[k] = moments_1[i] + moments_1[j] moments_2[k] = moments_2[i] + moments_2[j] # update the structure matrix A and the inertia matrix coord_col = [] visited[:] = False visited[k] = True for l in set(A[i]).union(A[j]): parent_l = parent[l] while parent_l != l: l = parent_l parent_l = parent[l] if not visited[l]: visited[l] = True coord_col.append(l) A[l].append(k) A.append(coord_col) coord_col = np.array(coord_col, dtype=np.int) coord_row = np.empty_like(coord_col) coord_row.fill(k) ini = np.empty(len(coord_row), dtype=np.float) _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini) for tupl in itertools.izip(ini, coord_row, coord_col): heappush(inertia, tupl) # Separate leaves in children (empty lists up to now) n_leaves = n_samples children = np.array(children) # return numpy array for efficient caching return children, n_components, n_leaves ############################################################################### # Functions for cutting hierarchical clustering tree def _hc_cut(n_clusters, children, n_leaves): """Function cutting the ward tree for a given number of clusters. Parameters ---------- n_clusters : int or ndarray The number of clusters to form. children : list of pairs. Length of n_nodes List of the children of each nodes. Leaves have empty list of children and are not stored. n_leaves : int Number of leaves of the tree. Returns ------- labels : array [n_points] cluster labels for each point """ if n_clusters > n_leaves: raise ValueError('Cannot extract more clusters than samples: ' '%s clusters where given for a tree with %s leaves.' 
% (n_clusters, n_leaves)) # In this function, we store nodes as a heap to avoid recomputing # the max of the nodes: the first element is always the smallest # We use negated indices as heaps work on smallest elements, and we # are interested in largest elements # children[-1] is the root of the tree nodes = [-(max(children[-1]) + 1)] for i in range(n_clusters - 1): # As we have a heap, nodes[0] is the smallest element these_children = children[-nodes[0] - n_leaves] # Insert the 2 children and remove the largest node heappush(nodes, -these_children[0]) heappushpop(nodes, -these_children[1]) label = np.zeros(n_leaves, dtype=np.int) for i, node in enumerate(nodes): label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i return label ############################################################################### # Class for Ward hierarchical clustering class Ward(BaseEstimator): """Ward hierarchical clustering: constructs a tree and cuts it. Parameters ---------- n_clusters : int or ndarray The number of clusters to find. connectivity : sparse matrix. Connectivity matrix. Defines for each sample the neigbhoring samples following a given structure of the data. Default is None, i.e, the hiearchical clustering algorithm is unstructured. memory : Instance of joblib.Memory or string Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. copy : bool Copy the connectivity matrix or work inplace. n_components : int (optional) The number of connected components in the graph defined by the \ connectivity matrix. If not set, it is estimated. Attributes ---------- `children_` : array-like, shape = [n_nodes, 2] List of the children of each nodes. Leaves of the tree do not appear. `labels_` : array [n_points] cluster labels for each point `n_leaves_` : int Number of leaves in the hiearchical tree. """ def __init__(self, n_clusters=2, memory=Memory(cachedir=None, verbose=0), connectivity=None, copy=True, n_components=None): self.n_clusters = n_clusters self.memory = memory self.copy = copy self.n_components = n_components self.connectivity = connectivity def fit(self, X): """Fit the hierarchical clustering on the data Parameters ---------- X : array-like, shape = [n_samples, n_features] The samples a.k.a. observations. Returns ------- self """ memory = self.memory if isinstance(memory, basestring): memory = Memory(cachedir=memory) # Construct the tree self.children_, self.n_components, self.n_leaves_ = \ memory.cache(ward_tree)(X, self.connectivity, n_components=self.n_components, copy=self.copy) # Cut the tree self.labels_ = _hc_cut(self.n_clusters, self.children_, self.n_leaves_) return self ############################################################################### # Ward-based feature agglomeration class WardAgglomeration(AgglomerationTransform, Ward): """Feature agglomeration based on Ward hierarchical clustering Parameters ---------- n_clusters : int or ndarray The number of clusters. connectivity : sparse matrix connectivity matrix. Defines for each feature the neigbhoring features following a given structure of the data. Default is None, i.e, the hiearchical agglomeration algorithm is unstructured. memory : Instance of joblib.Memory or string Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. copy : bool Copy the connectivity matrix or work inplace. 
    n_components : int (optional)
        The number of connected components in the graph defined by the
        connectivity matrix. If not set, it is estimated.

    Attributes
    ----------
    `children_` : array-like, shape = [n_nodes, 2]
        List of the children of each node. Leaves of the tree do not appear.

    `labels_` : array [n_points]
        Cluster labels for each point.

    `n_leaves_` : int
        Number of leaves in the hierarchical tree.
    """

    def fit(self, X, y=None, **params):
        """Fit the hierarchical clustering on the data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The data.

        Returns
        -------
        self
        """
        return Ward.fit(self, X.T, **params)
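# --- Illustrative usage sketch -----------------------------------------------
# A short end-to-end use of the Ward estimator defined above, assuming the
# historical scikit-learn layout this module belongs to (sklearn.cluster.Ward
# and sklearn.feature_extraction.image.grid_to_graph); recent releases expose
# the same algorithm as AgglomerativeClustering(linkage="ward").
import numpy as np
from sklearn.cluster import Ward
from sklearn.feature_extraction.image import grid_to_graph

rng = np.random.RandomState(0)
X = rng.rand(20 * 20, 1)                      # one feature per "pixel" sample
connectivity = grid_to_graph(n_x=20, n_y=20)  # 4-neighbour grid over samples

ward = Ward(n_clusters=5, connectivity=connectivity).fit(X)
print(ward.labels_.shape)   # (400,) -- one cluster label per sample
print(ward.n_leaves_)       # 400 leaves in the merge tree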
# -*- coding: utf-8 -*- import json import inspect from quixote.errors import TraversalError from quixote.errors import AccessError from vilya.libs.template import st, request from vilya.models.user import User from vilya.models.gist import Gist from vilya.models.gist_star import GistStar from vilya.models.gist_comment import GistComment from vilya.libs.text import highlight_code from vilya.views.util import is_mobile_device from vilya.config import DOMAIN from tasks import index_a_gist _q_exports = ['discover', 'forked', 'starred'] def _q_index(request): user = request.user if request.method == 'POST': desc, is_public, names, contents, oids = _get_req_gist_data(request) user = request.user owner_id = user and user.username or Gist.ANONYMOUS gist = Gist.add(desc, owner_id, is_public, names, contents) return request.redirect(gist.url) tdt = dict(request=request, gists=[], user=user) if user: gists = Gist.gets_by_owner(user.username, limit=4) tdt.update(dict(gists=gists)) if is_mobile_device(request): return st('/m/gist/index.html', **tdt) return st('/gist/index.html', **tdt) def _discover(request): user = request.user name = inspect.stack()[1][3] (page, start, link_prev, link_next, sort, direction) = make_page_args(request, name) gists = Gist.discover(name, sort, direction, start) tdt = dict( request=request, gists=gists, page=page, link_prev=link_prev, link_next=link_next, sort=sort, direction=direction, user=user ) return st('/gist/gists.html', **tdt) def discover(request): return _discover(request) def forked(request): return _discover(request) def starred(request): return _discover(request) def _q_lookup(request, item): if item.isdigit(): return GistUI(item) if item.count('.') == 1: gid, extend = item.split('.') if extend == 'js' and gid.isdigit(): return GistEmbedUI(gid) return UserGistUI(item) class GistUI: _q_exports = ['revisions', 'forks', 'stars', 'fork', 'star', 'unstar', 'edit', 'delete', 'comments', 'download', 're_index'] @property def comments(self): return GistCommentUI(self.id) def __init__(self, id): self.id = id self.gist = Gist.get(id) def _q_index(self, request): user = request.user tdt = dict(request=request, gist=self.gist, ref='master', user=user) if is_mobile_device(request): return st('/m/gist/gist_detail.html', **tdt) return st('/gist/gist_detail.html', **tdt) def _q_lookup(self, request, sha1): user = request.user if sha1 == 'raw': return RawGistUI(self.id) if sha1 is None or not self.gist.repo.is_commit(sha1): return TraversalError() tdt = {'request': request, 'gist': self.gist, 'ref': sha1, 'user': user} return st('/gist/gist_detail.html', **tdt) def edit(self, request): gist = self.gist user = request.user if not user or user.username != gist.owner_id: raise AccessError() if request.method == 'POST': desc, is_public, names, contents, oids = _get_req_gist_data( request) gist.update(desc, names, contents, oids) return request.redirect(gist.url) tdt = dict(request=request, gist=gist, user=user) if is_mobile_device(request): return st('/m/gist/edit.html', **tdt) return st('/gist/edit.html', **tdt) def delete(self, request): gist = self.gist user = request.user if not user or user.username != gist.owner_id: raise AccessError() gist.delete() return request.redirect('/gist/%s' % user.username) def revisions(self, request): user = request.user gist = self.gist page = int(request.get_form_var('page', 1)) skip = 3 * (page - 1) revlist = gist.get_revlist_with_renames(max_count=3, skip=skip) link_prev = _make_links(self.id, int(page) - 1, ext="revisions") if revlist: 
link_next = _make_links(self.id, int(page) + 1, ext="revisions") else: link_next = '' content = [] for r in revlist: # FIXME: try-except ? content.append(gist.repo.get_diff(r.sha, rename_detection=True)) tdt = { 'request': request, 'gist': gist, 'content': content, 'revlist': revlist, 'link_prev': link_prev, 'link_next': link_next, 'user': user, 'current_user': user, } return st('/gist/gist_revisions.html', **tdt) def forks(self, request): user = request.user gist = self.gist tdt = dict(request=request, gist=gist, user=user) return st('/gist/gist_forks.html', **tdt) def stars(self, request): user = request.user gist = self.gist tdt = dict(request=request, gist=gist, user=user) return st('/gist/gist_stars.html', **tdt) def fork(self, request): gist = self.gist new_gist = gist.fork(request.user.username) return request.redirect(new_gist.url) def star(self, request): GistStar.add(self.id, request.user.username) return request.redirect(self.gist.url) def unstar(self, request): star = GistStar.get_by_gist_and_user(self.id, request.user.username) if star: star.delete() return request.redirect(self.gist.url) def download(self, request): request.response.set_content_type("application/x-gzip") request.response.set_header("Content-Disposition", "filename=code_gist_%s.tar.gz" % self.id) return self.gist.repo.archive(name="code_gist_%s" % self.id) def re_index(self, request): index_a_gist(self.id) return request.redirect(self.gist.url) def _q_access(self, request): gist = self.gist user = request.user if not gist: raise TraversalError() if not gist.is_public: if not user or user.username != gist.owner_id: raise AccessError() class RawGistUI(object): _q_exports = [] def __init__(self, id): self.rev = '' self.gist = Gist.get(id) def _q_lookup(self, request, rev): self.rev = rev return RecursorGistUI(self.gist, self.rev) class RecursorGistUI(object): _q_exports = [] def __init__(self, gist, rev): self.gist = gist self.rev = rev self.path = '' def _q_lookup(self, request, path): self.path = path return self def __call__(self, request): try: # TODO: clean this text = self.gist.get_file(self.path, rev=self.rev) except IOError: raise TraversalError() if isinstance(text, bool) and text is False: raise TraversalError() resp = request.response resp.set_header("Content-Type", "text/plain; charset=utf-8") return text.encode('utf-8') class UserGistUI: _q_exports = ['forked', 'starred', 'public', 'secret'] def __init__(self, name): self.name = name self.user = User(name) current_user = request.user self.is_self = current_user and current_user.username == self.name ext = request.get_path().split('/')[-1] (self.page, self.start, self.link_prev, self.link_next, self.sort, self.direction) =\ make_page_args(request, self.name, ext=ext) self.n_all = Gist.count_user_all(self.name, self.is_self) self.n_fork = Gist.count_user_fork(self.name) self.n_star = Gist.count_user_star(self.name) if self.sort not in ('created', 'updated') \ or self.direction not in ('desc', 'asc'): raise TraversalError() def _render(self, request, gists): user = self.user tdt = { 'request': request, 'gists': gists, 'user': user, 'page': int(self.page), 'link_prev': self.link_prev, 'link_next': self.link_next, 'n_all': self.n_all, 'n_fork': self.n_fork, 'n_star': self.n_star, 'sort': self.sort, 'direction': self.direction } if is_mobile_device(request): return st('/m/gist/user_gists.html', **tdt) return st('/gist/user_gists.html', **tdt) def _q_index(self, request): gists = Gist.gets_by_owner( self.name, is_self=self.is_self, start=self.start, 
limit=5, sort=self.sort, direction=self.direction) return self._render(request, gists) def forked(self, request): gists = Gist.forks_by_user(self.name, start=self.start, limit=5, sort=self.sort, direction=self.direction) return self._render(request, gists) def starred(self, request): gists = Gist.stars_by_user(self.name, start=self.start, limit=5) return self._render(request, gists) def public(self, request): gists = Gist.publics_by_user(self.name, start=self.start, limit=5, sort=self.sort, direction=self.direction) return self._render(request, gists) def secret(self, request): current_user = request.user if not current_user or current_user.username != self.name: return request.redirect('/gist/%s' % self.name) gists = Gist.secrets_by_user(self.name, start=self.start, limit=5, sort=self.sort, direction=self.direction) return self._render(request, gists) def _q_lookup(self, request, item): gid = item extend = None if item.count('.') == 1: gid, extend = item.split('.') if not gid.isdigit(): raise TraversalError() gist = Gist.get(gid) if not gist or gist.owner_id != self.name: raise TraversalError() if extend == 'js': return GistEmbedUI(gid) return GistUI(gid) EMBED_CSS = """ <link href=\"%s/static/css/highlight.css\" rel=\"stylesheet\"> <link href=\"%s/static/css/embed.css\" rel=\"stylesheet\"> """ % (DOMAIN, DOMAIN) EMBED_HEAD = "<div id=\"gist%s\" class=\"gist\">" EMBED_FOOTER = "</div>" SRC_FORMAT = """ <div class=\"gist-data gist-syntax\"> <div class=\"gist-file\"> <div class=\"data\"> %s </div> <div class=\"gist-meta\"> <a href=\"%s/gist/%s/raw/master/%s\" style=\"float:right\">view raw</a> <a href=\"%s/gist/%s#%s\" style=\"float:right; margin-right:10px; color:#666;\">%s</a> # noqa <a href=\"%s\">This Gist</a> brought to you by <a href=\"%s\">Code</a>. 
</div> </div> </div> """ class GistEmbedUI(object): _q_exports = [] def __init__(self, gist_id): self.gist_id = gist_id def __call__(self, request): resp = request.response resp.set_header("Content-Type", "text/javascript") resp.set_header('Expires', 'Sun, 1 Jan 2006 01:00:00 GMT') resp.set_header('Pragma', 'no-cache') resp.set_header('Cache-Control', 'must-revalidate, no-cache, private') if not self.gist_id.isdigit() or not Gist.get(self.gist_id): return "document.write('<span style=\"color:red;\">NOT EXIST GIST</span>')" # noqa gist = Gist.get(self.gist_id) html = EMBED_CSS + EMBED_HEAD % gist.id for path in gist.files: path = path.encode('utf8') # TODO: clean this src = gist.get_file(path, rev='HEAD') src = highlight_code(path, src) src = src.replace('"', '\"').replace("'", "\'") html += SRC_FORMAT % (src, DOMAIN, gist.id, path, DOMAIN, gist.id, path, path, gist.url, DOMAIN) html += EMBED_FOOTER html = html.replace('\n', '\\n') return "document.write('%s')" % html class GistCommentUI(object): _q_exports = [] def __init__(self, gist_id): self.gist = Gist.get(gist_id) def _q_index(self, request): if request.method == 'POST': content = request.get_form_var('content', '') if content: GistComment.add(self.gist.id, request.user.username, content) return request.redirect(self.gist.url) def _q_lookup(self, request, comment_id): if request.method == 'POST': act = request.get_form_var('act', None) if act and act in ('delete', 'update'): comment = GistComment.get(comment_id) if act == 'delete' and comment: if comment.can_delete(request.user.username): comment.delete() return json.dumps({'r': 1}) raise TraversalError( "Unable to delete comment %s" % comment_id) return request.redirect(self.gist.url) def _get_req_gist_data(request): _form = request.form desc = _form.get('desc', '') is_public = _form.get('gist_public', '1') gist_names = _form.get('gist_name', '') gist_contents = _form.get('gist_content', '') gist_oids = _form.get('oid', '') return (desc, is_public, gist_names, gist_contents, gist_oids) def _make_links(name, page, ext=''): if page < 1: return '' if page and page >= 1: if ext: return '/gist/%s/%s/?page=%s' % (name, ext, page) else: return '/gist/%s/?page=%s' % (name, page) def make_page_args(request, name, ext=''): page = request.get_form_var('page', 1) start = 5 * (int(page) - 1) link_prev = _make_links(name, int(page) - 1, ext=ext) link_next = _make_links(name, int(page) + 1, ext=ext) sort = request.get_form_var('sort', 'created') direction = request.get_form_var('direction', 'desc') return (page, start, link_prev, link_next, sort, direction)
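# --- Illustrative sketch (hypothetical helper, not part of vilya) ------------
# The paging convention used by make_page_args() / _make_links() above, folded
# into one self-contained function: five gists per page, an empty string when
# there is no previous page, and URLs of the form /gist/<name>[/<ext>]/?page=N.
def gist_page_links(name, page, ext='', per_page=5):
    """Return (offset, prev_url, next_url) for a gist listing page."""
    page = int(page)
    offset = per_page * (page - 1)

    def link(p):
        if p < 1:
            return ''
        base = '/gist/%s/%s/' % (name, ext) if ext else '/gist/%s/' % name
        return '%s?page=%s' % (base, p)

    return offset, link(page - 1), link(page + 1)

# Example: the second page of a user's starred gists.
# gist_page_links('alice', 2, ext='starred')
# -> (5, '/gist/alice/starred/?page=1', '/gist/alice/starred/?page=3')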
""" Room Typeclasses for the TutorialWorld. This defines special types of Rooms available in the tutorial. To keep everything in one place we define them together with the custom commands needed to control them. Those commands could also have been in a separate module (e.g. if they could have been re-used elsewhere.) """ import random from evennia import TICKER_HANDLER from evennia import CmdSet, Command, DefaultRoom from evennia import utils, create_object, search_object from evennia import syscmdkeys, default_cmds from evennia.contrib.tutorial_world.objects import LightSource, TutorialObject # the system error-handling module is defined in the settings. We load the # given setting here using utils.object_from_module. This way we can use # it regardless of if we change settings later. from django.conf import settings _SEARCH_AT_RESULT = utils.object_from_module(settings.SEARCH_AT_RESULT) #------------------------------------------------------------ # # Tutorial room - parent room class # # This room is the parent of all rooms in the tutorial. # It defines a tutorial command on itself (available to # all those who are in a tutorial room). # #------------------------------------------------------------ # # Special command available in all tutorial rooms # class CmdTutorial(Command): """ Get help during the tutorial Usage: tutorial [obj] This command allows you to get behind-the-scenes info about an object or the current location. """ key = "tutorial" aliases = ["tut"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """ All we do is to scan the current location for an Attribute called `tutorial_info` and display that. """ caller = self.caller if not self.args: target = self.obj # this is the room the command is defined on else: target = caller.search(self.args.strip()) if not target: return helptext = target.db.tutorial_info if helptext: caller.msg("{G%s{n" % helptext) else: caller.msg("{RSorry, there is no tutorial help available here.{n") # for the @detail command we inherit from MuxCommand, since # we want to make use of MuxCommand's pre-parsing of '=' in the # argument. class CmdTutorialSetDetail(default_cmds.MuxCommand): """ sets a detail on a room Usage: @detail <key> = <description> @detail <key>;<alias>;... = description Example: @detail walls = The walls are covered in ... @detail castle;ruin;tower = The distant ruin ... This sets a "detail" on the object this command is defined on (TutorialRoom for this tutorial). This detail can be accessed with the TutorialRoomLook command sitting on TutorialRoom objects (details are set as a simple dictionary on the room). This is a Builder command. We custom parse the key for the ;-separator in order to create multiple aliases to the detail all at once. """ key = "@detail" locks = "cmd:perm(Builders)" help_category = "TutorialWorld" def func(self): """ All this does is to check if the object has the set_detail method and uses it. """ if not self.args or not self.rhs: self.caller.msg("Usage: @detail key = description") return if not hasattr(self.obj, "set_detail"): self.caller.msg("Details cannot be set on %s." 
% self.obj) return for key in self.lhs.split(";"): # loop over all aliases, if any (if not, this will just be # the one key to loop over) self.obj.set_detail(key, self.rhs) self.caller.msg("Detail set: '%s': '%s'" % (self.lhs, self.rhs)) class CmdTutorialLook(default_cmds.CmdLook): """ looks at the room and on details Usage: look <obj> look <room detail> look *<player> Observes your location, details at your location or objects in your vicinity. Tutorial: This is a child of the default Look command, that also allows us to look at "details" in the room. These details are things to examine and offers some extra description without actually having to be actual database objects. It uses the return_detail() hook on TutorialRooms for this. """ # we don't need to specify key/locks etc, this is already # set by the parent. help_category = "TutorialWorld" def func(self): """ Handle the looking. This is a copy of the default look code except for adding in the details. """ caller = self.caller args = self.args if args: # we use quiet=True to turn off automatic error reporting. # This tells search that we want to handle error messages # ourself. This also means the search function will always # return a list (with 0, 1 or more elements) rather than # result/None. looking_at_obj = caller.search(args, use_nicks=True, quiet=True) if len(looking_at_obj) != 1: # no target found or more than one target found (multimatch) # look for a detail that may match detail = self.obj.return_detail(args) if detail: self.caller.msg(detail) return else: # no detail found, delegate our result to the normal # error message handler. _SEARCH_AT_RESULT(caller, args, looking_at_obj) return else: # we found a match, extract it from the list and carry on # normally with the look handling. looking_at_obj = looking_at_obj[0] else: looking_at_obj = caller.location if not looking_at_obj: caller.msg("You have no location to look at!") return if not hasattr(looking_at_obj, 'return_appearance'): # this is likely due to us having a player instead looking_at_obj = looking_at_obj.character if not looking_at_obj.access(caller, "view"): caller.msg("Could not find '%s'." % args) return # get object's appearance caller.msg(looking_at_obj.return_appearance(caller)) # the object's at_desc() method. looking_at_obj.at_desc(looker=caller) class TutorialRoomCmdSet(CmdSet): """ Implements the simple tutorial cmdset. This will overload the look command in the default CharacterCmdSet since it has a higher priority (ChracterCmdSet has prio 0) """ key = "tutorial_cmdset" priority = 1 def at_cmdset_creation(self): "add the tutorial-room commands" self.add(CmdTutorial()) self.add(CmdTutorialSetDetail()) self.add(CmdTutorialLook()) class TutorialRoom(DefaultRoom): """ This is the base room type for all rooms in the tutorial world. It defines a cmdset on itself for reading tutorial info about the location. """ def at_object_creation(self): "Called when room is first created" self.db.tutorial_info = "This is a tutorial room. It allows you to use the 'tutorial' command." self.cmdset.add_default(TutorialRoomCmdSet) def at_object_receive(self, new_arrival, source_location): """ When an object enter a tutorial room we tell other objects in the room about it by trying to call a hook on them. The Mob object uses this to cheaply get notified of enemies without having to constantly scan for them. Args: new_arrival (Object): the object that just entered this room. source_location (Object): the previous location of new_arrival. 
""" if new_arrival.has_player and not new_arrival.is_superuser: # this is a character for obj in self.contents_get(exclude=new_arrival): if hasattr(obj, "at_new_arrival"): obj.at_new_arrival(new_arrival) def return_detail(self, detailkey): """ This looks for an Attribute "obj_details" and possibly returns the value of it. Args: detailkey (str): The detail being looked at. This is case-insensitive. """ details = self.db.details if details: return details.get(detailkey.lower(), None) def set_detail(self, detailkey, description): """ This sets a new detail, using an Attribute "details". Args: detailkey (str): The detail identifier to add (for aliases you need to add multiple keys to the same description). Case-insensitive. description (str): The text to return when looking at the given detailkey. """ if self.db.details: self.db.details[detailkey.lower()] = description else: self.db.details = {detailkey.lower(): description} #------------------------------------------------------------ # # Weather room - room with a ticker # #------------------------------------------------------------ # These are rainy weather strings WEATHER_STRINGS = ( "The rain coming down from the iron-grey sky intensifies.", "A gush of wind throws the rain right in your face. Despite your cloak you shiver.", "The rainfall eases a bit and the sky momentarily brightens.", "For a moment it looks like the rain is slowing, then it begins anew with renewed force.", "The rain pummels you with large, heavy drops. You hear the rumble of thunder in the distance.", "The wind is picking up, howling around you, throwing water droplets in your face. It's cold.", "Bright fingers of lightning flash over the sky, moments later followed by a deafening rumble.", "It rains so hard you can hardly see your hand in front of you. You'll soon be drenched to the bone.", "Lightning strikes in several thundering bolts, striking the trees in the forest to your west.", "You hear the distant howl of what sounds like some sort of dog or wolf.", "Large clouds rush across the sky, throwing their load of rain over the world.") class WeatherRoom(TutorialRoom): """ This should probably better be called a rainy room... This sets up an outdoor room typeclass. At irregular intervals, the effects of weather will show in the room. Outdoor rooms should inherit from this. """ def at_object_creation(self): """ Called when object is first created. We set up a ticker to update this room regularly. Note that we could in principle also use a Script to manage the ticking of the room; the TickerHandler works fine for simple things like this though. """ super(WeatherRoom, self).at_object_creation() # subscribe ourselves to a ticker to repeatedly call the hook # "update_weather" on this object. The interval is randomized # so as to not have all weather rooms update at the same time. interval = random.randint(50, 70) TICKER_HANDLER.add(self, interval, idstring="tutorial", hook_key="update_weather") # this is parsed by the 'tutorial' command on TutorialRooms. self.db.tutorial_info = \ "This room has a Script running that has it echo a weather-related message at irregular intervals." def update_weather(self, *args, **kwargs): """ Called by the tickerhandler at regular intervals. Even so, we only update 20% of the time, picking a random weather message when we do. 
        The tickerhandler requires that this hook accepts any arguments and
        keyword arguments (hence the *args, **kwargs even though we don't
        actually use them in this example)
        """
        if random.random() < 0.2:
            # only update 20 % of the time
            self.msg_contents("{w%s{n" % random.choice(WEATHER_STRINGS))


SUPERUSER_WARNING = "\nWARNING: You are playing as a superuser ({name}). Use the {quell} command to\n" \
                    "play without superuser privileges (many functions and puzzles ignore the \n" \
                    "presence of a superuser, making this mode useful for exploring things behind \n" \
                    "the scenes later).\n"

#------------------------------------------------------------
#
# Intro Room - unique room
#
# This room marks the start of the tutorial. It sets up properties on
# the player char that are needed for the tutorial.
#
#------------------------------------------------------------

class IntroRoom(TutorialRoom):
    """
    Intro room

    properties to customize:
        char_health - integer > 0 (default 20)
    """
    def at_object_creation(self):
        """
        Called when the room is first created.
        """
        super(IntroRoom, self).at_object_creation()
        self.db.tutorial_info = "The first room of the tutorial. " \
                                "This assigns the health Attribute to " \
                                "the player."

    def at_object_receive(self, character, source_location):
        """
        Assign properties on characters
        """
        # setup character for the tutorial
        health = self.db.char_health or 20

        if character.has_player:
            character.db.health = health
            character.db.health_max = health

        if character.is_superuser:
            string = "-" * 78 + SUPERUSER_WARNING + "-" * 78
            character.msg("{r%s{n" % string.format(name=character.key, quell="{w@quell{r"))


#------------------------------------------------------------
#
# Bridge - unique room
#
# Defines a special west-eastward "bridge"-room, a large room that takes
# several steps to cross. It is complete with custom commands and a
# chance of falling off the bridge. This room has no regular exits,
# instead the exits are handled by custom commands set on the player
# upon first entering the room.
#
# Since one can enter the bridge room from both ends, it is
# divided into five steps:
#     westroom <- 0 1 2 3 4 -> eastroom
#
#------------------------------------------------------------

class CmdEast(Command):
    """
    Go eastwards across the bridge.

    Tutorial info:
        This command relies on the caller having two Attributes
        (assigned by the room when entering):
            - east_exit: a unique name or dbref to the room to go to
              when exiting east.
            - west_exit: a unique name or dbref to the room to go to
              when exiting west.
        The room must also have the following Attribute:
            - tutorial_bridge_position: the current position on
              the bridge, 0 - 4.
    """
    key = "east"
    aliases = ["e"]
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        "move one step eastwards"
        caller = self.caller

        bridge_step = min(5, caller.db.tutorial_bridge_position + 1)

        if bridge_step > 4:
            # we have reached the far east end of the bridge.
            # Move to the east room.
            eexit = search_object(self.obj.db.east_exit)
            if eexit:
                caller.move_to(eexit[0])
            else:
                caller.msg("No east exit was found for this room. Contact an admin.")
            return
        caller.db.tutorial_bridge_position = bridge_step
        # since we are really in one room, we have to notify others
        # in the room when we move.
        caller.location.msg_contents("%s steps eastwards across the bridge." % caller.name, exclude=caller)
        caller.execute_cmd("look")


# go back across the bridge
class CmdWest(Command):
    """
    Go westwards across the bridge.
Tutorial info: This command relies on the caller having two Attributes (assigned by the room when entering): - east_exit: a unique name or dbref to the room to go to when exiting east. - west_exit: a unique name or dbref to the room to go to when exiting west. The room must also have the following property: - tutorial_bridge_posistion: the current position on on the bridge, 0 - 4. """ key = "west" aliases = ["w"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): "move one step westwards" caller = self.caller bridge_step = max(-1, caller.db.tutorial_bridge_position - 1) if bridge_step < 0: # we have reached the far west end of the bridge. # Move to the west room. wexit = search_object(self.obj.db.west_exit) if wexit: caller.move_to(wexit[0]) else: caller.msg("No west exit was found for this room. Contact an admin.") return caller.db.tutorial_bridge_position = bridge_step # since we are really in one room, we have to notify others # in the room when we move. caller.location.msg_contents("%s steps westwards across the bridge." % caller.name, exclude=caller) caller.execute_cmd("look") BRIDGE_POS_MESSAGES = ("You are standing {wvery close to the the bridge's western foundation{n. If you go west you will be back on solid ground ...", "The bridge slopes precariously where it extends eastwards towards the lowest point - the center point of the hang bridge.", "You are {whalfways{n out on the unstable bridge.", "The bridge slopes precariously where it extends westwards towards the lowest point - the center point of the hang bridge.", "You are standing {wvery close to the bridge's eastern foundation{n. If you go east you will be back on solid ground ...") BRIDGE_MOODS = ("The bridge sways in the wind.", "The hanging bridge creaks dangerously.", "You clasp the ropes firmly as the bridge sways and creaks under you.", "From the castle you hear a distant howling sound, like that of a large dog or other beast.", "The bridge creaks under your feet. Those planks does not seem very sturdy.", "Far below you the ocean roars and throws its waves against the cliff, as if trying its best to reach you.", "Parts of the bridge come loose behind you, falling into the chasm far below!", "A gust of wind causes the bridge to sway precariously.", "Under your feet a plank comes loose, tumbling down. For a moment you dangle over the abyss ...", "The section of rope you hold onto crumble in your hands, parts of it breaking apart. You sway trying to regain balance.") FALL_MESSAGE = "Suddenly the plank you stand on gives way under your feet! You fall!" \ "\nYou try to grab hold of an adjoining plank, but all you manage to do is to " \ "divert your fall westwards, towards the cliff face. This is going to hurt ... " \ "\n ... The world goes dark ...\n\n" \ class CmdLookBridge(Command): """ looks around at the bridge. Tutorial info: This command assumes that the room has an Attribute "fall_exit", a unique name or dbref to the place they end upp if they fall off the bridge. """ key = 'look' aliases = ["l"] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): "Looking around, including a chance to fall." 
caller = self.caller bridge_position = self.caller.db.tutorial_bridge_position # this command is defined on the room, so we get it through self.obj location = self.obj # randomize the look-echo message = "{c%s{n\n%s\n%s" % (location.key, BRIDGE_POS_MESSAGES[bridge_position], random.choice(BRIDGE_MOODS)) chars = [obj for obj in self.obj.contents_get(exclude=caller) if obj.has_player] if chars: # we create the You see: message manually here message += "\n You see: %s" % ", ".join("{c%s{n" % char.key for char in chars) self.caller.msg(message) # there is a chance that we fall if we are on the western or central # part of the bridge. if bridge_position < 3 and random.random() < 0.05 and not self.caller.is_superuser: # we fall 5% of time. fall_exit = search_object(self.obj.db.fall_exit) if fall_exit: self.caller.msg("{r%s{n" % FALL_MESSAGE) self.caller.move_to(fall_exit[0], quiet=True) # inform others on the bridge self.obj.msg_contents("A plank gives way under %s's feet and " \ "they fall from the bridge!" % self.caller.key) # custom help command class CmdBridgeHelp(Command): """ Overwritten help command while on the bridge. """ key = "help" aliases = ["h"] locks = "cmd:all()" help_category = "Tutorial world" def func(self): "Implements the command." string = "You are trying hard not to fall off the bridge ..." string += "\n\nWhat you can do is trying to cross the bridge {weast{n " string += "or try to get back to the mainland {wwest{n)." self.caller.msg(string) class BridgeCmdSet(CmdSet): "This groups the bridge commands. We will store it on the room." key = "Bridge commands" priority = 1 # this gives it precedence over the normal look/help commands. def at_cmdset_creation(self): "Called at first cmdset creation" self.add(CmdTutorial()) self.add(CmdEast()) self.add(CmdWest()) self.add(CmdLookBridge()) self.add(CmdBridgeHelp()) BRIDGE_WEATHER = ( "The rain intensifies, making the planks of the bridge even more slippery.", "A gush of wind throws the rain right in your face.", "The rainfall eases a bit and the sky momentarily brightens.", "The bridge shakes under the thunder of a closeby thunder strike.", "The rain pummels you with large, heavy drops. You hear the distinct howl of a large hound in the distance.", "The wind is picking up, howling around you and causing the bridge to sway from side to side.", "Some sort of large bird sweeps by overhead, giving off an eery screech. Soon it has disappeared in the gloom.", "The bridge sways from side to side in the wind.", "Below you a particularly large wave crashes into the rocks.", "From the ruin you hear a distant, otherwordly howl. Or maybe it was just the wind.") class BridgeRoom(WeatherRoom): """ The bridge room implements an unsafe bridge. It also enters the player into a state where they get new commands so as to try to cross the bridge. We want this to result in the player getting a special set of commands related to crossing the bridge. The result is that it will take several steps to cross it, despite it being represented by only a single room. We divide the bridge into steps: self.db.west_exit - - | - - self.db.east_exit 0 1 2 3 4 The position is handled by a variable stored on the character when entering and giving special move commands will increase/decrease the counter until the bridge is crossed. We also has self.db.fall_exit, which points to a gathering location to end up if we happen to fall off the bridge (used by the CmdLookBridge command). 
""" def at_object_creation(self): "Setups the room" # this will start the weather room's ticker and tell # it to call update_weather regularly. super(BridgeRoom, self).at_object_creation() # this identifies the exits from the room (should be the command # needed to leave through that exit). These are defaults, but you # could of course also change them after the room has been created. self.db.west_exit = "cliff" self.db.east_exit = "gate" self.db.fall_exit = "cliffledge" # add the cmdset on the room. self.cmdset.add_default(BridgeCmdSet) def update_weather(self, *args, **kwargs): """ This is called at irregular intervals and makes the passage over the bridge a little more interesting. """ if random.random() < 80: # send a message most of the time self.msg_contents("{w%s{n" % random.choice(BRIDGE_WEATHER)) def at_object_receive(self, character, source_location): """ This hook is called by the engine whenever the player is moved into this room. """ if character.has_player: # we only run this if the entered object is indeed a player object. # check so our east/west exits are correctly defined. wexit = search_object(self.db.west_exit) eexit = search_object(self.db.east_exit) fexit = search_object(self.db.fall_exit) if not (wexit and eexit and fexit): character.msg("The bridge's exits are not properly configured. "\ "Contact an admin. Forcing west-end placement.") character.db.tutorial_bridge_position = 0 return if source_location == eexit[0]: # we assume we enter from the same room we will exit to character.db.tutorial_bridge_position = 4 else: # if not from the east, then from the west! character.db.tutorial_bridge_position = 0 def at_object_leave(self, character, target_location): """ This is triggered when the player leaves the bridge room. """ if character.has_player: # clean up the position attribute del character.db.tutorial_bridge_position #------------------------------------------------------------------------------ # # Dark Room - a room with states # # This room limits the movemenets of its denizens unless they carry an active # LightSource object (LightSource is defined in # tutorialworld.objects.LightSource) # #------------------------------------------------------------------------------ DARK_MESSAGES = ("It is pitch black. You are likely to be eaten by a grue.", "It's pitch black. You fumble around but cannot find anything.", "You don't see a thing. You feel around, managing to bump your fingers hard against something. Ouch!", "You don't see a thing! Blindly grasping the air around you, you find nothing.", "It's totally dark here. You almost stumble over some un-evenness in the ground.", "You are completely blind. For a moment you think you hear someone breathing nearby ... \n ... surely you must be mistaken.", "Blind, you think you find some sort of object on the ground, but it turns out to be just a stone.", "Blind, you bump into a wall. The wall seems to be covered with some sort of vegetation, but its too damp to burn.", "You can't see anything, but the air is damp. It feels like you are far underground.") ALREADY_LIGHTSOURCE = "You don't want to stumble around in blindness anymore. You already " \ "found what you need. Let's get light already!" FOUND_LIGHTSOURCE = "Your fingers bump against a splinter of wood in a corner. It smells of resin and seems dry enough to burn! " \ "You pick it up, holding it firmly. Now you just need to {wlight{n it using the flint and steel you carry with you." 
class CmdLookDark(Command): """ Look around in darkness Usage: look Look around in the darkness, trying to find something. """ key = "look" aliases = ["l", 'feel', 'search', 'feel around', 'fiddle'] locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """ Implement the command. This works both as a look and a search command; there is a random chance of eventually finding a light source. """ caller = self.caller if random.random() < 0.8: # we don't find anything caller.msg(random.choice(DARK_MESSAGES)) else: # we could have found something! if any(obj for obj in caller.contents if utils.inherits_from(obj, LightSource)): # we already carry a LightSource object. caller.msg(ALREADY_LIGHTSOURCE) else: # don't have a light source, create a new one. create_object(LightSource, key="splinter", location=caller) caller.msg(FOUND_LIGHTSOURCE) class CmdDarkHelp(Command): """ Help command for the dark state. """ key = "help" locks = "cmd:all()" help_category = "TutorialWorld" def func(self): """ Replace the the help command with a not-so-useful help """ string = "Can't help you until you find some light! Try looking/feeling around for something to burn. " \ "You shouldn't give up even if you don't find anything right away." self.caller.msg(string) class CmdDarkNoMatch(Command): """ This is a system command. Commands with special keys are used to override special sitations in the game. The CMD_NOMATCH is used when the given command is not found in the current command set (it replaces Evennia's default behavior or offering command suggestions) """ key = syscmdkeys.CMD_NOMATCH locks = "cmd:all()" def func(self): "Implements the command." self.caller.msg("Until you find some light, there's not much you can do. Try feeling around.") class DarkCmdSet(CmdSet): """ Groups the commands of the dark room together. We also import the default say command here so that players can still talk in the darkness. We give the cmdset the mergetype "Replace" to make sure it completely replaces whichever command set it is merged onto (usually the default cmdset) """ key = "darkroom_cmdset" mergetype = "Replace" priority = 2 def at_cmdset_creation(self): "populate the cmdset." self.add(CmdTutorial()) self.add(CmdLookDark()) self.add(CmdDarkHelp()) self.add(CmdDarkNoMatch()) self.add(default_cmds.CmdSay) class DarkRoom(TutorialRoom): """ A dark room. This tries to start the DarkState script on all objects entering. The script is responsible for making sure it is valid (that is, that there is no light source shining in the room). The is_lit Attribute is used to define if the room is currently lit or not, so as to properly echo state changes. Since this room (in the tutorial) is meant as a sort of catch-all, we also make sure to heal characters ending up here, since they may have been beaten up by the ghostly apparition at this point. """ def at_object_creation(self): """ Called when object is first created. """ super(DarkRoom, self).at_object_creation() self.db.tutorial_info = "This is a room with custom command sets on itself." # the room starts dark. self.db.is_lit = False self.cmdset.add(DarkCmdSet, permanent=True) def at_init(self): """ Called when room is first recached (such as after a reload) """ self.check_light_state() def _carries_light(self, obj): """ Checks if the given object carries anything that gives light. Note that we do NOT look for a specific LightSource typeclass, but for the Attribute is_giving_light - this makes it easy to later add other types of light-giving items. 
We also accept if there is a light-giving object in the room overall (like if a splinter was dropped in the room) """ return obj.is_superuser or obj.db.is_giving_light or obj.is_superuser or any(o for o in obj.contents if o.db.is_giving_light) def _heal(self, character): """ Heal a character. """ health = character.db.health_max or 20 character.db.health = health def check_light_state(self): """ This method checks if there are any light sources in the room. If there isn't it makes sure to add the dark cmdset to all characters in the room. It is called whenever characters enter the room and also by the Light sources when they turn on. """ if any(self._carries_light(obj) for obj in self.contents): self.cmdset.remove(DarkCmdSet) self.db.is_lit = True for char in (obj for obj in self.contents if obj.has_player): # this won't do anything if it is already removed char.msg("The room is lit up.") else: # noone is carrying light - darken the room self.db.is_lit = False self.cmdset.add(DarkCmdSet, permanent=True) for char in (obj for obj in self.contents if obj.has_player): if char.is_superuser: char.msg("You are Superuser, so you are not affected by the dark state.") else: # put players in darkness char.msg("The room is completely dark.") def at_object_receive(self, obj, source_location): """ Called when an object enters the room. """ if obj.has_player: # a puppeted object, that is, a Character self._heal(obj) # in case the new guy carries light with them self.check_light_state() def at_object_leave(self, obj, target_location): """ In case people leave with the light, we make sure to clear the DarkCmdSet if necessary. This also works if they are teleported away. """ self.check_light_state() #------------------------------------------------------------ # # Teleport room - puzzles solution # # This is a sort of puzzle room that requires a certain # attribute on the entering character to be the same as # an attribute of the room. If not, the character will # be teleported away to a target location. This is used # by the Obelisk - grave chamber puzzle, where one must # have looked at the obelisk to get an attribute set on # oneself, and then pick the grave chamber with the # matching imagery for this attribute. # #------------------------------------------------------------ class TeleportRoom(TutorialRoom): """ Teleporter - puzzle room. Important attributes (set at creation): puzzle_key - which attr to look for on character puzzle_value - what char.db.puzzle_key must be set to success_teleport_to - where to teleport in case if success success_teleport_msg - message to echo while teleporting to success failure_teleport_to - where to teleport to in case of failure failure_teleport_msg - message to echo while teleporting to failure """ def at_object_creation(self): "Called at first creation" super(TeleportRoom, self).at_object_creation() # what character.db.puzzle_clue must be set to, to avoid teleportation. self.db.puzzle_value = 1 # target of successful teleportation. Can be a dbref or a # unique room name. self.db.success_teleport_msg = "You are successful!" self.db.success_teleport_to = "treasure room" # the target of the failure teleportation. self.db.failure_teleport_msg = "You fail!" self.db.failure_teleport_to = "dark cell" def at_object_receive(self, character, source_location): """ This hook is called by the engine whenever the player is moved into this room. """ if not character.has_player: # only act on player characters. 
            return
        # determine if the puzzle is a success or not
        is_success = str(character.db.puzzle_clue) == str(self.db.puzzle_value)
        teleport_to = self.db.success_teleport_to if is_success else self.db.failure_teleport_to
        # note that this returns a list
        results = search_object(teleport_to)
        if not results or len(results) > 1:
            # we cannot move anywhere since no valid target was found.
            print "no valid teleport target for %s was found." % teleport_to
            return
        if character.is_superuser:
            # superusers don't get teleported
            character.msg("Superuser block: You would have been teleported to %s." % results[0])
            return
        # perform the teleport
        if is_success:
            character.msg(self.db.success_teleport_msg)
        else:
            character.msg(self.db.failure_teleport_msg)
        # teleport quietly to the new place
        character.move_to(results[0], quiet=True, move_hooks=False)


#------------------------------------------------------------
#
# Outro room - unique exit room
#
# Cleans up the character from all tutorial-related properties.
#
#------------------------------------------------------------

class OutroRoom(TutorialRoom):
    """
    Outro room.

    Called when exiting the tutorial, cleans the character of
    tutorial-related attributes.
    """
    def at_object_creation(self):
        """
        Called when the room is first created.
        """
        super(OutroRoom, self).at_object_creation()
        self.db.tutorial_info = "The last room of the tutorial. " \
                                "This cleans up all temporary Attributes " \
                                "the tutorial may have assigned to the " \
                                "character."

    def at_object_receive(self, character, source_location):
        """
        Do cleanup.
        """
        if character.has_player:
            if self.db.wracklist:
                for wrackid in self.db.wracklist:
                    character.del_attribute(wrackid)
            del character.db.health_max
            del character.db.health
            del character.db.last_climbed
            del character.db.puzzle_clue
            del character.db.combat_parry_mode
            del character.db.tutorial_bridge_position
            character.tags.clear(category="tutorial_world")
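#------------------------------------------------------------
#
# Illustrative sketch (not used by the tutorial itself): how the room
# classes above could be created and configured from code. All keys,
# descriptions and clue values below are hypothetical examples.
#
#------------------------------------------------------------

def _example_build_rooms():
    """
    Sketch only - never called by the tutorial. Shows one way to wire up
    a WeatherRoom detail, a DarkRoom light source and a TeleportRoom
    puzzle using the Attributes documented above.
    """
    # a weather room with a detail that CmdTutorialLook can show
    outside = create_object(WeatherRoom, key="windswept field")
    outside.set_detail("sky", "Dark clouds chase each other across the sky.")

    # DarkRoom only checks the is_giving_light Attribute, so any object
    # can act as a light source, not just LightSource typeclasses.
    cellar = create_object(DarkRoom, key="cellar")
    lantern = create_object(TutorialObject, key="lantern", location=cellar)
    lantern.db.is_giving_light = True

    # a teleport puzzle: characters whose db.puzzle_clue matches
    # db.puzzle_value go to the success target, everyone else fails.
    puzzle = create_object(TeleportRoom, key="antechamber")
    puzzle.db.puzzle_value = 2
    puzzle.db.success_teleport_to = "treasure room"
    puzzle.db.failure_teleport_to = "dark cell"
    return outside, cellar, puzzle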
""" Class definition for a Box shaped window. Inherets from """ from neus.window import Window import numpy as np import collections class Box(Window): """Class definition of Box which inherits from window. Implements the Box shaped basis function. """ def __init__(self, center, width, ref_center=None, ref_width=None, time=None, periodic_length=None, max_list_size=100, initial_conditions=[], initial_conditions_probability=0.0): """Create an intsance of the Box object. Note that the width parameter refers to the maximum distance in each direction for which this window is defined to have nonzero support. Parameters --------------- center : numpy.ndarray, list The coordinates of the center of the window support. width : numpy.ndarray, list The width of the support of the window in nx2 size. First column is witdh in negative direction, second column is width in positive direction. ref_center : numpy.ndarray, list (None) The center of the support in the reference configuration at time :math:`t=0` ref_width : numpy.ndarray, list (None) The width of the support of the reference configuration at time :math:`t=0` time : numpy.ndarray, list (None) The time interval for which this window has nonzero support. periodic_length : numpy.ndarray, list (None) The length of the periodicity for each coordinate this window supports. max_list_size : interval (100) The maximum size of each :math:`\gamma_{ij}` distribution. initial_conditions : iterable The iterable of entry point objects at time :math:`t=0`. initial_conditions_probability : float A float between 0.0 and 1.0. """ # call parent constructor Window.__init__(self, center, width, ref_center=ref_center, ref_width=ref_width, time=time, periodic_length=periodic_length, max_list_size=max_list_size, initial_conditions=initial_conditions, initial_conditions_probability=initial_conditions_probability) # We calculate the slope of the Box. #self.slopes = 1.0/self.width self.w0 = self.center - self.width[0] self.w1 = self.center + self.width[1] return None def __repr__(self): """Return the string represetation of the Box instance. Returns ---------- string A string representation of the Box instance. """ id = "Box(" + str(self.center) + ", " + str(self.width) + ")" return id def __call__(self, walker): """Return the value of the support for this Box object. Parameters ------------- walker : walker instance The walker instance for which to evaluate the support of the Box. Returns ------------ float The value of the support. """ # DEVLEOPER Note: this is the key for how the NEUS application module relies on the walker object definition. We have to think carefully about how exactly we want the window object accept input in the call function. Should this explicitly make assumptions about the structure and callable functions of the walker class or should we try to generalize this to act through something like a numpy array? # second comment: This question arises separately really from how we wish this call function is implemented from a partition object. The partition object will # ok, here we are going to implement the window on top if the walker object definition. But we will enforce that the behavoir of Box's call will depend on both the definition of the reference and the information it can get from walker. 
# check to see that we've received a walker object try: coord = walker.get_colvars() except AttributeError: coord = np.array(walker) assert coord.shape == self.center.shape, "walker collective variables array does not match the shape of this window instance definition." # create a distance vector distancevec = coord - self.center # if any collective variable is periodic, construct dr, the adjuct for minimum image convention for the periodic cv's if self.wrapping is not None: # build dr dr = np.zeros(distancevec.shape) # add values to dr if the CV wraps for i in xrange(len(self.wrapping)): if self.wrapping[i] != 0.0: # This is an old trick from MD codes to find the minimum distance between two points. dr[i] = self.wrapping[i] * np.rint(distancevec[i]/self.wrapping[i]) # add min image vector distancevec -= dr # We calculate the value of #psiparts = 1.0-self.slopes*np.abs(distancevec) #val = min(psiparts.clip(min=0)) val = 1.0 * np.all(self.w0 < coord) and np.all(self.w1 > coord) if self.ref_center is not None: # check to see that if not hasattr(walker, "get_initial_colvars"): raise Exception("Walker object passed to Box __call__ does not have support for getting refernce collective variable value.") # return initial state of the collective variable ref_coord = walker.get_initial_colvars() val *=self.ref_indicator(ref_coord) if self.time_start is not None: # check that the passed walker object has a time coordinate if not hasattr(walker, "get_time"): raise Exception("Walker object passed to Box __call__ does not have support for getting time value.") # get time time = walker.get_time() # return indicator and multiply against support value val *= self.time_indicator(time) # return the minimum value. return val def ref_indicator(self, coord): """Return the value of the support for the reference phase space point. Parameters ------------- coord : numpy.ndarray The coordinates of the reference phase space point. Returns ------------ float The value of the support on the reference coordinate. """ assert coord.shape == self.ref_center.shape, "walker reference collective variables array does not match the shape of this window reference center." # create a distance vector distancevec = coord - self.ref_center # if any collective variable is periodic, construct dr, the adjuct for minimum image convetion for the periodic cv's if self.wrapping is not None: # build dr dr = np.zeros(distancevec.shape) # add values to dr if the CV wraps for i in xrange(len(self.wrapping)): if self.wrapping[i] != 0.0: # This is an old trick from MD codes to find the minimum distance between two points. dr[i] = self.wrapping[i] * np.rint(distancevec[i]/self.wrapping[i]) # add min image vector distancevec -= dr # We return 1.0 if all the distances are smaller than the width of the box from the center, 0.0 otherwise. return float(np.prod(self.ref_width > np.abs(distancevec))) def time_indicator(self, time): """Return the indicator function on the time interval. Takes the value 1.0 if the time provided is in the time interval for which this window has nonzero support. Parameters ------------ time : int A time to evaluate. Returns ---------- float The indicator value. """ if self.time_start <= time < self.time_end: return 1.0 else: return 0.0
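# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the Box class): the minimum-image
# correction used in __call__ and ref_indicator above, shown on plain numpy
# arrays. The periodic lengths and separations below are made-up examples.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    L = np.array([10.0, 10.0])           # periodic length of each coordinate
    distancevec = np.array([9.5, -5.1])  # raw separation along each coordinate

    # wrap each periodic coordinate back to its nearest image
    dr = L * np.rint(distancevec / L)
    print(distancevec - dr)              # -> [-0.5  4.9]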
#! coding: utf-8 import re import os.path ### # Django related settings ### DEBUG = True TEMPLATE_DEBUG = DEBUG WILDCARD_USER_SUBDOMAINS_ENABLED = True ADMINS = ( # ('AloneRoad', '[email protected]'), ) MANAGERS = ADMINS # This stuff is always going to be the same for an App Engine instance DATABASE_ENGINE = 'appengine' # 'appengine' is the only supported engine DATABASE_NAME = '' # Not used with appengine DATABASE_USER = '' # Not used with appengine DATABASE_PASSWORD = '' # Not used with appengine DATABASE_HOST = '' # Not used with appengine DATABASE_PORT = '' # Not used with appengine ## Default ourpicks_channels # (show at "Ourpicks Channels" - "Channel" when not login) DEFAULT_OURPICKS_CHANNELS = ["#[email protected]", "#[email protected]"] # The appengine_django code doesn't care about the address of memcached # because it is a built in API for App Engine CACHE_BACKEND = 'memcached://' # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'UTC' DEFAULT_CHARSET = 'utf-8' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Make this unique, and don't share it with anybody. SECRET_KEY = 'dd0bf43ad04974c6436219a9e863a838' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', # 'django.template.loaders.eggs.load_template_source', ) MIDDLEWARE_CLASSES = ( 'middleware.domain.DomainMiddleware', 'middleware.auth.AuthenticationMiddleware', 'middleware.exception.ExceptionMiddleware', 'middleware.cache.CacheMiddleware', 'middleware.strip_whitespace.WhitespaceMiddleware', 'middleware.profile.ProfileMiddleware', ) ROOT_URLCONF = 'urls' # Where the templates live, you probably don't want to change this unless you # know what you're doing TEMPLATE_DIRS = ( os.path.dirname(__file__), ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.debug', 'django.core.context_processors.request', 'common.context_processors.settings', 'common.context_processors.flash', 'common.context_processors.components', ) # Only apps under INSTALLED_APPS will be automatically tested via # `python manage.py test` and the profiling code takes this list into # account while filtering calls INSTALLED_APPS = ( 'appengine_django', 'common', 'actor', 'api', 'channel', 'explore', 'join', 'flat', 'login', 'front', 'invite', 'install', 'confirm', 'components', ) # We override the default test runner so that we can be Totally Awesome TEST_RUNNER = 'common.test.runner.run_tests' #### # # Below this is custom for Inforlearn (not related to Django) # #### # This is a dynamic setting so that we can check whether we have been run # locally, it is used mainly for making special testing-only tweaks. Ideally # we wouldn't need this, but the alternatives so far have been tricky. 
MANAGE_PY = os.path.exists('manage.py') # This is the name of the site that will be used whenever it refers to itself SITE_NAME = 'Inforlearn' SUPPORT_CHANNEL = 'support' # This is the colloquial name for an entry, mostly used for branding purposes POST_NAME = 'Post' # This is the name of the root user of the site ROOT_NICK = 'inforlearn' # This is the domain where this is installed on App Engine. It will be # necessary to know this if you plan on enabling SSL for login and join. GAE_DOMAIN = 'inforlearn.appspot.com' # Enabling this means we expect to be spending most of our time on a # Hosted domain HOSTED_DOMAIN_ENABLED = True # This is the domain you intend to serve your site from, when using hosted # domains. If SSL is enabled for login and join those requests will still # go to the GAE_DOMAIN above. HOSTED_DOMAIN = 'inforlearn.com' # App Engine requires you to serve with a subdomain DEFAULT_HOSTED_SUBDOMAIN = 'www' NS_DOMAIN = 'inforlearn.com' # DOMAIN will be used wherever a url to this site needs to be created # NS_DOMAIN will be used as the domain part of actor identifiers. # Note that changing this once you have deployed the site will likely result # in catastrophic failure. if HOSTED_DOMAIN_ENABLED: DOMAIN = '%s.%s' % (DEFAULT_HOSTED_SUBDOMAIN, HOSTED_DOMAIN) else: DOMAIN = GAE_DOMAIN # Subdomains aren't supported all that nicely by App Engine yet, so you # probably won't be able to enable WILDCARD_SUBDOMAINS below, but you can # still set up your app to use some of the static subdomains below. # Subdomains are ignored unless HOSTED_DOMAIN_ENABLED is True. SUBDOMAINS_ENABLED = True # These are defined as { subdomain : url_conf, ...} INSTALLED_SUBDOMAINS = { 'api': 'api.urls', # api-only urlconf 'm': 'urls', # default urlconf, but allow the subdomain, 'static': 'urls', } # Enable SSL support for login and join, if using HOSTED_DOMAIN_ENABLED # this means you will be redirecting through https://GAE_DOMAIN/login # and https://GAE_DOMAIN/join for those respective actions. SSL_LOGIN_ENABLED = False # # Appearance / Theme # # The default theme to use DEFAULT_THEME = 'default' # # Cookie # # Cookie settings, pretty self explanatory, you shouldn't need to touch these. USER_COOKIE = 'username' PASSWORD_COOKIE = 'password' COOKIE_DOMAIN = '%s' % DOMAIN COOKIE_PATH = '/' # # Blog # # Do you want /blog to redirect to your blog? BLOG_ENABLED = False # Where is your blog? BLOG_URL = 'http://example.com' BLOG_FEED_URL = 'http://example.com/feeds' # # API # # Setting this to True will make the public API accept all requests as being # from ROOT with no regard to actual authentication. # Never this set to True on a production site. API_DISABLE_VERIFICATION = False # These next three determine which OAuth Signature Methods to allow. API_ALLOW_RSA_SHA1 = True API_ALLOW_HMAC_SHA1 = True API_ALLOW_PLAINTEXT = False # These three determine whether the ROOT use should be allowed to use these # methods, if any at all. 
Setting all of these to False will disable the # ROOT user from accessing the public API API_ALLOW_ROOT_RSA_SHA1 = True API_ALLOW_ROOT_HMAC_SHA1 = True API_ALLOW_ROOT_PLAINTEXT = False # OAuth consumer key and secret values ROOT_TOKEN_KEY = 'ROOT_TOKEN_KEY' ROOT_TOKEN_SECRET = 'ROOT_TOKEN_SECRET' ROOT_CONSUMER_KEY = 'ROOT_CONSUMER_KEY' ROOT_CONSUMER_SECRET = 'ROOT_CONSUMER_SECRET' # Allow support for legacy API authentication API_ALLOW_LEGACY_AUTH = False LEGACY_SECRET_KEY = 'I AM ALSO SECRET' # # SMS # # Enabling SMS will require a bit more than just making this True, please # read the docs at http://code.google.com/p/jaikuengine/wiki/sms_support SMS_ENABLED = False # Most SMS vendors will provide a service that will post messages to a url # on your site when an SMS has been received on their end, this setting allows # you to add a secret value to that must exist in that url to prevent # malicious use. SMS_VENDOR_SECRET = 'SMS_VENDOR' # Valid numbers on which you expect to receive SMS SMS_TARGET = '00000' # Whitelist regular expression for allowable mobile-terminated targets SMS_MT_WHITELIST = re.compile('\+\d+') # Blacklist regular expression for blocked mobile-terminated targets SMS_MT_BLACKLIST = None # Turn on test mode for SMS SMS_TEST_ONLY = False # Numbers to use when testing live SMS so you don't spam all your users SMS_TEST_NUMBERS = [] # # XMPP / IM # # Enabling IM will require a bit more than just making this True, please # read the docs at http://code.google.com/p/jaikuengine/wiki/im_support IM_ENABLED = True # This is the id (JID) of the IM bot that you will use to communicate with # users of the IM interface IM_BOT = '[email protected]' # Turn on test mode for IM IM_TEST_ONLY = False # JIDs to allow when testing live XMPP so you don't spam all your users IM_TEST_JIDS = [] # Enable to send plain text messages only. Default is to send both plain # text and html. IM_PLAIN_TEXT_ONLY = False # Truncate entry title in comments. None or 140+ means no truncation. IM_MAX_LENGTH_OF_ENTRY_TITLES_FOR_COMMENTS = 255 # # Task Queue # # Enabling the queue will allow you to process posts with larger numbers # of followers but will require you to set up a cron job that will continuously # ping a special url to make sure the queue gets processed QUEUE_ENABLED = True # The secret to use for your cron job that processes your queue QUEUE_VENDOR_SECRET = 'SECRET' # # Throttling Config # # This will control the max number of SMS to send over a 30-day period THROTTLE_SMS_GLOBAL_MONTH = 10000 # Settings for remote services IMAGE_UPLOAD_ENABLED = False IMAGE_UPLOAD_URL = 'upload.example.com' # Settings for Google Contacts import GOOGLE_CONTACTS_IMPORT_ENABLED = True FEEDS_ENABLED = False MARK_AS_SPAM_ENABLED = False PRESS_ENABLED = False BADGES_ENABLED = False HIDE_COMMENTS_ENABLED = True MULTIADMIN_ENABLED = False PRIVATE_CHANNELS_ENABLED = False MARKDOWN_ENABLED = True # Lists nicks of users participating in conversations underneath comment # areas for posts. Clicking list items inserts @nicks into comment box. # The list shows a maximum of 25 nicks. COMMENT_QUICKLINKS_ENABLED = True # If enabled, adds support for using access keys 1-9 to insert @nicks into # comment box. Requires COMMENT_QUICKLINKS_ENABLED. 
COMMENT_QUICKLINKS_ACCESSKEYS_ENABLED = False PROFILE_DB = False # Limit of avatar photo size in kilobytes MAX_AVATAR_PHOTO_KB = 200 MAX_ACTIVATIONS = 10 # Email Test mode EMAIL_TEST_ONLY = False # Allowed email addresses for testing EMAIL_TEST_ADDRESSES = [] # Email limiting, if this is set it will restrict users to those with # email addresses in this domain EMAIL_LIMIT_DOMAIN = None # Things to measure to taste MAX_COMMENT_LENGTH = 2000 # Gdata Stuff GDATA_CONSUMER_KEY = '' GDATA_CONSUMER_SECRET = '' def default_email_sender(): try: return os.environ['DJANGO_DEFAULT_FROM_EMAIL'] except KeyError: return '[email protected]' DEFAULT_FROM_EMAIL = default_email_sender() DEFAULT_UNITTEST_TO_EMAIL = '[email protected]' PROFILING_DATA_PATH = 'profiling/prof_db.csv' # Set up the settings for the dev server if we are running it if MANAGE_PY: try: from dev_settings import * except ImportError: pass # Allow local overrides, useful for testing during development try: from local_settings import * except ImportError: pass
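# Illustrative sketch (not shipped with this file): a minimal local_settings.py
# that the import above would pick up during development. The overrides shown
# are hypothetical examples; any name defined earlier in this module can be
# overridden the same way.
#
#   # local_settings.py
#   DEBUG = True
#   TEMPLATE_DEBUG = DEBUG
#   SMS_TEST_ONLY = True
#   EMAIL_TEST_ONLY = True
#   EMAIL_TEST_ADDRESSES = ['[email protected]']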
from contextlib import contextmanager import attr import socket import yaml import re import six @attr.s(frozen=True) class Job(object): """Structure holding a job returned from Beanstalk :ivar job_id: Opaque identifier for the job (to be passed to :func:`BeanstalkClient.release_job()` or :func:`BeanstalkClient.stats_job()`). :ivar job_data: Blob of the data. str if :attr:`BeanstalkClient.auto_decode` is True; otherwise bytes """ job_id = attr.ib(validator=attr.validators.instance_of(int)) job_data = attr.ib() if getattr(attr, "__version_info__", (0,)) >= (19, 2): _attrs_kwargs = dict(eq=True) else: _attrs_kwargs = dict(cmp=True) @attr.s(frozen=True, hash=True, **_attrs_kwargs) class BeanstalkError(Exception): """Common error raised when something goes wrong with beanstalk""" message = attr.ib(converter=lambda m: m.decode('ascii')) def yaml_load(fo): # yaml.safe_load will never use the C loader; we have to detect it ourselves if hasattr(yaml, 'CSafeLoader'): return yaml.load(fo, Loader=yaml.CSafeLoader) else: return yaml.safe_load(fo) @attr.s(frozen=True) class BeanstalkInsertingProxy(object): """Proxy object yielded by :func:`BeanstalkClient.using()`""" beanstalk_client = attr.ib() tube = attr.ib() def put_job(self, data, pri=65536, delay=0, ttr=120): """Method to insert a job into the tube selected with :func:`BeanstalkClient.using`. :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int """ self.beanstalk_client.use(self.tube) return self.beanstalk_client.put_job(data=data, pri=pri, delay=delay, ttr=ttr) class BeanstalkClient(object): """Simple wrapper around the Beanstalk API. :param host: Hostname or IP address to connect to :type host: str :param port: Port to connect to :type port: int :param socket_timeout: Timeout to set on the socket. :type socket_timeout: float :param auto_decode: Attempt to decode job bodies as UTF-8 when reading them :type auto_decode: bool Doesn't provide any fanciness for writing consumers or producers. Just lets you invoke methods to call beanstalk functions. .. warning:: Setting socket timeout to a value lower than the value you pass to blocking functions like :func:`reserve_job()` will cause errors! """ def __init__(self, host, port=11300, socket_timeout=None, auto_decode=False): """Construct a synchronous Beanstalk Client. Does not connect!""" self.host = host self.port = port self.socket_timeout = socket_timeout self._reset_state() self.desired_tube = 'default' self.desired_watchlist = set(['default']) self.auto_decode = auto_decode @classmethod def from_uri(cls, uri, socket_timeout=None, auto_decode=False): """Construct a synchronous Beanstalk Client from a URI. The URI may be of the form beanstalk://host:port or beanstalkd://host:port IPv6 literals must be wrapped in brackets as per RFC 2732. 
""" parts = six.moves.urllib.parse.urlparse(uri) if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'): raise ValueError('Invalid scheme %s' % parts.scheme) ipv6_md = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', parts.netloc) if ipv6_md: host = ipv6_md.group(1) port = ipv6_md.group(2) or '11300' port = port.lstrip(':') elif ':' in parts.netloc: host, port = parts.netloc.rsplit(':', 1) else: host = parts.netloc port = 11300 port = int(port) return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode) def _reset_state(self): self._watchlist = set(['default']) self.current_tube = 'default' self.initial_watch = True self.socket = None def __repr__(self): return '{0}({1!r}, {2!r})'.format(self.__class__.__name__, self.host, self.port) # pragma: no cover def __str__(self): return '{0} - watching:{1}, current:{2}'.format( # pragma: no cover repr(self), self._watchlist, self.current_tube # pragma: no cover ) # pragma: no cover @property def _socket(self): if self.socket is None: self.socket = socket.create_connection((self.host, self.port), timeout=self.socket_timeout) self._re_establish_use_watch() return self.socket def _re_establish_use_watch(self): """Call after a close/re-connect. Automatically re-establishes the USE and WATCH configs previously setup. """ if self.current_tube != self.desired_tube: self.use(self.desired_tube) if self._watchlist != self.desired_watchlist: self.watchlist = self.desired_watchlist def close(self): """Close any open connection to the Beanstalk server. This object is still safe to use after calling :func:`close()` ; it will automatically reconnect and re-establish any open watches / uses. It is a logic error to close the connection while you have a reserved job """ if self.socket is not None: self.socket.close() self._reset_state() @contextmanager def _sock_ctx(self): yield self._socket def _receive_data_with_prefix(self, prefix, sock): buf = b'' target_len = len(prefix) + 28 while b'\r\n' not in buf: message = sock.recv(target_len - len(buf)) if not message: break buf += message if b' ' not in buf: error = buf.rstrip() raise BeanstalkError(error) first_word, rest = buf.split(b' ', 1) if first_word != prefix: raise BeanstalkError(first_word) return self._receive_data(sock, rest) def _receive_id_and_data_with_prefix(self, prefix, sock): buf = b'' target_len = len(prefix) + 28 while b'\r\n' not in buf: message = sock.recv(target_len - len(buf)) if not message: break buf += message if b' ' not in buf: error = buf.rstrip() raise BeanstalkError(error) first_word, rest = buf.split(b' ', 1) if first_word != prefix: raise BeanstalkError(first_word) the_id, rest = rest.split(b' ', 1) return int(the_id), self._receive_data(sock, rest) def _receive_data(self, sock, initial=None): if initial is None: initial = sock.recv(12) byte_length, rest = initial.split(b'\r\n', 1) byte_length = int(byte_length) + 2 buf = [rest] bytes_read = len(rest) while bytes_read < byte_length: message = sock.recv(min(4096, byte_length - bytes_read)) if not message: break bytes_read += len(message) buf.append(message) bytez = b''.join(buf)[:-2] if self.auto_decode: return bytez.decode('utf-8') else: return bytez def _receive_id(self, sock): status, gid = self._receive_name(sock) return status, int(gid) def _receive_name(self, sock): message = sock.recv(1024) if b' ' in message: status, rest = message.split(b' ', 1) return status, rest.rstrip() else: raise BeanstalkError(message.rstrip()) def _receive_word(self, sock, *expected_words): message = sock.recv(1024).rstrip() if 
message not in expected_words: raise BeanstalkError(message) return message def _send_message(self, message, sock): if isinstance(message, bytes): if not message.endswith(b'\r\n'): message += b'\r\n' return sock.sendall(message) else: if not message.endswith('\r\n'): message += '\r\n' return sock.sendall(message.encode('utf-8')) def list_tubes(self): """Return a list of tubes that this beanstalk instance knows about :rtype: list of tubes """ with self._sock_ctx() as sock: self._send_message('list-tubes', sock) body = self._receive_data_with_prefix(b'OK', sock) tubes = yaml_load(body) return tubes def stats(self): """Return a dictionary with a bunch of instance-wide statistics :rtype: dict """ with self._sock_ctx() as socket: self._send_message('stats', socket) body = self._receive_data_with_prefix(b'OK', socket) stats = yaml_load(body) return stats def put_job(self, data, pri=65536, delay=0, ttr=120): """Insert a new job into whatever queue is currently USEd :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int .. seealso:: :func:`put_job_into()` Put a job into a specific tube :func:`using()` Insert a job using an external guard """ with self._sock_ctx() as socket: message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format( pri=pri, delay=delay, ttr=ttr, datalen=len(data) ).encode('utf-8') if not isinstance(data, bytes): data = data.encode('utf-8') message += data message += b'\r\n' self._send_message(message, socket) return self._receive_id(socket) def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120): """Insert a new job into a specific queue. Wrapper around :func:`put_job`. :param tube_name: Tube name :type tube_name: str :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int .. seealso:: :func:`put_job()` Put a job into whatever the current tube is :func:`using()` Insert a job using an external guard """ with self.using(tube_name) as inserter: return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr) @property def watchlist(self): return self._watchlist @watchlist.setter def watchlist(self, tubes): """Set the watchlist to the given tubes :param tubes: A list of tubes to watch Automatically un-watches any tubes that are not on the target list """ tubes = set(tubes) for tube in tubes - self._watchlist: self.watch(tube) for tube in self._watchlist - tubes: self.ignore(tube) def watch(self, tube): """Add the given tube to the watchlist. :param tube: Name of the tube to add to the watchlist Note: Initially, all connections are watching a tube named "default". If you manually call :func:`watch()`, we will un-watch the "default" tube. To keep it in your list, first call :func:`watch()` with the other tubes, then call :func:`watch()` with "default". 
""" with self._sock_ctx() as socket: self.desired_watchlist.add(tube) if tube not in self._watchlist: self._send_message('watch {0}'.format(tube), socket) self._receive_id(socket) self._watchlist.add(tube) if self.initial_watch: if tube != 'default': self.ignore('default') self.initial_watch = False def ignore(self, tube): """Remove the given tube from the watchlist. :param tube: Name of tube to remove from the watchlist If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist to prevent the list from being empty. See :func:`watch()` for more unformation. """ with self._sock_ctx() as socket: if tube not in self._watchlist: raise KeyError(tube) if tube != 'default': self.desired_watchlist.remove(tube) if tube in self._watchlist: self._send_message('ignore {0}'.format(tube), socket) self._receive_id(socket) self._watchlist.remove(tube) if not self._watchlist: self._watchlist.add('default') def stats_job(self, job_id): """Fetch statistics about a single job :rtype: dict """ with self._sock_ctx() as socket: if hasattr(job_id, 'job_id'): job_id = job_id.job_id self._send_message('stats-job {0}'.format(job_id), socket) body = self._receive_data_with_prefix(b'OK', socket) job_status = yaml_load(body) return job_status def stats_tube(self, tube_name): """Fetch statistics about a single tube :param tube_name: Tube to fetch stats about :rtype: dict """ with self._sock_ctx() as socket: self._send_message('stats-tube {0}'.format(tube_name), socket) body = self._receive_data_with_prefix(b'OK', socket) return yaml_load(body) def reserve_job(self, timeout=5): """Reserve a job for this connection. Blocks for TIMEOUT secionds and raises TIMED_OUT if no job was available :param timeout: Time to wait for a job, in seconds. :type timeout: int """ timeout = int(timeout) if self.socket_timeout is not None: if timeout >= self.socket_timeout: raise ValueError('reserve_job timeout must be < socket timeout') if not self._watchlist: raise ValueError('Select a tube or two before reserving a job') with self._sock_ctx() as socket: self._send_message('reserve-with-timeout {0}'.format(timeout), socket) job_id, job_data = self._receive_id_and_data_with_prefix(b'RESERVED', socket) return Job(job_id, job_data) def _peek_common(self, typ): """Common implementation for the peek_* functions""" with self._sock_ctx() as socket: self._send_message('peek-{0}'.format(typ), socket) job_id, job_data = self._receive_id_and_data_with_prefix(b'FOUND', socket) return Job(job_id, job_data) def peek_ready(self): """Peek at the job job on the ready queue. :rtype: :class:`Job` """ return self._peek_common('ready') def peek_delayed(self): """Peek at the job job on the delayed queue""" return self._peek_common('delayed') def peek_buried(self): """Peek at the top job on the buried queue""" return self._peek_common('buried') def _common_iter(self, kallable, error): while True: try: job = kallable() except BeanstalkError as e: if e.message != error: raise break yield job def reserve_iter(self): """Reserve jobs as an iterator. 
Ends iteration when there are no more jobs immediately available""" return self._common_iter(lambda: self.reserve_job(0), 'TIMED_OUT') def peek_ready_iter(self): """Peek at ready jobs in sequence""" return self._common_iter(self.peek_ready, 'NOT_FOUND') def peek_delayed_iter(self): """Peek at delayed jobs in sequence""" return self._common_iter(self.peek_delayed, 'NOT_FOUND') def peek_buried_iter(self): """Peek at buried jobs in sequence""" return self._common_iter(self.peek_buried, 'NOT_FOUND') def delete_job(self, job_id): """Delete the given job id. The job must have been previously reserved by this connection""" if hasattr(job_id, 'job_id'): job_id = job_id.job_id with self._sock_ctx() as socket: self._send_message('delete {0}'.format(job_id), socket) self._receive_word(socket, b'DELETED') def bury_job(self, job_id, pri=65536): """Mark the given job_id as buried. The job must have been previously reserved by this connection :param job_id: Job to bury :param pri: Priority for the newly-buried job. If not passed, will keep its current priority :type pri: int """ if hasattr(job_id, 'job_id'): job_id = job_id.job_id with self._sock_ctx() as socket: self._send_message('bury {0} {1}'.format(job_id, pri), socket) return self._receive_word(socket, b'BURIED') def release_job(self, job_id, pri=65536, delay=0): """Put a job back on the queue to be processed (indicating that you've aborted it) You can only release a job which you have reserved using :func:`reserve_job()` or :func:`reserve_iter()`. :param job_id: Job ID to return :param pri: New priority (if not passed, will use old priority) :type pri: int :param delay: New delay for job (if not passed, will use 0) :type delay: int """ if hasattr(job_id, 'job_id'): job_id = job_id.job_id with self._sock_ctx() as socket: self._send_message('release {0} {1} {2}\r\n'.format(job_id, pri, delay), socket) return self._receive_word(socket, b'RELEASED', b'BURIED') def kick_job(self, job_id): """Kick the given job id. The job must either be in the DELAYED or BURIED state and will be immediately moved to the READY state.""" if hasattr(job_id, 'job_id'): job_id = job_id.job_id with self._sock_ctx() as socket: self._send_message('kick-job {0}'.format(job_id), socket) self._receive_word(socket, b'KICKED') def use(self, tube): """Start producing jobs into the given tube. :param tube: Name of the tube to USE Subsequent calls to :func:`put_job` insert jobs into this tube. """ with self._sock_ctx() as socket: if self.current_tube != tube: self.desired_tube = tube self._send_message('use {0}'.format(tube), socket) self._receive_name(socket) self.current_tube = tube @contextmanager def using(self, tube): """Context-manager to insert jobs into a specific tube :param tube: Tube to insert to Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube .. seealso:: :func:`use()` Change the default tube :func:`put_job()` Put a job into whatever the current tube is :func:`put_job_into()` Put a job into a specific tube """ try: current_tube = self.current_tube self.use(tube) yield BeanstalkInsertingProxy(self, tube) finally: self.use(current_tube) def kick_jobs(self, num_jobs): """Kick some number of jobs from the buried queue onto the ready queue. 
:param num_jobs: Number of jobs to kick :type num_jobs: int If not that many jobs are in the buried queue, it will kick as many as it can.""" with self._sock_ctx() as socket: self._send_message('kick {0}'.format(num_jobs), socket) return self._receive_id(socket) def pause_tube(self, tube, delay=3600): """Pause a tube for some number of seconds, preventing it from issuing jobs. :param delay: Time to pause for, in seconds :type delay: int There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube. .. seealso:: :func:`unpause_tube()` """ with self._sock_ctx() as socket: delay = int(delay) self._send_message('pause-tube {0} {1}'.format(tube, delay), socket) return self._receive_word(socket, b'PAUSED') def unpause_tube(self, tube): """Unpause a tube which was previously paused with :func:`pause_tube()`. .. seealso:: :func:`pause_tube()` """ with self._sock_ctx() as socket: self._send_message('pause-tube {0} 0'.format(tube), socket) return self._receive_word(socket, b'PAUSED')
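# ---------------------------------------------------------------------------
# A minimal end-to-end sketch of the producer/consumer flow exposed by the
# client above. The client class name, constructor arguments, and handle()
# are assumptions (the class itself is defined earlier in this module and not
# shown here), and the Job attributes are assumed to be the job_id/job_data
# pair constructed above; the method calls (put_job_into, watch, reserve_iter,
# delete_job, bury_job) match the implementation above.

def drain_tube_example():
    client = BeanstalkClient('127.0.0.1', 11300)  # hypothetical constructor

    # Produce: insert a job into a specific tube without changing the USEd tube.
    client.put_job_into('emails', '{"to": "[email protected]"}', pri=65536, ttr=120)

    # Consume: watch the tube, then drain whatever is immediately available.
    client.watch('emails')
    for job in client.reserve_iter():
        try:
            handle(job.job_data)      # hypothetical application callback
            client.delete_job(job)    # acknowledge: remove the job permanently
        except Exception:
            client.bury_job(job)      # park the failed job for later inspection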
#!/usr/bin/env python # -*- coding: utf-8 -*- # # File: xmlhandler.py # by Arzaroth Lekva # [email protected] # from __future__ import print_function, absolute_import, unicode_literals import rapidxml from collections import OrderedDict, defaultdict from six import add_metaclass from celestia.utility import PONY_LIST from celestia.utility.defaultordereddict import DefaultOrderedDict from celestia.utility.utility import (Pony, Inventory, MissingPonies, Currency, PlayerData, Clearables, Foes, Zone, Shops, Quest) class XmlDescriptor(object): def __init__(self): self.name = None def __get__(self, instance, objtyp=None): if not instance.__dict__.get('_' + self.name, None): instance.__dict__['_' + self.name] = getattr(instance, '_get_' + self.name)() return instance.__dict__['_' + self.name] class XmlMeta(type): def __new__(cls, name, bases, attrs): for key, value in attrs.items(): if isinstance(value, XmlDescriptor): value.name = key return super(XmlMeta, cls).__new__(cls, name, bases, attrs) @add_metaclass(XmlMeta) class XmlHandler(object): ponies = XmlDescriptor() inventory = XmlDescriptor() missing_ponies = XmlDescriptor() currencies = XmlDescriptor() player_infos = XmlDescriptor() actions = XmlDescriptor() zones = XmlDescriptor() quests = XmlDescriptor() _mapzones = XmlDescriptor() def __init__(self, xml_data): print('Parsing XML tree...') self.xmlobj = rapidxml.RapidXml(bytearray(xml_data)) def _get__mapzones(self): if type(self.xmlobj['MLP_Save']['MapZone']) != list: return [self.xmlobj['MLP_Save']['MapZone']] return self.xmlobj['MLP_Save']['MapZone'] def _filtered_actions(self, ID): res = defaultdict(dict) for typ, actions in self.actions['Ponies'].items(): for action, tags in actions.items(): items = tags[0]['Item'] if type(items) != list: items = [items] tag = [i for i in items if i['@Category'].value == ID] if not tag: tag = self.xmlobj.allocate_node('Item') tag.append_attribute(self.xmlobj.allocate_attribute('Category', ID)) tag.append_attribute(self.xmlobj.allocate_attribute('Value', '0')) tags[0].append_node(tag) tag = [tag] res[typ][action] = tag return {'Pony': res, 'Global': self.actions['Global']} def _get_ponies(self): res = OrderedDict() for mapzone in self._mapzones: try: ponyobjects = mapzone['GameObjects']['Pony_Objects'] except KeyError: pass else: for ponytag in ponyobjects: ID = ponytag["@ID"].value res[ID] = Pony(ponytag, self._filtered_actions(ID), None if ID not in PONY_LIST else PONY_LIST[ID]) return res def _get_inventory(self): try: storage = self.xmlobj['MLP_Save']['PlayerData']['Storage'] except KeyError: storage = self.xmlobj.allocate_node('Storage') self.xmlobj['MLP_Save']['PlayerData'].append_node(storage) return Inventory(storage, self.xmlobj) def _get_missing_ponies(self): return MissingPonies(self.ponies, self.inventory.ponies, PONY_LIST) def _get_currencies(self): playerdata = self.xmlobj['MLP_Save']['PlayerData'] res = DefaultOrderedDict(OrderedDict) main = res['Main currencies'] main['Bits'] = Currency('@Coins', 'Bits', playerdata) main['Gems'] = Currency('@Hearts', 'Gems', playerdata) main['Hearts'] = Currency('@Social', 'Hearts', playerdata) shards = playerdata['Shards'] for i in ('Loyalty', 'Honesty', 'Kindness', 'Generosity', 'Laughter', 'Magic'): res['Shards'][i + ' shards'] = Currency('@' + i, i + ' shards', shards, 999) try: # Minecart Update (1.8) main['Wheels'] = Currency('@Wheels', 'Wheels', playerdata['Minecart'], 5) except: pass try: # Dragon Update (2.3) main['Sapphires'] = Currency('@BossEventCurrency', 'Sapphires', playerdata) except: 
pass try: # Everfree Update (2.1) ingredients = playerdata['Ingredients'] zecora = res['Zecora ingredients'] zecora['Black Iris'] = Currency('@BlackIris', 'Black Iris', ingredients, 5) zecora['Garlic'] = Currency('@Garlic', 'Garlic', ingredients, 5) zecora['Sticky Sap'] = Currency('@GlueTree', 'Sticky Sap', ingredients, 5) zecora['Joke Plant'] = Currency('@PoisonJokePlant', 'Joke Plant', ingredients, 5) zecora['Purple Mushrooms'] = Currency('@PurpleGlowingMushrooms', 'Purple Mushrooms', ingredients, 5) zecora['Red Orchid'] = Currency('@RedOrchid', 'Red Orchid', ingredients, 5) except: pass try: # Boutique Update (2.6) popcurrency = playerdata['PopCurrency'] boutique = res['Boutique ingredients'] boutique['Flower'] = Currency('@PopCurrency1', 'Flower', popcurrency, 999) boutique['Button'] = Currency('@PopCurrency2', 'Button', popcurrency, 999) boutique['Thread'] = Currency('@PopCurrency3', 'Thread', popcurrency, 999) boutique['Fabric'] = Currency('@PopCurrency4', 'Fabric', popcurrency, 999) boutique['Ribbon'] = Currency('@PopCurrency5', 'Ribbon', popcurrency, 999) except: pass try: # Party Update (2.8) main['Party Points'] = Currency('@SpecialCurrency', 'Party Points', playerdata) except: pass try: # Countess Coloratura Update (3.0) tasktokens = playerdata['TaskTokens'] tokens = res['Tokens'] tokens['Camera'] = Currency('@Token_Camera', 'Camera', tasktokens) tokens['Leather Jacket'] = Currency('@Token_Leather_Jacket', 'Leather Jacket', tasktokens) tokens['Hair Dye'] = Currency('@Token_Hair_dye', 'Hair Dye', tasktokens) tokens['Note'] = Currency('@Token_Note', 'Note', tasktokens) tokens['Glasses'] = Currency('@Token_Glasses', 'Glasses', tasktokens) tokens['Microphone'] = Currency('@Token_Microphone', 'Microphone', tasktokens) tokens['Cap'] = Currency('@Token_Cap', 'Cap', tasktokens) except: pass return res def _get_player_infos(self): playerdata = self.xmlobj['MLP_Save']['PlayerData'] res = OrderedDict() res['Level'] = PlayerData('@Level', 'Level', playerdata, 135) res['XP'] = PlayerData('@XP', 'XP', playerdata) try: # VIP Update (2.7) res['VIP Points'] = PlayerData('@vip_points', 'VIP Points', playerdata['vip']) except: pass return res def _get_zones(self): mapzones_spec = OrderedDict(( ("0", {"name": "Ponyville", "foes": {"ID": "Parasprite_Objects", "name": "Parasprites"}}), ("1", {"name": "Canterlot", "foes": {"ID": "Changeling_Objects", "name": "Changelings"}}), ("2", {"name": "Sweet Apple Acres", "foes": {"ID": "Parasprite_Objects", "name": "Vampire Bats"}}), ("3", {"name": "Everfree Forest", "foes": {"ID": "Plunderseed_Vine_Objects", "name": "Plunderseed Vines"}}), )) zones = OrderedDict() for mapzone in self._mapzones: gameobjects = mapzone['GameObjects'] try: zone_spec = mapzones_spec[mapzone["@ID"].value] except KeyError: continue clearables = Clearables('Clearable_Objects', gameobjects) foes = Foes(zone_spec["foes"]["ID"], zone_spec["foes"]["name"], gameobjects) shops = Shops(gameobjects['Pony_House_Objects']) zones[mapzone["@ID"].value] = Zone(mapzone["@ID"].value, zone_spec["name"], clearables, foes, shops) return zones def _get_actions(self): datatable = self.xmlobj['MLP_Save']['QuestData']['GlobalDataTable']['DataTable'] objectcategories = datatable['ObjectCategoryList']['ObjectCategory'] globalcategories = datatable['GlobalCategoryList']['GlobalCategory'] actions = defaultdict(lambda: defaultdict(list)) glob = defaultdict(lambda: defaultdict(list)) def populate_dict(dikt, key='@ID', suffix='', inner=None): for typ in ('Complete', 'Started', 'ItemSelected'): for action in 
Pony.GameTypes.map: if (tag[key].value == ('PlayAction%s_%s%s' % (typ, action, suffix)) and (not inner or inner in tag)): dikt[typ][action].append(tag) for typ in ('Complete', 'Started'): if tag[key].value == ('ClearSkies_%s%s' % (typ, suffix)): dikt['ClearSkies'][typ].append(tag) if tag[key].value == ('PlayActionComplete%s' % suffix): dikt['Complete']['All'].append(tag) for tag in objectcategories: populate_dict(actions, inner='Item') for tag in globalcategories: for suffix in (' [TOTAL]', ' [TOTAL] Pony'): populate_dict(glob, key='@Category', suffix=suffix) return {'Global': glob, 'Ponies': actions} def _get_quests(self): activequestlist = self.xmlobj['MLP_Save']['QuestData']['ActiveQuestList'] return [Quest(quest) for quest in activequestlist] def pre_load(self): self.player_infos self.currencies self.ponies self.inventory self.missing_ponies self.zones self.quests self.actions def to_string(self): return self.xmlobj.unparse(raw=True) def prettify(self): return self.xmlobj.unparse(pretty=True, raw=True)
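# ---------------------------------------------------------------------------
# A short usage sketch for XmlHandler. The save-file path is illustrative; the
# calls themselves (construction from raw bytes, pre_load() to force every lazy
# XmlDescriptor to parse, and prettify() to re-serialise) are the methods
# defined above.
if __name__ == '__main__':
    with open('mlp_save.xml', 'rb') as fp:        # hypothetical save file
        handler = XmlHandler(fp.read())
    handler.pre_load()
    print('Found {} ponies and {} active quests'.format(len(handler.ponies),
                                                        len(handler.quests)))
    pretty_xml = handler.prettify()               # re-serialised XML, ready to write back out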
import logging import os import sys import tempfile import time import requests from .cache import CachingSession, FileCache # noqa if sys.version_info[0] < 3: # pragma: no cover from urllib2 import urlopen as urllib_urlopen from urllib2 import URLError as urllib_URLError import urlparse import robotparser _str_type = unicode else: # pragma: no cover PY3K = True from urllib.request import urlopen as urllib_urlopen from urllib.error import URLError as urllib_URLError from urllib import parse as urlparse from urllib import robotparser _str_type = str __version__ = '0.9.1' _user_agent = ' '.join(('scrapelib', __version__, requests.utils.default_user_agent())) class NullHandler(logging.Handler): def emit(self, record): pass _log = logging.getLogger('scrapelib') _log.addHandler(NullHandler()) class RobotExclusionError(requests.RequestException): """ Raised when an attempt is made to access a page denied by the host's robots.txt file. """ def __init__(self, message, url, user_agent): super(RobotExclusionError, self).__init__(message) self.url = url self.user_agent = user_agent class HTTPMethodUnavailableError(requests.RequestException): """ Raised when the supplied HTTP method is invalid or not supported by the HTTP backend. """ def __init__(self, message, method): super(HTTPMethodUnavailableError, self).__init__(message) self.method = method class HTTPError(requests.HTTPError): """ Raised when urlopen encounters a 4xx or 5xx error code and the raise_errors option is true. """ def __init__(self, response, body=None): message = '%s while retrieving %s' % (response.status_code, response.url) super(HTTPError, self).__init__(message) self.response = response self.body = body or self.response.text class FTPError(requests.HTTPError): def __init__(self, url): message = 'error while retrieving %s' % url super(FTPError, self).__init__(message) class ResultStr(_str_type): """ Wrapper for responses. Can treat identically to a ``str`` to get body of response, additional headers, etc. available via ``response`` attribute. 
""" def __new__(cls, scraper, response, requested_url): try: self = _str_type.__new__(cls, response.text) except TypeError: # use UTF8 as a default encoding if one couldn't be guessed response.encoding = 'utf8' self = _str_type.__new__(cls, response.text) self._scraper = scraper self.bytes = response.content self.encoding = response.encoding self.response = response # augment self.response # manually set: requested_url # aliases: code -> status_code self.response.requested_url = requested_url self.response.code = self.response.status_code return self class ThrottledSession(requests.Session): def _throttle(self): now = time.time() diff = self._request_frequency - (now - self._last_request) if diff > 0: _log.debug("sleeping for %fs" % diff) time.sleep(diff) self._last_request = time.time() else: self._last_request = now @property def requests_per_minute(self): return self._requests_per_minute @requests_per_minute.setter def requests_per_minute(self, value): if value > 0: self._throttled = True self._requests_per_minute = value self._request_frequency = 60.0 / value self._last_request = 0 else: self._throttled = False self._requests_per_minute = 0 self._request_frequency = 0.0 self._last_request = 0 def request(self, method, url, **kwargs): if self._throttled: self._throttle() return super(ThrottledSession, self).request(method, url, **kwargs) class RobotsTxtSession(requests.Session): def __init__(self): super(RobotsTxtSession, self).__init__() self._robot_parsers = {} self.follow_robots = True def _robot_allowed(self, user_agent, parsed_url): _log.info("checking robots permission for %s" % parsed_url.geturl()) robots_url = urlparse.urljoin(parsed_url.scheme + "://" + parsed_url.netloc, "robots.txt") try: parser = self._robot_parsers[robots_url] _log.info("using cached copy of %s" % robots_url) except KeyError: _log.info("grabbing %s" % robots_url) parser = robotparser.RobotFileParser() parser.set_url(robots_url) parser.read() self._robot_parsers[robots_url] = parser return parser.can_fetch(user_agent, parsed_url.geturl()) def request(self, method, url, **kwargs): parsed_url = urlparse.urlparse(url) user_agent = (kwargs.get('headers', {}).get('User-Agent') or self.headers.get('User-Agent')) # robots.txt is http-only if (parsed_url.scheme in ('http', 'https') and self.follow_robots and not self._robot_allowed(user_agent, parsed_url)): raise RobotExclusionError( "User-Agent '%s' not allowed at '%s'" % ( user_agent, url), url, user_agent) return super(RobotsTxtSession, self).request(method, url, **kwargs) # this object exists because Requests assumes it can call # resp.raw._original_response.msg.getheaders() and we need to cope with that class DummyObject(object): def getheaders(self, name): return '' def get_all(self, name, default): return default _dummy = DummyObject() _dummy._original_response = DummyObject() _dummy._original_response.msg = DummyObject() class FTPAdapter(requests.adapters.BaseAdapter): def send(self, request, stream=False, timeout=None, verify=False, cert=None, proxies=None): if request.method != 'GET': raise HTTPMethodUnavailableError( "FTP requests do not support method '%s'" % request.method, request.method) try: real_resp = urllib_urlopen(request.url, timeout=timeout) # we're going to fake a requests.Response with this resp = requests.Response() resp.status_code = 200 resp.url = request.url resp.headers = {} resp._content = real_resp.read() resp.raw = _dummy return resp except urllib_URLError: raise FTPError(request.url) class RetrySession(requests.Session): def 
__init__(self): super(RetrySession, self).__init__() self._retry_attempts = 0 self.retry_wait_seconds = 10 # retry_attempts is a property so that it can't go negative @property def retry_attempts(self): return self._retry_attempts @retry_attempts.setter def retry_attempts(self, value): self._retry_attempts = max(value, 0) def accept_response(self, response, **kwargs): return response.status_code < 400 def request(self, method, url, retry_on_404=False, **kwargs): # the retry loop tries = 0 exception_raised = None while tries <= self.retry_attempts: exception_raised = None try: resp = super(RetrySession, self).request(method, url, **kwargs) # break from loop on an accepted response if self.accept_response(resp) or (resp.status_code == 404 and not retry_on_404): break except (requests.HTTPError, requests.ConnectionError, requests.Timeout) as e: exception_raised = e # if we're going to retry, sleep first tries += 1 if tries <= self.retry_attempts: # twice as long each time wait = (self.retry_wait_seconds * (2 ** (tries - 1))) _log.debug('sleeping for %s seconds before retry' % wait) time.sleep(wait) # out of the loop, either an exception was raised or we had a success if exception_raised: raise exception_raised else: return resp # compose sessions, order matters class Scraper(RobotsTxtSession, # first, check robots.txt CachingSession, # cache responses ThrottledSession, # throttle requests RetrySession, # do retries ): """ Scraper is the most important class provided by scrapelib (and generally the only one to be instantiated directly). It provides a large number of options allowing for customization. Usage is generally just creating an instance with the desired options and then using the :meth:`urlopen` & :meth:`urlretrieve` methods of that instance. :param raise_errors: set to True to raise a :class:`HTTPError` on 4xx or 5xx response :param requests_per_minute: maximum requests per minute (0 for unlimited, defaults to 60) :param follow_robots: respect robots.txt files (default: True) :param retry_attempts: number of times to retry if timeout occurs or page returns a (non-404) error :param retry_wait_seconds: number of seconds to retry after first failure, subsequent retries will double this wait """ def __init__(self, raise_errors=True, requests_per_minute=60, follow_robots=True, retry_attempts=0, retry_wait_seconds=5, header_func=None): super(Scraper, self).__init__() self.mount('ftp://', FTPAdapter()) # added by this class self.raise_errors = raise_errors # added by ThrottledSession self.requests_per_minute = requests_per_minute # added by RobotsTxtSession self.follow_robots = follow_robots # added by RetrySession self.retry_attempts = retry_attempts self.retry_wait_seconds = retry_wait_seconds # added by this class self._header_func = header_func # added by CachingSession self.cache_storage = None self.cache_write_only = True # non-parameter options self.timeout = None self.user_agent = _user_agent @property def user_agent(self): return self.headers['User-Agent'] @user_agent.setter def user_agent(self, value): self.headers['User-Agent'] = value @property def disable_compression(self): return self.headers['Accept-Encoding'] == 'text/*' @disable_compression.setter def disable_compression(self, value): # disabled: set encoding to text/* if value: self.headers['Accept-Encoding'] = 'text/*' # enabled: if set to text/* pop, otherwise leave unmodified elif self.headers.get('Accept-Encoding') == 'text/*': self.headers['Accept-Encoding'] = 'gzip, deflate, compress' def request(self, method, url, 
**kwargs): # apply global timeout timeout = kwargs.pop('timeout', self.timeout) if self._header_func: headers = requests.structures.CaseInsensitiveDict( self._header_func(url)) else: headers = {} try: # requests < 1.2.2 headers = requests.sessions.merge_kwargs(headers, self.headers) headers = requests.sessions.merge_kwargs(kwargs.pop('headers', {}), headers) except AttributeError: # requests >= 1.2.2 headers = requests.sessions.merge_setting(headers, self.headers) headers = requests.sessions.merge_setting( kwargs.pop('headers', {}), headers) return super(Scraper, self).request(method, url, timeout=timeout, headers=headers, **kwargs) def urlopen(self, url, method='GET', body=None, retry_on_404=False, **kwargs): """ Make an HTTP request and return a :class:`ResultStr` object. If an error is encountered may raise any of the scrapelib `exceptions`_. :param url: URL for request :param method: any valid HTTP method, but generally GET or POST :param body: optional body for request, to turn parameters into an appropriate string use :func:`urllib.urlencode()` :param retry_on_404: if retries are enabled, retry if a 404 is encountered, this should only be used on pages known to exist if retries are not enabled this parameter does nothing (default: False) """ _log.info("{0} - {1}".format(method.upper(), url)) resp = self.request(method, url, data=body, retry_on_404=retry_on_404, **kwargs) if self.raise_errors and not self.accept_response(resp): raise HTTPError(resp) else: return ResultStr(self, resp, url) def urlretrieve(self, url, filename=None, method='GET', body=None, dir=None, **kwargs): """ Save result of a request to a file, similarly to :func:`urllib.urlretrieve`. If an error is encountered may raise any of the scrapelib `exceptions`_. A filename may be provided or :meth:`urlretrieve` will safely create a temporary file. If a directory is provided, a file will be given a random name within the specified directory. Either way, it is the responsibility of the caller to ensure that the temporary file is deleted when it is no longer needed. :param url: URL for request :param filename: optional name for file :param method: any valid HTTP method, but generally GET or POST :param body: optional body for request, to turn parameters into an appropriate string use :func:`urllib.urlencode()` :param dir: optional directory to place file in :returns filename, response: tuple with filename for saved response (will be same as given filename if one was given, otherwise will be a temp file in the OS temp directory) and a :class:`Response` object that can be used to inspect the response headers. """ result = self.urlopen(url, method, body, **kwargs) if not filename: fd, filename = tempfile.mkstemp(dir=dir) f = os.fdopen(fd, 'wb') else: f = open(filename, 'wb') f.write(result.bytes) f.close() return filename, result.response _default_scraper = Scraper(follow_robots=False, requests_per_minute=0) def urlopen(url, method='GET', body=None, **kwargs): # pragma: no cover return _default_scraper.urlopen(url, method, body, **kwargs)
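# ---------------------------------------------------------------------------
# A short usage sketch of the composed Scraper session defined above. The URLs
# are placeholders; everything else (the constructor keywords, urlopen(),
# urlretrieve(), and the ResultStr attributes) comes from this module.

def scraper_example():
    s = Scraper(requests_per_minute=30,      # throttled via ThrottledSession
                follow_robots=True,          # robots.txt honoured via RobotsTxtSession
                retry_attempts=2,            # retried with backoff via RetrySession
                raise_errors=True)           # 4xx/5xx raise HTTPError
    s.user_agent = 'example-bot/0.1'

    page = s.urlopen('http://example.com/')  # returns a ResultStr (a str subclass)
    print(page.response.code, len(page.bytes))

    # Save a response straight to disk; a temp file is created when no name is given.
    filename, response = s.urlretrieve('http://example.com/data.csv')
    print('saved to', filename)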
import inspect import enum import abc import collections from . import utils, errors from .helpnode import HelpNode from .enums import PrivilegeLevel from .servermodulewrapper import ServerModuleWrapper ########################################################################################### # UTILITY FUNCTIONS ####################################################################### ########################################################################################### # Returns the appropriate function object from a dictionary of function objects filled # by the "add()" decorator while also handling checks. As long as the dictionary has # been filled correctly with properly decorated function objects, it is guaranteed to # either return a working command function, or raise appropriate exceptions. # # THROWS: UnknownCommandError - Thrown if cmd_name is not in the dictionary. # THROWS: CommandPrivilegeError - Thrown if cmd_name is in the dictionary, but # privilege_level is not high enough to execute it. async def get(cmd_dict, cmd_name, privilege_level): try: cmd_to_execute = cmd_dict[cmd_name] if privilege_level < (await cmd_to_execute.cmd_meta.node_min_priv()): raise errors.CommandPrivilegeError return cmd_to_execute except KeyError: raise errors.InvalidCommandArgumentsError # Produces a help content string out of a dictionary of command functions. async def summarise_commands(cmd_dict, privilege_level=None): # TODO: NOTHING IS ACTUALLY DONE WITH THE PRIVILEGE LEVEL YET. if privilege_level is None: privilege_level = PrivilegeLevel.get_lowest_privilege() seen = set() # Seen CommandMeta objects cats_dict = collections.defaultdict(lambda: []) # cats_dict - Maps category names to lists of command help summaries. # Those without categories are in the "" category. for (cmd_name, cmd_obj) in cmd_dict.items(): node = cmd_obj.cmd_meta if node in seen: continue seen.add(node) cat_name = await node.node_category() # Compose the string to append to the list within the relevant category. buf = await node.get_help_summary(privilege_level) cats_dict[cat_name].append(buf) # Separate the no-category category. This will be dealt with separately. no_cat = cats_dict[""] del cats_dict[""] # Sort each category and put the category names into a list. cat_name_list = [] for (cat_name, cat) in cats_dict.items(): cat_name_list.append(cat_name) cat.sort(key=lambda e: e.lower()) # Sort the category names cat_name_list.sort(key=lambda e: e.lower()) # Put it all together buf = "" if len(no_cat) > 0: no_cat.sort(key=lambda e: e.lower()) buf += "\n".join(no_cat) for cat_name in cat_name_list: buf += "\n\n**{}**\n".format(cat_name) buf += "\n".join(cats_dict[cat_name]) return buf # Produces a help content string out of a list of ServerModuleWrapper objects # and CoreCommandsHelpPage objects. async def summarise_server_modules(modules, privilege_level): assert isinstance(privilege_level, PrivilegeLevel) cats_dict = collections.defaultdict(lambda: []) # cats_dict - Maps category names to lists of module summaries. # Those without categories are in the "" category. for module in modules: cat_name = await module.node_category() # Compose the string to append to the list within the relevant category. buf = await module.get_help_summary(privilege_level) if isinstance(module, ServerModuleWrapper): buf = buf.format(p="{p}", grp="{grp}" + module.module_cmd_aliases[0] + " ") cats_dict[cat_name].append(buf) # Separate the no-category category. This will be dealt with separately.
no_cat = cats_dict[""] del cats_dict[""] # Sort each category and put the category names into a list. cat_name_list = [] for (cat_name, cat) in cats_dict.items(): cat_name_list.append(cat_name) cat.sort(key=lambda e: e.lower()) # Sort the category names cat_name_list.sort(key=lambda e: e.lower()) # Put it all together buf = "" if len(no_cat) > 0: no_cat.sort(key=lambda e: e.lower()) buf += "\n".join(no_cat) for cat_name in cat_name_list: buf += "\n\n**{}**\n".format(cat_name) buf += "\n".join(cats_dict[cat_name]) return buf ########################################################################################### # FUNCTION DECORATORS ##################################################################### ########################################################################################### # IMPORTANT: Command function decorators must never wrap! # Wrapping functions may hide other decorated attributes. # Decorator for adding commands to a dictionary. # PARAMETER: cmd_dict - The dictionary in which the command is to be added to. # PARAMETER: *cmd_names - List of names the command is to be mapped to. # PARAMETER: default - (bool) If true, list the command as a default command. # PARAMETER: top - (bool or list<str>) For defining top-level aliases. # If False, command does not have any top-level aliases. # If True, all of the aliases are also top-level aliases. # If it's a string or a non-empty list of strings, then those # strings are used as top-level aliases. # PARAMETER: category - (str or None) For defining a category name, used by # certain HelpNode aggregators for organizing lines. # If None, then no category. # If a string, then that string is used as the category # name. # PARAMETER: minimum_privilege - A minimum privilege level normally required # to use the command. # (Implementation note: If minimum_privilege # is None, then the default value in the # CommandMeta object is kept.) # # THE FOLLOWING PARAMETER IS CURRENTLY STILL PLANNED AND THUS UNUSED. # # PARAMETER: help_pages - A CommandHelpPage object (or list of) in which the # # command is to be added to. # Note: minimum_privilege is still used as def add(cmd_dict, *cmd_names, **kwargs): assert isinstance(cmd_dict, dict) # Get kwargs default = bool(kwargs.get("default", False)) top_kwarg = kwargs.get("top", False) category = kwargs.get("category", None) minimum_privilege = kwargs.get("minimum_privilege", None) def function_decorator(function): _ensure_cmd_obj(function) function.cmd_meta.set_aliases(cmd_names) top = top_kwarg if isinstance(top, bool): if top: function.cmd_meta.set_top_aliases_existing() else: if isinstance(top, str): top = [top] function.cmd_meta.set_top_aliases_explicitly(list(top)) if not category is None: assert isinstance(category, str) function.cmd_meta.set_help_category(category) if not minimum_privilege is None: assert isinstance(minimum_privilege, PrivilegeLevel) function.cmd_meta.set_min_priv(minimum_privilege) # Add the function to cmd_dict for cmd_name in cmd_names: assert isinstance(cmd_name, str) if cmd_name in cmd_dict: raise RuntimeError("Command with alias '{}' already exists.".format(cmd_name)) cmd_dict[cmd_name] = function if default: if "" in cmd_dict: raise RuntimeError("A default command has already been defined.") cmd_dict[""] = function return function return function_decorator # Decorator that sets a command's minimum required privilege level (stored on the # function's CommandMeta object). This level is checked before the command executes.
def minimum_privilege(minimum_privilege_level): assert isinstance(minimum_privilege_level, PrivilegeLevel) def function_decorator(function): _ensure_cmd_obj(function) function.cmd_meta.set_min_priv(minimum_privilege_level) return function return function_decorator # Decorator adds an attribute named "help_category" to a function object. # This attribute is used when composing help messages, for grouping. # When no category has been assigned (i.e. the decorator is not used), # the composed help message will group the command along with all the # other ungrouped commands. def category(text): assert isinstance(text, str) def function_decorator(function): _ensure_cmd_obj(function) function.cmd_meta.set_help_category(text) return function return function_decorator ########################################################################################### ########################################################################################### ########################################################################################### def _ensure_cmd_obj(function): if not hasattr(function, "cmd_meta"): function.cmd_meta = CommandMeta(function) return class CommandMeta(HelpNode): """ Stores information about a command. MOTIVATION The original implementation of command functions involved "duct taping" new attributes to command functions, with no clear organization of this. Not only is data neatness an issue, but the code to handle these command function objects has to explicitly check for the existence of these attributes, so data access is also an issue. CommandMeta is designed to tidy all of this up. """ DEFAULT_HELP_STR = "`{cmd}`" class TopLevelAliasAction(enum.Enum): NO_TOP_LEVEL_ALIASES = 0 USE_EXISTING_ALIASES = 1 USE_NEW_ALIASES = 2 def __init__(self, cmd_fn): self._cmd_fn = cmd_fn # Attributes for aliases self._aliases = None self._top_level_alias_action = self.TopLevelAliasAction.NO_TOP_LEVEL_ALIASES self._top_level_aliases = None # Attributes for privilege levels self._minimum_privilege = PrivilegeLevel.get_lowest_privilege() # Attributes for leaf help content self._help_detail = None # Attributes for module help content self._help_category = "" # No category by default self._help_summary = None # Parsing the docstring to get _help_detail and _help_summary docstr = inspect.getdoc(cmd_fn) if docstr is None or len(docstr) == 0: # Give the default value. self._help_detail = self.DEFAULT_HELP_STR self._help_summary = self.DEFAULT_HELP_STR else: docstr = docstr.strip() self._help_detail = docstr # TODO: Is it necessary? # Summaries include the first few lines of the string up until the first # empty line. lines = [] for line in docstr.splitlines(): if len(line) == 0: break lines.append(line) assert len(lines) > 0 assert len(lines[0].strip()) > 0 self._help_summary = "\n".join(lines) return def set_aliases(self, string_list): self._aliases = list(string_list) return def set_min_priv(self, privilege_level): self._minimum_privilege = privilege_level return def set_help_category(self, string): assert isinstance(string, str) and len(string) > 0 self._help_category = string return # Make top-level aliases match the existing aliases. def set_top_aliases_existing(self): self._top_level_alias_action = self.TopLevelAliasAction.USE_EXISTING_ALIASES return # Sets top-level aliases explicitly. 
def set_top_aliases_explicitly(self, str_list): self._top_level_alias_action = self.TopLevelAliasAction.USE_NEW_ALIASES self._top_level_aliases = list(str_list) return def get_aliases(self): return list(self._aliases) def get_top_aliases(self): if self._top_level_alias_action is self.TopLevelAliasAction.USE_EXISTING_ALIASES: return list(self._aliases) elif self._top_level_alias_action is self.TopLevelAliasAction.USE_NEW_ALIASES: return list(self._top_level_aliases) else: return None ################################ ### HelpNode Implementations ### ################################ async def get_help_detail(self, locator_string, entry_string, privilege_level): assert isinstance(locator_string, str) and isinstance(entry_string, str) assert isinstance(privilege_level, PrivilegeLevel) buf = None if self._top_level_alias_action is self.TopLevelAliasAction.NO_TOP_LEVEL_ALIASES: buf = self._help_detail.format(p="{p}", grp="{grp}", cmd="{p}{grp}" + self._aliases[0]) else: buf = self._help_detail.format(p="{p}", grp="{grp}", cmd="{p}" + self.get_top_aliases()[0]) buf += "\n\n" buf0 = "" if not self._minimum_privilege is PrivilegeLevel.get_lowest_privilege(): buf0 = "**Required privilege level:** " buf0 += self._minimum_privilege.get_commonname() if (not privilege_level is None) and (privilege_level < self._minimum_privilege): buf += "**You do not have the correct privilege level to use this command.**\n" buf += buf0 + "\n**Your privilege level:** " buf += privilege_level.get_commonname() else: buf = (buf + buf0).strip() return buf async def get_help_summary(self, privilege_level): assert isinstance(privilege_level, PrivilegeLevel) if self._top_level_alias_action is self.TopLevelAliasAction.NO_TOP_LEVEL_ALIASES: return self._help_summary.format(p="{p}", grp="{grp}", cmd="{p}{grp}" + self._aliases[0]) else: return self._help_summary.format(p="{p}", grp="{grp}", cmd="{p}" + self.get_top_aliases()[0]) async def node_min_priv(self): return self._minimum_privilege async def node_category(self): return self._help_category
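# ---------------------------------------------------------------------------
# Sketch of how a module would register commands with the decorators above.
# The command dictionary, the coroutine signature, and the docstring text are
# illustrative; the decorator behaviour (alias registration, default command,
# help category, docstring-derived summary) is what add(), category() and
# CommandMeta implement above.

_example_cmdd = {}  # alias -> command function, filled in by add()

@add(_example_cmdd, "roll", "dice", default=True)
@category("Fun")
async def _cmd_roll(self, substr, msg, privilege_level):
    """`{cmd} [sides]` - Roll a die.

    Rolls a single die with the given number of sides (6 by default).
    """
    ...

# Dispatch then looks roughly like this (privilege checking happens inside get()):
#     cmd_fn = await get(_example_cmdd, "roll", privilege_level)
#     await cmd_fn(module_instance, substr, msg, privilege_level)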
# -*- encoding: UTF-8 -*- import math # import almath as m # python's wrapping of almath import sys from naoqi import ALProxy def StiffnessOn(proxy): # We use the "Body" name to signify the collection of all joints pNames = "Body" pStiffnessLists = 1.0 pTimeLists = 1.0 proxy.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists) def StiffnessOff(proxy): # We use the "Body" name to signify the collection of all joints pNames = "Body" pStiffnessLists = 0.0 pTimeLists = 1.0 proxy.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists) def StandUp(proxy): proxy.goToPosture("StandInit", 1.0) def SitDown(proxy): proxy.goToPosture("Sit", 1.0) def Walk(proxy,x,y,theta): proxy.moveTo(x, y, theta) self.onStopped() def gesture_1_handwave(robotIP) : # Choregraphe simplified export in Python. names = list() times = list() keys = list() names.append("HeadPitch") times.append([0.6]) keys.append([0.0459781]) names.append("HeadYaw") times.append([0.6]) keys.append([-0.0322559]) names.append("LAnklePitch") times.append([0.6]) keys.append([-0.352862]) names.append("LAnkleRoll") times.append([0.6]) keys.append([-0.00149202]) names.append("LElbowRoll") times.append([0.6]) keys.append([-0.492372]) names.append("LElbowYaw") times.append([0.6]) keys.append([-0.09515]) names.append("LHand") times.append([0.6]) keys.append([0.2372]) names.append("LHipPitch") times.append([0.6]) keys.append([-0.443284]) names.append("LHipRoll") times.append([0.6]) keys.append([-0.00609398]) names.append("LHipYawPitch") times.append([0.6]) keys.append([0.00310993]) names.append("LKneePitch") times.append([0.6]) keys.append([0.699462]) names.append("LShoulderPitch") times.append([0.6]) keys.append([-0.921976]) names.append("LShoulderRoll") times.append([0.6]) keys.append([0.25]) names.append("LWristYaw") times.append([0.6]) keys.append([0.11194]) names.append("RAnklePitch") times.append([0.6]) keys.append([-0.348176]) names.append("RAnkleRoll") times.append([0.6]) keys.append([0.00157595]) names.append("RElbowRoll") times.append([0.6]) keys.append([0.925044]) names.append("RElbowYaw") times.append([0.6]) keys.append([1.39897]) names.append("RHand") times.append([0.6]) keys.append([0.2448]) names.append("RHipPitch") times.append([0.6]) keys.append([-0.451038]) names.append("RHipRoll") times.append([0.6]) keys.append([-0.00302601]) names.append("RHipYawPitch") times.append([0.6]) keys.append([0.00310993]) names.append("RKneePitch") times.append([0.6]) keys.append([0.696478]) names.append("RShoulderPitch") times.append([0.6]) keys.append([1.40212]) names.append("RShoulderRoll") times.append([0.6]) keys.append([-0.069072]) names.append("RWristYaw") times.append([0.6]) keys.append([0.0597839]) try: # uncomment the following line and modify the IP if you use this script outside Choregraphe. motion = ALProxy("ALMotion", robotIP, 9559) #motion = ALProxy("ALMotion") motion.angleInterpolation(names, keys, times, True) except BaseException, err: print err def gesture_2(robotIP): # Choregraphe simplified export in Python. 
names = list() times = list() keys = list() names.append("HeadPitch") times.append([0.6]) keys.append([0.0459781]) names.append("HeadYaw") times.append([0.6]) keys.append([-0.0322559]) names.append("LAnklePitch") times.append([0.6]) keys.append([-0.348176]) names.append("LAnkleRoll") times.append([0.6]) keys.append([-0.00157595]) names.append("LElbowRoll") times.append([0.6]) keys.append([-0.925044]) names.append("LElbowYaw") times.append([0.6]) keys.append([-1.39897]) names.append("LHand") times.append([0.6]) keys.append([0.2448]) names.append("LHipPitch") times.append([0.6]) keys.append([-0.451038]) names.append("LHipRoll") times.append([0.6]) keys.append([0.00302601]) names.append("LHipYawPitch") times.append([0.6]) keys.append([0.00310993]) names.append("LKneePitch") times.append([0.6]) keys.append([0.696478]) names.append("LShoulderPitch") times.append([0.6]) keys.append([1.40212]) names.append("LShoulderRoll") times.append([0.6]) keys.append([0.069072]) names.append("LWristYaw") times.append([0.6]) keys.append([-0.0597839]) names.append("RAnklePitch") times.append([0.6]) keys.append([-0.348176]) names.append("RAnkleRoll") times.append([0.6]) keys.append([0.00157595]) names.append("RElbowRoll") times.append([0.6]) keys.append([0.925044]) names.append("RElbowYaw") times.append([0.6]) keys.append([1.39897]) names.append("RHand") times.append([0.6]) keys.append([0.2448]) names.append("RHipPitch") times.append([0.6]) keys.append([-0.451038]) names.append("RHipRoll") times.append([0.6]) keys.append([-0.00302601]) names.append("RHipYawPitch") times.append([0.6]) keys.append([0.00310993]) names.append("RKneePitch") times.append([0.6]) keys.append([0.696478]) names.append("RShoulderPitch") times.append([0.6]) keys.append([1.40212]) names.append("RShoulderRoll") times.append([0.6]) keys.append([-0.069072]) names.append("RWristYaw") times.append([0.6]) keys.append([0.0597839]) try: # uncomment the following line and modify the IP if you use this script outside Choregraphe. motion = ALProxy("ALMotion", robotIP, 9559) #motion = ALProxy("ALMotion") motion.angleInterpolation(names, keys, times, True) except BaseException, err: print err def squat(robotIP): # Choregraphe simplified export in Python. 
names = list() times = list() keys = list() names.append("HeadPitch") times.append([0.6]) keys.append([0.0674541]) names.append("HeadYaw") times.append([0.6]) keys.append([-0.0276539]) names.append("LAnklePitch") times.append([0.6]) keys.append([-1.18276]) names.append("LAnkleRoll") times.append([0.6]) keys.append([0.070606]) names.append("LElbowRoll") times.append([0.6]) keys.append([-1.03848]) names.append("LElbowYaw") times.append([0.6]) keys.append([-0.794654]) names.append("LHand") times.append([0.6]) keys.append([0.0192]) names.append("LHipPitch") times.append([0.6]) keys.append([-0.700996]) names.append("LHipRoll") times.append([0.6]) keys.append([-0.076658]) names.append("LHipYawPitch") times.append([0.6]) keys.append([-0.237728]) names.append("LKneePitch") times.append([0.6]) keys.append([2.10767]) names.append("LShoulderPitch") times.append([0.6]) keys.append([1.44959]) names.append("LShoulderRoll") times.append([0.6]) keys.append([0.0873961]) names.append("LWristYaw") times.append([0.6]) keys.append([0.0843279]) names.append("RAnklePitch") times.append([0.6]) keys.append([-1.1863]) names.append("RAnkleRoll") times.append([0.6]) keys.append([-0.078192]) names.append("RElbowRoll") times.append([0.6]) keys.append([1.02782]) names.append("RElbowYaw") times.append([0.6]) keys.append([0.823716]) names.append("RHand") times.append([0.6]) keys.append([0.0172]) names.append("RHipPitch") times.append([0.6]) keys.append([-0.698012]) names.append("RHipRoll") times.append([0.6]) keys.append([0.07214]) names.append("RHipYawPitch") times.append([0.6]) keys.append([-0.237728]) names.append("RKneePitch") times.append([0.6]) keys.append([2.10622]) names.append("RShoulderPitch") times.append([0.6]) keys.append([1.44967]) names.append("RShoulderRoll") times.append([0.6]) keys.append([-0.0844119]) names.append("RWristYaw") times.append([0.6]) keys.append([-0.0583339]) try: # uncomment the following line and modify the IP if you use this script outside Choregraphe. motion = ALProxy("ALMotion", robotIP, 9559) #motion = ALProxy("ALMotion") motion.angleInterpolation(names, keys, times, True) except BaseException, err: print err def main(robotIP,robotPort): #Setting the Proxies try: motionProxy = ALProxy("ALMotion", robotIP, robotPort) except Exception, e: print "Could not create proxy to ALMotion" print "Error was: ", e try: ttsProxy = ALProxy("ALTextToSpeech", robotIP, robotPort) except Exception, e: print "Could not create proxy to ALTextToSpeech" print "Error was: ", e try: postureProxy = ALProxy("ALRobotPosture", robotIP, robotPort) except Exception, e: print "Could not create proxy to ALRobotPosture" print "Error was: ", e # Turn on the Motors StiffnessOn(motionProxy) #StandUp StandUp(postureProxy) # #Walk 1 Meter # Walk(motionProxy,1,0,0) # gesture_1_handwave(robotIP) # StandUp(postureProxy) # gesture_2(robotIP) # #StandUp # StandUp(postureProxy) # # #Sit Down # squat(robotIP) # # SitDown(postureProxy) # # # Turn off the Motors # StiffnessOff(motionProxy) if __name__ == "__main__": robotIp = "169.254.44.123" #Set a default IP here robotPort = 9559 #Set default POort here # if len(sys.argv) < 2: # print "Usage python robotIP please" # else: # robotIp = sys.argv[1] # if len(sys.argv) > 2: # print "Usage python robotPort please" # else: # robotPort = int(sys.argv[2]) main(robotIp, robotPort)
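# ---------------------------------------------------------------------------
# The gesture functions above build three parallel lists with dozens of
# append() calls. Below is a compact, data-driven sketch of the same
# angleInterpolation call; send_pose is not part of the original script and
# the joint angles in the commented example are placeholders.

def send_pose(robotIP, joint_angles, duration=0.6, port=9559):
    names = list(joint_angles.keys())
    keys = [[joint_angles[name]] for name in names]   # one key frame per joint
    times = [[duration]] * len(names)                 # all joints reach the pose together
    try:
        motion = ALProxy("ALMotion", robotIP, port)
        motion.angleInterpolation(names, keys, times, True)
    except BaseException, err:
        print err

# Example (left-arm wave start position only):
# send_pose("169.254.44.123", {"LShoulderPitch": -0.92, "LShoulderRoll": 0.25})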
""" Spike train statistics plots ---------------------------- .. autosummary:: :toctree: toctree/statistics/ plot_isi_histogram plot_time_histogram plot_instantaneous_rates_colormesh """ # Copyright 2017-2022 by the Viziphant team, see `doc/authors.rst`. # License: Modified BSD, see LICENSE.txt.txt for details. import matplotlib.pyplot as plt import neo import numpy as np import quantities as pq from elephant import statistics from viziphant.utils import check_same_units def plot_isi_histogram(spiketrains, axes=None, bin_size=3 * pq.ms, cutoff=None, title='ISI distribution', legend=None, histtype='step'): """ Create a simple histogram plot to visualise an inter-spike interval (ISI) distribution of spike trains. Input spike trains are sorted in time prior to computing the ISI. If the input is a list of list of spike trains, as in the Example 3, the ISI of a population is concatenated from all neuron spike trains. Parameters ---------- spiketrains : neo.SpikeTrain or pq.Quantity or list A spike train or a list of spike trains the ISI to be computed from. axes : matplotlib.axes.Axes or None, optional Matplotlib axes handle. If set to None, new axes are created and returned. Default: None bin_size : pq.Quantity, optional The bin size for the histogram. Default: 3 ms cutoff : pq.Quantity or None, optional The largest ISI to consider. Otherwise, if set to None, all range of values are plotted. Typical cutoff values are ~250 ms. Default: None title : str, optional The axes title. Default: 'ISI distribution' legend : str or list of str or None, optional The axes legend labels. Default: None histtype : str Histogram type passed to matplotlib `hist` function. Default: 'step' Returns ------- axes : matplotlib.axes.Axes Examples -------- 1. Basic ISI histogram plot. .. plot:: :include-source: import quantities as pq import matplotlib.pyplot as plt from elephant.spike_train_generation import homogeneous_poisson_process from viziphant.statistics import plot_isi_histogram np.random.seed(12) spiketrain = homogeneous_poisson_process(rate=10*pq.Hz, t_stop=50*pq.s) plot_isi_histogram(spiketrain, cutoff=250*pq.ms, histtype='bar') plt.show() 2. ISI histogram of multiple spike trains. .. plot:: :include-source: import quantities as pq import matplotlib.pyplot as plt from elephant.spike_train_generation import homogeneous_poisson_process from viziphant.statistics import plot_isi_histogram np.random.seed(12) rates = [5, 10, 15] * pq.Hz spiketrains = [homogeneous_poisson_process(rate=r, t_stop=100 * pq.s) for r in rates] plot_isi_histogram(spiketrains, cutoff=250*pq.ms, legend=rates) plt.show() 3. ISI histogram of multiple neuron populations. .. 
plot:: :include-source: import quantities as pq import matplotlib.pyplot as plt from elephant.spike_train_generation import homogeneous_poisson_process from viziphant.statistics import plot_isi_histogram np.random.seed(12) population1 = [homogeneous_poisson_process(rate=30 * pq.Hz, t_stop=50 * pq.s) for _ in range(10)] population2 = [homogeneous_poisson_process(rate=r * pq.Hz, t_stop=50 * pq.s) for r in range(1, 20)] plot_isi_histogram([population1, population2], cutoff=250 * pq.ms, legend=['population1', 'population2']) plt.show() """ def isi_population(spiketrain_list): return [statistics.isi(np.sort(st.magnitude)) for st in spiketrain_list] if isinstance(spiketrains, pq.Quantity): spiketrains = [spiketrains] check_same_units(spiketrains) if isinstance(spiketrains[0], (list, tuple)): intervals = [np.hstack(isi_population(sts)) for sts in spiketrains] units = spiketrains[0][0].units else: intervals = isi_population(spiketrains) units = spiketrains[0].units if legend is None: legend = [None] * len(intervals) elif isinstance(legend, str): legend = [legend] if len(legend) != len(intervals): raise ValueError("The length of the input list and legend labels do " "not match.") if cutoff is None: cutoff = max(interval.max() for interval in intervals) * units if axes is None: fig, axes = plt.subplots() bins = np.arange(start=0, stop=(cutoff + bin_size).rescale(units).item(), step=bin_size.rescale(units).item()) for label, interval in zip(legend, intervals): axes.hist(interval, bins=bins, histtype=histtype, label=label) axes.set_title(title) axes.set_xlabel(f'Inter-spike interval ({units.dimensionality})') axes.set_ylabel('Count') if legend[0] is not None: axes.legend() return axes def plot_time_histogram(histogram, axes=None, units=None): """ This function plots a time histogram, such as the result of :func:`elephant.statistics.time_histogram`. Parameters ---------- histogram : neo.AnalogSignal Object containing the histogram bins. axes : matplotlib.axes.Axes or None, optional Matplotlib axes handle. If set to None, new axes are created and returned. units : pq.Quantity or str or None, optional Desired time axis units. If None, ``histogram.sampling_period`` units are used. Default: None Returns ------- axes : matplotlib.axes.Axes Examples -------- 1. Basic example of spike count histogram. .. plot:: :include-source: import quantities as pq import matplotlib.pyplot as plt from elephant.spike_train_generation import homogeneous_poisson_process from elephant import statistics from viziphant.statistics import plot_time_histogram np.random.seed(14) spiketrains = [homogeneous_poisson_process(rate=10*pq.Hz, t_stop=10*pq.s) for _ in range(10)] histogram = statistics.time_histogram(spiketrains, bin_size=100*pq.ms) plot_time_histogram(histogram, units='s') plt.show() 2. Multiple time histograms are shown side by side with a common event point. .. 
plot:: :include-source: import neo import quantities as pq import matplotlib.pyplot as plt from elephant.spike_train_generation import homogeneous_poisson_process from elephant import statistics from viziphant.statistics import plot_time_histogram from viziphant.events import add_event np.random.seed(11) fig, axes = plt.subplots(2, 1, sharex=True, sharey=True) event = neo.Event([2]*pq.s, labels=['Trigger ON']) for axis in axes: spiketrains = [homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s) for _ in range(10)] histogram = statistics.time_histogram(spiketrains, bin_size=0.1 * pq.s, output='rate') plot_time_histogram(histogram, axes=axis, units='s') add_event(axes, event=event) plt.show() """ if axes is None: fig, axes = plt.subplots() # Rescale the time axis if requested if units is None: units = histogram.sampling_period.units elif isinstance(units, str): units = pq.Quantity(1, units) width = histogram.sampling_period.rescale(units).item() times = histogram.times.rescale(units).magnitude # Create the plot axes.bar(times, histogram.squeeze().magnitude, align='edge', width=width) axes.set_xlabel(f"Time ({units.dimensionality})") # Human-readable description of the 'output' flag used in time_histogram output_dict = dict(counts="Counts", mean="Counts per spike train", rate=f"Spike rate ({histogram.units.dimensionality})") normalization = histogram.annotations.get('normalization') axes.set_ylabel(output_dict.get(normalization)) return axes def plot_instantaneous_rates_colormesh(rates, axes=None, units=None, **kwargs): """ Plots a colormesh of instantaneous firing rates. Each row represents a spike train the instantaneous rate was computed from. Parameters ---------- rates : neo.AnalogSignal `neo.AnalogSignal` matrix of shape ``(len(spiketrains), time)`` containing instantaneous rates obtained by :func:`elephant.statistics.instantaneous_rate` function. axes : matplotlib.axes.Axes or None, optional Matplotlib axes handle. If set to None, new axes are created and returned. units : pq.Quantity or str or None, optional Desired time axis units. If None, ``histogram.sampling_period`` units are used. Default: None **kwargs Additional parameters passed to matplotlib `pcolormesh` function. Returns ------- fig : matplotlib.figure.Figure ax : matplotlib.axes.Axes Examples -------- .. plot:: :include-source: import quantities as pq from elephant import statistics, kernels import matplotlib.pyplot as plt from elephant.spike_train_generation import homogeneous_poisson_process from viziphant.statistics import plot_instantaneous_rates_colormesh np.random.seed(6) spiketrains = [homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s) for _ in range(10)] kernel = kernels.GaussianKernel(sigma=100 * pq.ms) rates = statistics.instantaneous_rate(spiketrains, sampling_period=10 * pq.ms, kernel=kernel) plot_instantaneous_rates_colormesh(rates) plt.show() """ if axes is None: fig, axes = plt.subplots() if units is None: units = rates.sampling_period.units elif isinstance(units, str): units = pq.Quantity(1, units) t_stop = rates.t_stop.rescale(units).item() times = np.r_[rates.times.rescale(units).magnitude, t_stop] neurons_range = range(rates.shape[1] + 1) im = axes.pcolormesh(times, neurons_range, rates.magnitude.T, **kwargs) # Add a colorbar cbar = plt.colorbar(im, ax=axes) cbar.set_label("Firing rate [Hz]") axes.set_xlabel(f"Time ({units.dimensionality})") axes.set_ylabel("Neuron") axes.set_yticks([rates.shape[1] - 0.5]) axes.set_yticklabels([rates.shape[1] - 1]) return axes
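# ---------------------------------------------------------------------------
# A short sketch combining the three plotting helpers above on a single figure.
# The inputs (a list of spike trains, a time histogram, and an instantaneous-rate
# AnalogSignal) are assumed to be produced as in the docstring examples; passing
# explicit axes handles is supported by every function in this module.
def plot_statistics_overview(spiketrains, histogram, rates):
    fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(8, 9))
    plot_isi_histogram(spiketrains, axes=ax1, cutoff=250 * pq.ms)
    plot_time_histogram(histogram, axes=ax2, units='s')
    plot_instantaneous_rates_colormesh(rates, axes=ax3, units='s')
    fig.tight_layout()
    return fig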
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### import contextlib import girder_client.cli import logging import mock import os import requests import shutil import sys import six import httmock from girder import config from girder.models.api_key import ApiKey from girder.models.folder import Folder from girder.models.item import Item from girder.models.user import User from girder_client.cli import GirderCli from tests import base from six.moves.http_client import HTTPConnection from six import StringIO os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200') config.loadConfig() # Must reload config to pickup correct port @contextlib.contextmanager def captureOutput(): oldout, olderr = sys.stdout, sys.stderr try: out = [StringIO(), StringIO()] sys.stdout, sys.stderr = out yield out finally: sys.stdout, sys.stderr = oldout, olderr out[0] = out[0].getvalue() out[1] = out[1].getvalue() class SysExitException(Exception): pass def invokeCli(argv, username='', password='', useApiUrl=False): """ Invoke the Girder Python client CLI with a set of arguments. """ if useApiUrl: apiUrl = 'http://localhost:%s/api/v1' % os.environ['GIRDER_PORT'] argsList = ['girder-client', '--api-url', apiUrl] else: argsList = ['girder-client', '--port', os.environ['GIRDER_PORT']] if username: argsList += ['--username', username] if password: argsList += ['--password', password] argsList += list(argv) exitVal = 0 with mock.patch.object(sys, 'argv', argsList),\ mock.patch('sys.exit', side_effect=SysExitException) as exit,\ captureOutput() as output: try: girder_client.cli.main() except SysExitException: args = exit.mock_calls[0][1] exitVal = args[0] if len(args) else 0 return { 'exitVal': exitVal, 'stdout': output[0], 'stderr': output[1] } def setUpModule(): plugins = os.environ.get('ENABLED_PLUGINS', '') if plugins: base.enabledPlugins.extend(plugins.split()) base.startServer(False) def tearDownModule(): base.stopServer() class PythonCliTestCase(base.TestCase): def setUp(self): base.TestCase.setUp(self) self.user = User().createUser( firstName='First', lastName='Last', login='mylogin', password='password', email='[email protected]') self.publicFolder = six.next(Folder().childFolders( parentType='user', parent=self.user, user=None, limit=1)) self.apiKey = ApiKey().createApiKey(self.user, name='') self.downloadDir = os.path.join( os.path.dirname(__file__), '_testDownload') shutil.rmtree(self.downloadDir, ignore_errors=True) def tearDown(self): logger = logging.getLogger('girder_client') logger.setLevel(logging.ERROR) logger.handlers = [] shutil.rmtree(self.downloadDir, ignore_errors=True) base.TestCase.tearDown(self) def testUrlByPart(self): # This test does NOT connect to the test server. It only checks that the # client object has the expected attributes. 
username = None password = None for case in [ # Check that apiUrl is preferred { 'input': {'apiUrl': 'https://girder.example.com:74/api/v74', 'host': 'foo', 'scheme': 'bar', 'port': 42, 'apiRoot': 'bar'}, 'expected': { 'urlBase': 'https://girder.example.com:74/api/v74/', 'host': None, 'scheme': None, 'port': None} }, # Check different configuration of URL by part { 'input': {}, 'expected': { 'urlBase': 'http://localhost:8080/api/v1/', 'host': 'localhost', 'scheme': 'http', 'port': 8080} }, { 'input': {'host': 'localhost'}, 'expected': { 'urlBase': 'http://localhost:8080/api/v1/', 'host': 'localhost', 'scheme': 'http', 'port': 8080} }, { 'input': {'port': 42}, 'expected': { 'urlBase': 'http://localhost:42/api/v1/', 'host': 'localhost', 'scheme': 'http', 'port': 42} }, { 'input': {'scheme': 'https'}, 'expected': { 'urlBase': 'https://localhost:443/api/v1/', 'host': 'localhost', 'scheme': 'https', 'port': 443} }, { 'input': {'host': 'girder.example.com'}, 'expected': { 'urlBase': 'https://girder.example.com:443/api/v1/', 'host': 'girder.example.com', 'scheme': 'https', 'port': 443} }, { 'input': {'host': 'girder.example.com', 'scheme': 'http'}, 'expected': { 'urlBase': 'http://girder.example.com:80/api/v1/', 'host': 'girder.example.com', 'scheme': 'http', 'port': 80} }, { 'input': {'host': 'localhost', 'port': 42}, 'expected': { 'urlBase': 'http://localhost:42/api/v1/', 'host': 'localhost', 'scheme': 'http', 'port': 42} }, { 'input': {'host': 'girder.example.com', 'port': 42}, 'expected': { 'urlBase': 'https://girder.example.com:42/api/v1/', 'host': 'girder.example.com', 'scheme': 'https', 'port': 42} }, { 'input': {'host': 'localhost', 'scheme': 'https'}, 'expected': { 'urlBase': 'https://localhost:443/api/v1/', 'host': 'localhost', 'scheme': 'https', 'port': 443} }, { 'input': {'host': 'girder.example.com', 'scheme': 'https'}, 'expected': { 'urlBase': 'https://girder.example.com:443/api/v1/', 'host': 'girder.example.com', 'scheme': 'https', 'port': 443} }, ]: client = girder_client.cli.GirderCli(username, password, **case['input']) for attribute, value in case['expected'].items(): self.assertEqual(getattr(client, attribute), value) def testCliHelp(self): ret = invokeCli(()) self.assertNotEqual(ret['exitVal'], 0) ret = invokeCli(('-h',)) self.assertIn('Usage: girder-client', ret['stdout']) self.assertEqual(ret['exitVal'], 0) def testUploadDownload(self): localDir = os.path.join(os.path.dirname(__file__), 'testdata') args = ['upload', str(self.publicFolder['_id']), localDir, '--parent-type=folder'] with self.assertRaises(requests.HTTPError): invokeCli(args) with self.assertRaises(requests.HTTPError): invokeCli(['--api-key', '1234'] + args) # Test dry-run and blacklist options ret = invokeCli( args + ['--dry-run', '--blacklist=hello.txt'], username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertIn('Ignoring file hello.txt as it is blacklisted', ret['stdout']) # Test with multiple files in a dry-run ret = invokeCli([ 'upload', str(self.publicFolder['_id']), '--parent-type=folder', os.path.join(localDir, 'hello.txt'), os.path.join(localDir, 'world.txt'), '--dry-run'], username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertIn('Uploading Item from hello.txt', ret['stdout']) self.assertIn('Uploading Item from world.txt', ret['stdout']) # Actually upload the test data ret = invokeCli(args, username='mylogin', password='password', useApiUrl=True) self.assertEqual(ret['exitVal'], 0) six.assertRegex( self, ret['stdout'], 'Creating Folder 
from .*tests/cases/py_client/testdata') self.assertIn('Uploading Item from hello.txt', ret['stdout']) subfolder = six.next(Folder().childFolders( parent=self.publicFolder, parentType='folder', limit=1)) self.assertEqual(subfolder['name'], 'testdata') items = list(Folder().childItems(folder=subfolder)) toUpload = list(os.listdir(localDir)) self.assertEqual(len(toUpload), len(items)) downloadDir = os.path.join(os.path.dirname(localDir), '_testDownload') ret = invokeCli(('download', str(subfolder['_id']), downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) for downloaded in os.listdir(downloadDir): if downloaded == '.girder_metadata': continue self.assertIn(downloaded, toUpload) # Download again to same location, we should not get errors ret = invokeCli(('download', str(subfolder['_id']), downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) # Download again to same location, using path, we should not get errors ret = invokeCli(('download', '/user/mylogin/Public/testdata', downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) # Test uploading with reference queryList = [] @httmock.urlmatch(netloc='localhost', path='/api/v1/file$', method='POST') def checkParams(url, request): # Add query for every file upload request queryList.append(six.moves.urllib.parse.parse_qs(url[3])) with httmock.HTTMock(checkParams): ret = invokeCli( args + ['--reference', 'reference_string'], username='mylogin', password='password') # Test if reference is sent with each file upload fileList = os.listdir(localDir) self.assertTrue(queryList) self.assertTrue(fileList) self.assertEqual(len(queryList), len(fileList)) for query in queryList: self.assertIn('reference', query) self.assertIn('reference_string', query['reference']) # Create a collection and subfolder resp = self.request('/collection', 'POST', user=self.user, params={ 'name': 'my_collection' }) self.assertStatusOk(resp) resp = self.request('/folder', 'POST', user=self.user, params={ 'parentType': 'collection', 'parentId': resp.json['_id'], 'name': 'my_folder' }) self.assertStatusOk(resp) # Test download of the collection ret = invokeCli(('download', '--parent-type=collection', '/collection/my_collection', downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue(os.path.isdir(os.path.join(downloadDir, 'my_folder'))) shutil.rmtree(downloadDir, ignore_errors=True) # Test download of the collection auto-detecting parent-type ret = invokeCli(('download', '/collection/my_collection', downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue(os.path.isdir(os.path.join(downloadDir, 'my_folder'))) shutil.rmtree(downloadDir, ignore_errors=True) # Test download of a user ret = invokeCli(('download', '--parent-type=user', '/user/mylogin', downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue( os.path.isfile(os.path.join(downloadDir, 'Public', 'testdata', 'hello.txt'))) shutil.rmtree(downloadDir, ignore_errors=True) # Test download of a user auto-detecting parent-type ret = invokeCli(('download', '/user/mylogin', downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue( os.path.isfile(os.path.join(downloadDir, 'Public', 'testdata', 'hello.txt'))) shutil.rmtree(downloadDir, ignore_errors=True) # Test download of an item items = 
list(Folder().childItems(folder=subfolder)) item_id = items[0]['_id'] item_name = items[0]['name'] ret = invokeCli(('download', '--parent-type=item', '%s' % item_id, downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue( os.path.isfile(os.path.join(downloadDir, item_name))) shutil.rmtree(downloadDir, ignore_errors=True) # Test download of a file os.makedirs(downloadDir) items = list(Folder().childItems(folder=subfolder)) file_name, file_doc = next(Item().fileList(items[0], data=False)) ret = invokeCli( ('download', '--parent-type=file', '%s' % file_doc['_id'], os.path.join(downloadDir, file_name)), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue( os.path.isfile(os.path.join(downloadDir, file_name))) shutil.rmtree(downloadDir, ignore_errors=True) # Test download of an item auto-detecting parent-type ret = invokeCli(('download', '%s' % item_id, downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertTrue( os.path.isfile(os.path.join(downloadDir, item_name))) shutil.rmtree(downloadDir, ignore_errors=True) def _check_upload(ret): self.assertEqual(ret['exitVal'], 0) six.assertRegex( self, ret['stdout'], 'Creating Folder from .*tests/cases/py_client/testdata') self.assertIn('Uploading Item from hello.txt', ret['stdout']) # Try uploading using API key _check_upload(invokeCli(['--api-key', self.apiKey['key']] + args)) # Try uploading using API key set with GIRDER_API_KEY env. variable os.environ["GIRDER_API_KEY"] = self.apiKey['key'] _check_upload(invokeCli(args)) del os.environ["GIRDER_API_KEY"] # Test localsync, it shouldn't touch files on 2nd pass ret = invokeCli(('localsync', str(subfolder['_id']), downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) old_mtimes = {} for fname in os.listdir(downloadDir): filename = os.path.join(downloadDir, fname) old_mtimes[fname] = os.path.getmtime(filename) ret = invokeCli(('localsync', str(subfolder['_id']), downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) for fname in os.listdir(downloadDir): if fname == '.girder_metadata': continue filename = os.path.join(downloadDir, fname) self.assertEqual(os.path.getmtime(filename), old_mtimes[fname]) # Check that localsync command do not show '--parent-type' option help ret = invokeCli(('localsync', '--help')) self.assertNotIn('--parent-type', ret['stdout']) self.assertEqual(ret['exitVal'], 0) # Check that localsync command still accepts '--parent-type' argument ret = invokeCli(('localsync', '--parent-type', 'folder', str(subfolder['_id']), downloadDir), username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) def testLeafFoldersAsItems(self): localDir = os.path.join(os.path.dirname(__file__), 'testdata') args = ['upload', str(self.publicFolder['_id']), localDir, '--leaf-folders-as-items'] ret = invokeCli(args, username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) six.assertRegex( self, ret['stdout'], 'Creating Item from folder .*tests/cases/py_client/testdata') self.assertIn('Adding file world.txt', ret['stdout']) # Test re-use existing case args.append('--reuse') ret = invokeCli(args, username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertIn('File hello.txt already exists in parent Item', ret['stdout']) def testVerboseLoggingLevel0(self): args = ['localsync', '--help'] ret = invokeCli(args, username='mylogin', 
password='password') self.assertEqual(ret['exitVal'], 0) self.assertEqual(logging.getLogger('girder_client').level, logging.ERROR) def testVerboseLoggingLevel1(self): args = ['-v', 'localsync', '--help'] ret = invokeCli(args, username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertEqual(logging.getLogger('girder_client').level, logging.WARNING) def testVerboseLoggingLevel2(self): args = ['-vv', 'localsync', '--help'] ret = invokeCli(args, username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertEqual(logging.getLogger('girder_client').level, logging.INFO) def testVerboseLoggingLevel3(self): args = ['-vvv', 'localsync', '--help'] ret = invokeCli(args, username='mylogin', password='password') self.assertEqual(ret['exitVal'], 0) self.assertEqual(logging.getLogger('girder_client').level, logging.DEBUG) self.assertEqual(HTTPConnection.debuglevel, 1) def testRetryUpload(self): gc = GirderCli('mylogin', 'password', host='localhost', port=os.environ['GIRDER_PORT'], retries=5) def checkRetryHandler(*args, **kwargs): session = gc._session self.assertIsNotNone(session) self.assertIn(gc.urlBase, session.adapters) adapter = session.adapters[gc.urlBase] self.assertEqual(adapter.max_retries.total, 5) with mock.patch('girder_client.cli.GirderClient.sendRestRequest', side_effect=checkRetryHandler) as m: gc.sendRestRequest('') self.assertTrue(m.called)
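
# Added illustration (not girder code): a stdlib-only distillation of the
# invokeCli/captureOutput pattern used above, so the technique can be seen
# without the test server. It patches sys.argv, traps sys.exit through a
# side-effect exception, and captures stdout/stderr in-process. ``toy_main``
# is a hypothetical entry point standing in for girder_client.cli.main().
import sys
from io import StringIO
from unittest import mock


class _SysExit(Exception):
    pass


def toy_main():
    # Hypothetical CLI entry point used only for this illustration.
    if '-h' in sys.argv[1:]:
        print('Usage: toy-client [options]')
        sys.exit(0)
    print('running with args: %s' % ' '.join(sys.argv[1:]))


def invoke(argv):
    out, err = StringIO(), StringIO()
    exit_val = 0
    with mock.patch.object(sys, 'argv', ['toy-client'] + list(argv)), \
            mock.patch.object(sys, 'stdout', out), \
            mock.patch.object(sys, 'stderr', err), \
            mock.patch('sys.exit', side_effect=_SysExit) as exit_mock:
        try:
            toy_main()
        except _SysExit:
            # sys.exit was called; recover the exit code from the mock.
            args = exit_mock.mock_calls[0][1]
            exit_val = args[0] if args else 0
    return {'exitVal': exit_val, 'stdout': out.getvalue(), 'stderr': err.getvalue()}


if __name__ == '__main__':
    result = invoke(['-h'])
    assert result['exitVal'] == 0 and 'Usage' in result['stdout']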
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time from oslo_log import log as logging from six.moves.urllib import parse as urllib from tempest_lib import exceptions as lib_exc from tempest.common import service_client from tempest import exceptions LOG = logging.getLogger(__name__) class BaseSnapshotsClient(service_client.ServiceClient): """Base Client class to send CRUD Volume API requests.""" create_resp = 200 def list_snapshots(self, detail=False, params=None): """List all the snapshot.""" url = 'snapshots' if detail: url += '/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBodyList(resp, body['snapshots']) def show_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body['snapshot']) def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) self.expected_success(self.create_resp, resp.status) return service_client.ResponseBody(resp, body['snapshot']) def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body['snapshot']) # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): body = self.show_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" resp, body = self.delete("snapshots/%s" % str(snapshot_id)) self.expected_success(202, resp.status) return service_client.ResponseBody(resp, body) def is_resource_deleted(self, id): try: self.show_snapshot(id) except lib_exc.NotFound: return True return False @property def resource_type(self): """Returns the primary type of resource this client works with.""" return 'volume-snapshot' def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) self.expected_success(202, resp.status) return service_client.ResponseBody(resp, body) def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) self.expected_success(202, resp.status) return service_client.ResponseBody(resp, body) def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body['metadata']) def show_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body['metadata']) def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body['metadata']) def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body['meta']) def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) self.expected_success(202, resp.status) return service_client.ResponseBody(resp, body) class SnapshotsClient(BaseSnapshotsClient): """Client class to send CRUD Volume V1 API requests."""
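
# Added illustration (not tempest code): a self-contained sketch of the
# polling pattern that wait_for_snapshot_status implements above. Poll a
# status callable at a fixed interval, report transitions, and give up after
# a timeout. The ``get_status`` callable and the timing defaults here are
# illustrative assumptions.
import time


def wait_for_status(get_status, wanted, build_interval=1, build_timeout=30):
    start_time = time.time()
    old_value = value = get_status()
    while True:
        elapsed = time.time() - start_time
        if value != old_value:
            print('Status changed from %r to %r after %d second(s)'
                  % (old_value, value, elapsed))
        if value == wanted:
            return value
        if elapsed > build_timeout:
            raise RuntimeError('Timed out (%ds) waiting for status %r, got %r'
                               % (build_timeout, wanted, value))
        time.sleep(build_interval)
        old_value = value
        value = get_status()


if __name__ == '__main__':
    # Simulate a resource that becomes 'available' on the third poll.
    statuses = iter(['creating', 'creating', 'available'])
    print(wait_for_status(lambda: next(statuses), 'available', build_interval=0))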
""" Testing the funcionality of the reports and the reporting utils """ from datetime import datetime from django.test import TestCase from django.utils import timezone from django.db.models import Avg, Sum from django.core.urlresolvers import reverse from geoposition import Geoposition from seshdash.models import Daily_Data_Point, Report_Job, Sesh_Site, Sesh_Organisation, Sesh_User, Report_Sent from seshdash.utils.reporting import send_report, generate_report_data, _format_column_str, _get_operation, get_emails_list, \ get_table_report_dict, get_edit_report_list, is_in_report_attributes, get_report_table_attributes, \ is_in_report_attributes_dictionary from seshdash.tasks import check_reports class ReportTestCase(TestCase): def setUp(self): """ Initializing """ self.location = Geoposition(1.7, 1.8) self.organisation = Sesh_Organisation.objects.create(name='test_org', slack_token='testing_token') self.test_user = Sesh_User.objects.create_user(username='test_user', email='[email protected]', password='test.test.test', organisation=self.organisation, is_org_admin=True, department='test', send_mail=True) self.site = Sesh_Site.objects.create(site_name='test_site', organisation=self.organisation, comission_date=timezone.now(), location_city='kigali', location_country='Rwanda', installed_kw=123.0, position = self.location, system_voltage = 12, number_of_panels = 12, battery_bank_capacity = 1000) self.attributes_data = [ {"table":"Daily_Data_Point", "column":"daily_no_of_alerts", "operation":"average", "user_friendly_name":"Daily no of alerts average"}, {"table":"Daily_Data_Point", "column":"daily_power_consumption_total", "operation":"sum", "user_friendly_name":"Daily power consumption total sum" }, ] self.daily_data_point_one = Daily_Data_Point.objects.create( site = self.site, date = timezone.now(), daily_pv_yield = 10, daily_power_consumption_total = 10, ) self.daily_data_point_two = Daily_Data_Point.objects.create( site = self.site, date = timezone.now(), daily_pv_yield = 10, daily_power_consumption_total = 10, ) self.report = Report_Job.objects.create(site=self.site, duration="daily", day_to_report=datetime.now().today().weekday(), attributes=self.attributes_data) def test_models(self): """ Testing the models """ self.assertEqual(Report_Job.objects.all().count(), 1) def test_generate_report_data(self): """ Testing the util that generates the report dict """ results = generate_report_data(self.report) # Asserting if the aggregations are correct self.assertTrue(results[0]['unit']) self.assertTrue(results[0]['user_friendly_name']) for item in results: if item['user_friendly_name'] == 'Daily pv yield average': self.assertEqual(item['val'], 10) def test_send_reports(self): """ Testing the task that send reports """ reported_reports = check_reports() self.assertEqual(reported_reports, 1) def test_send_report(self): """ Testing the sending of the generated reports, Test logging of report """ val = send_report(self.report) report_log = Report_Sent.objects.all() self.assertTrue(val) self.assertGreater(len(report_log),0) def test__get_operation(self): ''' Testing _get_operation function that takes an attribute and returns a function to execute ''' val = _get_operation(self.attributes_data[0]) self.assertEqual(val, Avg) def test__format_column_str(self): """ Tests the formating of a string, changing column to spaces and capitalizing the first letter """ val = _format_column_str('daily_pv_yield') self.assertEqual(val, 'Daily pv yield') def test_get_email_users(self): """ A function to return a list of 
emails when given an array of sesh user instances """ mail_list = get_emails_list([self.test_user]) self.assertEqual(mail_list, ['[email protected]']) def tests_get_table_report_dict(self): """ Testing the function that takes a table and operations as input and then returns a attribute dict that can be used to create a report """ report_dict = get_table_report_dict(self.site, 'Daily_Data_Point', 'sum') self.assertEqual(report_dict[0]['operation'], 'sum') self.assertEqual(report_dict[0]['table'], 'Daily_Data_Point') # Should raise a lookup error in case of incorrect table input with self.assertRaises(LookupError): get_table_report_dict(self.site, 'UnknownTable', 'sum') def test_get_table_report_attributes(self): """ This is the function that returns the table attributes to display, It should display the options basing on the site, This test will test how it will work with Daily_Data_Point Field models where it should check if a site has pv and return values consideringly """ fields = get_report_table_attributes(self.site) # Testing for some of the values that should not be in for a site that doesn't have pv, genset, grid, batteries dict = {'column': 'daily_pv_yield', 'operation':'sum', 'table':'Daily_Data_Point', 'user_friendly_name':'Daily pv yield sum'} self.assertFalse(is_in_report_attributes_dictionary(dict, fields)) dict = {'column': 'daily_power_cons_pv', 'operation':'sum', 'table': 'Daily_Data_Point', 'user_friendly_name': 'Daily power cons pv sum'} self.assertFalse(is_in_report_attributes_dictionary(dict, fields)) dict = {'column': 'daily_grid_outage_n', 'operation':'sum','table': 'Daily_Data_Point', 'user_friendly_name': 'Daily grid outage n sum'} self.assertFalse(is_in_report_attributes_dictionary(dict, fields)) dict = {'column': 'daily_battery_charge', 'operation': 'sum', 'table': 'Daily_Data_Point', 'user_friendly_name': 'Daily battery charge sum'} self.assertFalse(is_in_report_attributes_dictionary(dict, fields)) # Creating a site that has pv, genset grid and batteries site_has_all = Sesh_Site.objects.create(site_name='test_site_has_all', organisation=self.organisation, comission_date=timezone.now(), location_city='kigali', location_country='Rwanda', installed_kw=123.0, position = self.location, system_voltage = 12, number_of_panels = 12, battery_bank_capacity = 1000, has_genset=True, has_grid=True, has_pv=True, has_batteries=True) fields = get_report_table_attributes(site_has_all) # Testing if the correct values are there for a site that has pv, genset, grid and batteries dict = {'column': 'daily_pv_yield', 'operation':'sum', 'table':'Daily_Data_Point', 'user_friendly_name':'Daily pv yield sum'} self.assertTrue(is_in_report_attributes_dictionary(dict, fields)) dict = {'column': 'daily_power_cons_pv', 'operation': 'sum', 'table': 'Daily_Data_Point', 'user_friendly_name': 'Daily power cons pv sum'} self.assertTrue(is_in_report_attributes_dictionary(dict, fields)) dict = {'column': 'daily_grid_outage_n', 'operation': 'sum', 'table': 'Daily_Data_Point', 'user_friendly_name': 'Daily grid outage n sum'} self.assertTrue(is_in_report_attributes_dictionary(dict, fields)) dict4 = {'column': 'daily_battery_charge', 'operation': 'sum', 'table': 'Daily_Data_Point', 'user_friendly_name': 'Daily battery charge sum'} self.assertTrue(is_in_report_attributes_dictionary(dict, fields)) def test_add_report(self): """ Testing the adding of the reports from a client to a the db """ # The below is the format of the data that is received from a client when adding a report data = { 
'{"table":"Daily_Data_Point","column":"daily_pv_yield","operation":"average","user_friendly_name":"Daily pv yield average"}': ['on'], '{"table":"Daily_Data_Point","column":"daily_power_consumption_total","operation":"sum","user_friendly_name":"Daily power consumption sum"}': ['on'], } self.client.login(username='test_user', password='test.test.test') response = self.client.post(reverse('add_report', args=[self.site.id]), data) self.assertEqual(response.status_code, 302) # Testing the redirection to manage reports page for site self.assertEqual(Report_Job.objects.all().count(), 2) def test_delete_report(self): """ Testing the deletion of a report """ self.client.login(username='test_user', password='test.test.test') response = self.client.get(reverse('delete_report', args=[self.report.id])) self.assertEqual(response.status_code, 302) # Testing the redirection to manage reports page for site self.assertEqual(Report_Job.objects.all().count(), 0) def test_is_in_report_attributes(self): """ Testing the function that determines if an attribute is in the report.attribues """ result = is_in_report_attributes(self.report.attributes[0], self.report) self.assertTrue(result) def test_get_edit_report_list(self): """ Testing the function that returns a list representing the report.attributes status. The function returns a dict, which has status on for each active attribute and off otherwise """ report_dict = get_edit_report_list(self.report) count = 0 for item in report_dict: if item['status'] == 'on': count += 1 self.assertEqual(count, 2) # Testing that the report list is detecting 2 attributes in the report. def test_edit_report(self): """ This will test the editing of the sesh reports """ self.client.login(username='test_user', password='test.test.test') data = { '{"table":"Daily_Data_Point", "column":"daily_pv_yield","operation":"average","user_friendly_name":"Daily pv yield average"}' : ['on'], 'duration': 'monthly', } response = self.client.post(reverse('edit_report', args=[self.report.id]), data) self.assertEqual(response.status_code, 302) # The rediction to the manage reports report = Report_Job.objects.filter(id=self.report.id).first() self.assertEqual(report.duration, 'monthly') self.assertEqual(len(report.attributes), 1)
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import pytest asdf = pytest.importorskip('asdf') import numpy as np import astropy.units as u from astropy import table from astropy.time import Time, TimeDelta from astropy.coordinates import SkyCoord, EarthLocation from astropy.io.misc.asdf.tags.helpers import skycoord_equal from asdf.tests import helpers from asdf.tags.core.ndarray import NDArrayType from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test def test_table(tmpdir): data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] t = table.Table(rows=data_rows, names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1')) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(ff.blocks) == 3 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_array_columns(tmpdir): a = np.array([([[1, 2], [3, 4]], 2.0, 'x'), ([[5, 6], [7, 8]], 5.0, 'y'), ([[9, 10], [11, 12]], 8.2, 'z')], dtype=[('a', '<i4', (2, 2)), ('b', '<f8'), ('c', '|S1')]) t = table.Table(a, copy=False) assert t.columns['a'].shape == (3, 2, 2) def check(ff): assert len(ff.blocks) == 1 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_structured_array_columns(tmpdir): a = np.array([((1, 'a'), 2.0, 'x'), ((4, 'b'), 5.0, 'y'), ((5, 'c'), 8.2, 'z')], dtype=[('a', [('a0', '<i4'), ('a1', '|S1')]), ('b', '<f8'), ('c', '|S1')]) t = table.Table(a, copy=False) def check(ff): assert len(ff.blocks) == 1 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_table_row_order(tmpdir): a = np.array([(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')], dtype=[('a', '<i4'), ('b', '<f8'), ('c', '|S1')]) t = table.Table(a, copy=False) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(ff.blocks) == 1 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_table_inline(tmpdir): data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] t = table.Table(rows=data_rows, names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1')) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(list(ff.blocks.internal_blocks)) == 0 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check, write_options={'auto_inline': 64}) def test_mismatched_columns(): yaml = """ table: !<tag:astropy.org:astropy/table/table-1.0.0> columns: - !core/column-1.0.0 data: !core/ndarray-1.0.0 data: [0, 1, 2] name: a - !core/column-1.0.0 data: !core/ndarray-1.0.0 data: [0, 1, 2, 3] name: b colnames: [a, b] """ buff = helpers.yaml_to_asdf(yaml) with pytest.raises(ValueError) as err: with asdf.open(buff) as ff: pass assert 'Inconsistent data column lengths' in str(err.value) def test_masked_table(tmpdir): data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] t = table.Table(rows=data_rows, names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1'), masked=True) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['a'].mask = [True, False, True] t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(ff.blocks) == 4 
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_quantity_mixin(tmpdir): t = table.QTable() t['a'] = [1, 2, 3] t['b'] = ['x', 'y', 'z'] t['c'] = [2.0, 5.0, 8.2] * u.m def check(ff): assert isinstance(ff['table']['c'], u.Quantity) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_time_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02']) def check(ff): assert isinstance(ff['table']['c'], Time) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_timedelta_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = TimeDelta([1, 2] * u.day) def check(ff): assert isinstance(ff['table']['c'], TimeDelta) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_skycoord_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5') def check(ff): assert isinstance(ff['table']['c'], SkyCoord) def tree_match(old, new): NDArrayType.assert_equal(new['a'], old['a']) NDArrayType.assert_equal(new['b'], old['b']) assert skycoord_equal(new['c'], old['c']) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check, tree_match_func=tree_match) def test_earthlocation_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) def check(ff): assert isinstance(ff['table']['c'], EarthLocation) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_ndarray_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = table.NdarrayMixin([5, 6]) helpers.assert_roundtrip_tree({'table': t}, tmpdir) def test_backwards_compat(): """ Make sure that we can continue to read tables that use the schema from the ASDF Standard. This test uses the examples in the table schema from the ASDF Standard, since these make no reference to Astropy's own table definition. """ def check(asdffile): assert isinstance(asdffile['example'], table.Table) run_schema_example_test('stsci.edu', 'asdf', 'core/table', '1.0.0', check)
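
# Added illustration (assuming asdf and astropy are installed): a minimal
# version of the round trip that helpers.assert_roundtrip_tree performs for
# the tests above, without the pytest fixtures. The file name is arbitrary.
import asdf
import numpy as np
from astropy import table


def roundtrip_table(path='table_roundtrip.asdf'):
    t = table.Table(rows=[(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')],
                    names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1'))
    # Serialize the table inside an ASDF tree, then read it back.
    asdf.AsdfFile({'table': t}).write_to(path)
    with asdf.open(path) as ff:
        restored = ff['table']
        assert isinstance(restored, table.Table)
        assert np.all(np.asarray(restored['a']) == np.asarray(t['a']))
        print(restored)


if __name__ == '__main__':
    roundtrip_table()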
# -*- coding: utf-8 -*- ''' The modules in this file are refered to https://github.com/Kyubyong/transformer ''' from __future__ import print_function import tensorflow as tf from load_data import * def normalize(inputs, epsilon = 1e-8, scope="ln", reuse=None): '''Applies layer normalization. Args: inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`. epsilon: A floating number. A very small number for preventing ZeroDivision Error. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A tensor with the same shape and data dtype as `inputs`. ''' with tf.variable_scope(scope, reuse=reuse): inputs_shape = inputs.get_shape() params_shape = inputs_shape[-1:] mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True) beta= tf.Variable(tf.zeros(params_shape)) gamma = tf.Variable(tf.ones(params_shape)) normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) ) outputs = gamma * normalized + beta return outputs def embedding(inputs, vocab_size, num_units, zero_pad=True, scale=True, scope="embedding", reuse=None): '''Embeds a given tensor. Args: inputs: A `Tensor` with type `int32` or `int64` containing the ids to be looked up in `lookup table`. vocab_size: An int. Vocabulary size. num_units: An int. Number of embedding hidden units. zero_pad: A boolean. If True, all the values of the fist row (id 0) should be constant zeros. scale: A boolean. If True. the outputs is multiplied by sqrt num_units. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A `Tensor` with one more rank than inputs's. The last dimensionality should be `num_units`. For example, ``` import tensorflow as tf inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3))) outputs = embedding(inputs, 6, 2, zero_pad=True) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) print sess.run(outputs) >> [[[ 0. 0. ] [ 0.09754146 0.67385566] [ 0.37864095 -0.35689294]] [[-1.01329422 -1.09939694] [ 0.7521342 0.38203377] [-0.04973143 -0.06210355]]] ``` ``` import tensorflow as tf inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3))) outputs = embedding(inputs, 6, 2, zero_pad=False) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) print sess.run(outputs) >> [[[-0.19172323 -0.39159766] [-0.43212751 -0.66207761] [ 1.03452027 -0.26704335]] [[-0.11634696 -0.35983452] [ 0.50208133 0.53509563] [ 1.22204471 -0.96587461]]] ``` ''' with tf.variable_scope(scope, reuse=reuse): lookup_table = tf.get_variable('lookup_table', dtype=tf.float32, shape=[vocab_size, num_units], initializer=tf.contrib.layers.xavier_initializer()) if zero_pad: lookup_table = tf.concat((tf.zeros(shape=[1, num_units]), lookup_table[1:, :]), 0) outputs = tf.nn.embedding_lookup(lookup_table, inputs) if scale: outputs = outputs * (num_units ** 0.5) return outputs def multihead_attention(queries, keys, num_units=None, num_heads=8, dropout_rate=0, is_training=True, causality=False, scope="multihead_attention", reuse=None): '''Applies multihead attention. Args: queries: A 3d tensor with shape of [N, T_q, C_q]. keys: A 3d tensor with shape of [N, T_k, C_k]. num_units: A scalar. Attention size. dropout_rate: A floating point number. is_training: Boolean. Controller of mechanism for dropout. causality: Boolean. If true, units that reference the future are masked. num_heads: An int. Number of heads. scope: Optional scope for `variable_scope`. 
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns
      A 3d tensor with shape of (N, T_q, C)
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Set the fall back option for num_units
        if num_units is None:
            num_units = queries.get_shape().as_list()[-1]

        # Linear projections
        Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)  # (N, T_q, C)
        K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)  # (N, T_k, C)
        V = tf.layers.dense(keys, num_units, activation=tf.nn.relu)  # (N, T_k, C)

        # Split and concat
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)  # (h*N, T_q, C/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)  # (h*N, T_k, C/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)  # (h*N, T_k, C/h)

        # Multiplication
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # (h*N, T_q, T_k)

        # Scale
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)

        # Key Masking
        key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1)))  # (N, T_k)
        key_masks = tf.tile(key_masks, [num_heads, 1])  # (h*N, T_k)
        key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1])  # (h*N, T_q, T_k)

        paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
        outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs)  # (h*N, T_q, T_k)

        # Causality = Future blinding
        if causality:
            diag_vals = tf.ones_like(outputs[0, :, :])  # (T_q, T_k)
            tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense()  # (T_q, T_k)
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1])  # (h*N, T_q, T_k)

            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
            outputs = tf.where(tf.equal(masks, 0), paddings, outputs)  # (h*N, T_q, T_k)

        # Activation
        outputs = tf.nn.softmax(outputs)  # (h*N, T_q, T_k)

        # Query Masking
        query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1)))  # (N, T_q)
        query_masks = tf.tile(query_masks, [num_heads, 1])  # (h*N, T_q)
        query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]])  # (h*N, T_q, T_k)
        outputs *= query_masks  # broadcasting. (N, T_q, C)

        # Dropouts
        outputs = tf.layers.dropout(outputs, rate=dropout_rate,
                                    training=tf.convert_to_tensor(is_training))

        # Weighted sum
        outputs = tf.matmul(outputs, V_)  # (h*N, T_q, C/h)

        # Restore shape
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)  # (N, T_q, C)

        # Residual connection
        outputs += queries

        # Normalize
        outputs = normalize(outputs)  # (N, T_q, C)

    return outputs


def feedforward(inputs,
                num_units=[2048, 512],
                scope="multihead_attention",
                reuse=None):
    '''Point-wise feed forward net.

    Args:
      inputs: A 3d tensor with shape of [N, T, C].
      num_units: A list of two integers.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A 3d tensor with the same shape and dtype as inputs
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Inner layer
        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)

        # Readout layer
        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)

        # Residual connection
        outputs += inputs

        # Normalize
        outputs = normalize(outputs)

    return outputs


def label_smoothing(inputs, epsilon=0.1):
    '''Applies label smoothing. See https://arxiv.org/abs/1512.00567.

    Args:
      inputs: A 3d tensor with shape of [N, T, V], where V is the number of
        vocabulary.
      epsilon: Smoothing rate.
For example, ``` import tensorflow as tf inputs = tf.convert_to_tensor([[[0, 0, 1], [0, 1, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0], [0, 1, 0]]], tf.float32) outputs = label_smoothing(inputs) with tf.Session() as sess: print(sess.run([outputs])) >> [array([[[ 0.03333334, 0.03333334, 0.93333334], [ 0.03333334, 0.93333334, 0.03333334], [ 0.93333334, 0.03333334, 0.03333334]], [[ 0.93333334, 0.03333334, 0.03333334], [ 0.93333334, 0.03333334, 0.03333334], [ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)] ``` ''' K = inputs.get_shape().as_list()[-1] # number of channels return ((1-epsilon) * inputs) + (epsilon / K) class Transformer(): def __init__(self, maxlen, batch_size, vocab_size, hidden_units, num_blocks, num_heads, dropout_rate, learning_rate, file_name="", is_training=True): self.maxlen = maxlen self.batch_size = batch_size self.vocab_size = vocab_size self.hidden_units = hidden_units self.num_blocks = num_blocks self.num_heads = num_heads self.dropout_rate = dropout_rate self.learning_rate = learning_rate self.file_name = file_name self.is_training = is_training self.creat_model() def creat_model(self): self.graph = tf.Graph() with self.graph.as_default(): if self.is_training: X, Y, _, _ = load_data(self.file_name, self.maxlen) X_ = X + Y Y_ = Y + X # calc total batch count self.num_batch = len(X_) // self.batch_size # Convert to tensor X = tf.convert_to_tensor(X_, tf.int32) Y = tf.convert_to_tensor(Y_, tf.int32) # Create Queues input_queues = tf.train.slice_input_producer([X, Y]) # create batch queues self.x, self.y = tf.train.shuffle_batch(input_queues, num_threads=8, batch_size=self.batch_size, capacity=self.batch_size*64, min_after_dequeue=self.batch_size*32, allow_smaller_final_batch=False) else: # inference self.x = tf.placeholder(tf.int32, shape=(None, self.maxlen)) self.y = tf.placeholder(tf.int32, shape=(None, self.maxlen)) # define decoder inputs self.decoder_inputs = tf.concat((tf.ones_like(self.y[:, :1])*2, self.y[:, :-1]), -1) # 2:<S> # Encoder with tf.variable_scope("encoder"): ## Embedding self.enc = embedding(self.x, vocab_size=self.vocab_size, num_units=self.hidden_units, scale=True, scope="enc_embed") ## Positional Encoding self.enc += embedding(tf.tile(tf.expand_dims(tf.range(tf.shape(self.x)[1]), 0), [tf.shape(self.x)[0], 1]), vocab_size=self.maxlen, num_units=self.hidden_units, zero_pad=False, scale=False, scope="enc_pe") ## Dropout self.enc = tf.layers.dropout(self.enc, rate=self.dropout_rate, training=tf.convert_to_tensor(self.is_training)) ## Blocks for i in range(self.num_blocks): with tf.variable_scope("num_blocks_{}".format(i)): ### Multihead Attention self.enc = multihead_attention(queries=self.enc, keys=self.enc, num_units=self.hidden_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, causality=False) ### Feed Forward self.enc = feedforward(self.enc, num_units=[4*self.hidden_units, self.hidden_units]) # Decoder with tf.variable_scope("decoder"): ## Embedding self.dec = embedding(self.decoder_inputs, vocab_size=self.vocab_size, num_units=self.hidden_units, scale=True, scope="dec_embed") ## Positional Encoding self.dec += embedding(tf.tile(tf.expand_dims(tf.range(tf.shape(self.decoder_inputs)[1]), 0), [tf.shape(self.decoder_inputs)[0], 1]), vocab_size=self.maxlen, num_units=self.hidden_units, zero_pad=False, scale=False, scope="dec_pe") ## Dropout self.dec = tf.layers.dropout(self.dec, rate=self.dropout_rate, training=tf.convert_to_tensor(self.is_training)) ## Blocks for i in range(self.num_blocks): with 
tf.variable_scope("num_blocks_{}".format(i)): ## Multihead Attention ( self-attention) self.dec = multihead_attention(queries=self.dec, keys=self.dec, num_units=self.hidden_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, causality=True, scope="self_attention") ## Multihead Attention ( vanilla attention) self.dec = multihead_attention(queries=self.dec, keys=self.enc, num_units=self.hidden_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, causality=False, scope="vanilla_attention") ## Feed Forward self.dec = feedforward(self.dec, num_units=[4*self.hidden_units, self.hidden_units]) # Final linear projection self.logits = tf.layers.dense(self.dec, self.vocab_size) self.preds = tf.to_int32(tf.arg_max(self.logits, dimension=-1)) self.istarget = tf.to_float(tf.not_equal(self.y, 0)) self.acc = tf.reduce_sum(tf.to_float(tf.equal(self.preds, self.y))*self.istarget)/ (tf.reduce_sum(self.istarget)) tf.summary.scalar('acc', self.acc) if self.is_training: # Loss self.y_smoothed = label_smoothing(tf.one_hot(self.y, depth=self.vocab_size)) self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_smoothed) self.mean_loss = tf.reduce_sum(self.loss*self.istarget) / (tf.reduce_sum(self.istarget)) # Training Scheme self.global_step = tf.Variable(0, name='global_step', trainable=False) self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.98, epsilon=1e-8) self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step) # Summary tf.summary.scalar('mean_loss', self.mean_loss) self.merged = tf.summary.merge_all()
from __future__ import print_function

# Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.

"""
This module contains custom aggregators for sqlite3

sqlite has the following aggregate functions built-in:

    avg(X)
    count(X)
    count(*)
    group_concat(X)
    group_concat(X,Y)
    max(X)
    min(X)
    sum(X)
    total(X)

The aggregate functions in sqlite are much faster than the methods
implemented here. On the downside they are rather limited.

This module implements the following aggregate functions:

    abs_mean(X)
    arbitrary(X)
    ci(X)
    datarange(X)
    geometric_mean(X)
    hasinf(X)
    hasnan(X)
    kurt(X)
    kurtp(X)
    median(X)
    mode(X)
    prod(X)
    rms(X)
    sem(X)
    skew(X)
    skewp(X)
    stdev(X)
    stdevp(X)
    var(X)
    varp(X)

The respective docstrings for these aggregators provide more information
as to their specific functionality.

The aggregate functions ignore NULL, non-float text, and nan values.
When X is empty the aggregates return None. Inf values may cause the
aggregate to return None or Inf depending on function. See the test
module for specifics.

All the functions except for median and mode are implemented with
running tallies.
"""

import sys
import inspect

from math import sqrt, isnan, isinf, log10, log, exp, floor
from copy import copy
from collections import Counter

# Python 2 to 3 workarounds
if sys.version_info[0] == 2:
    _strobj = basestring
    _xrange = xrange
elif sys.version_info[0] == 3:
    _strobj = str
    _xrange = range

maxfloat = sys.float_info.max
minfloat = -1. * sys.float_info.max


def getaggregators():
    """returns a generator of the (name, arity, function) of the
       available aggregators"""
    mod = sys.modules[__name__]
    for name, func in inspect.getmembers(mod, inspect.isclass):
        if hasattr(func, 'step') and hasattr(func, 'finalize'):
            arity = len(inspect.getargspec(func.step).args) - 1
            yield (name, arity, func)


def isfloat(x):
    """
    >>> isfloat(12)
    True
    >>> isfloat('a')
    False
    >>> isfloat(float('nan'))
    True
    >>> isfloat(float('inf'))
    True
    """
    try:
        float(x)
    except:
        return False
    return True


def _flatten(x):
    """_flatten(sequence) -> list

    Returns a single, flat list which contains all elements retrieved
    from the sequence and all recursively contained sub-sequences
    (iterables).

    Examples:
    >>> [1, 2, [3,4], (5,6)]
    [1, 2, [3, 4], (5, 6)]
    >>> _flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
    result = []
    for el in x:
        # if isinstance(el, (list, tuple)):
        if hasattr(el, "__iter__") and not isinstance(el, _strobj):
            result.extend(_flatten(el))
        else:
            result.append(el)
    return result


def hist(V, bins=10, range=None, density=False, weights=None,
         cumulative=False):
    # docstring mostly borrowed from numpy.histogram and pylab.hist
    # numpy doesn't offer the cumulative option. pylab.hist always makes a
    # histogram plot. This function requires neither numpy nor pylab and
    # returns the same values. It has been tested.
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    V : list_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(min(V), max(V))``. Values outside the range are
        ignored.
density : bool, optional If False, the result will contain the number of samples in each bin. If True, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. weights : list_like, optional An array of weights, of the same shape as `V`. Each value in `V` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1 cumulative : bool, options If True, then a histogram is computed where each bin gives the counts in that bin plus all bins for smaller values. The last bin gives the total number of datapoints. If normed is also True then the histogram is normalized such that the last bin equals 1. If cumulative evaluates to less than 0 (e.g. -1), the direction of accumulation is reversed. In this case, if normed is also True, then the histogram is normalized such that the first bin equals 1. Returns ------- hist : list The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : list Return the bin edges ``(length(hist)+1)``. """ if bins < 1: raise Exception('bins must be >= 1') if not isinstance(cumulative, bool): raise TypeError('cumulative must be a bool') if not isinstance(density, bool): raise TypeError('cumulative must be a bool') if range == None: vmin, vmax = min(V), max(V) else: vmin, vmax = range rng = vmax - vmin # the range of the histogram dbin = rng / float(bins) # the spacing between the bins # build the weights if they aren't specified if weights == None: W = [1. for i in _xrange(len(V))] else: W = weights if len(V) != len(W): raise Exception('V and weights must be same length') histCounter = Counter() # a multi-set object from collections for i, (v, w) in enumerate(zip(V, W)): # the range defines a closed interval. The floor function # treats it as open so if we find a value that is equal # to max we move it to the appropriate bin. if v==vmax: v-=dbin/2. 
# based on the min and the range rescale the data so it # has a min of 0 and a max given by the number of bins histCounter[floor(bins*(v-vmin)/rng)] += w N = [histCounter[0]] # the counts B = [vmin] # the bin edges to be returned if cumulative: for i in _xrange(1, bins): B.append((i/float(bins))*rng+vmin) N.append(N[-1]+histCounter[i]) else: for i in _xrange(1, bins): B.append((i/float(bins))*rng+vmin) N.append(histCounter[i]) B.append(vmax) # append the last edge if cumulative and density: total = sum(v for k,v in histCounter.items() if k<bins) for i in _xrange(bins): N[i] /= total if not cumulative and density: total = sum(v for k,v in histCounter.items() if k<bins) for i in _xrange(bins): N[i] /= (dbin*total) ## for n,b in zip(N, B): ## print(_str(b,'f',3),n) return N,B class ignore: """getaggregators shouldn't return this""" def __init__(self): pass class hasnan: """ Returns 1 if array contains 1 or more 'nan' values Returns 0 if the array does not contain any 'nan' values """ def __init__(self): self.value=False def step(self, value): if isfloat(value): if isnan(float(value)): self.value=True def finalize(self): return self.value class hasinf: """ Returns 1 if array contains 1 or more 'inf' values Returns 0 if the array does not contain any 'inf' values """ def __init__(self): self.value=False def step(self, value): if isfloat(value): if isinf(float(value)): self.value=True def finalize(self): return self.value class arbitrary: """ sqlite does not guarentee the order of returned rows will be sorted. This will most likely return the first value. It is intended to be used in cases where you know all of the values are the same. """ def __init__(self): self.value=None def step(self, value): if self.value==None: self.value=value def finalize(self): return self.value class datarange: """ Returns non if given an empty set. Otherwise returns the range of the elements. """ def __init__(self): global maxfloat,minfloat self.min=maxfloat self.max=minfloat def step(self, value): if isfloat(value): v=float(value) if not isnan(v): if v<self.min: self.min=v if v>self.max: self.max=v def finalize(self): if self.min==maxfloat and self.max==minfloat: return None return self.max-self.min class abs_mean: """ Takes the absolute value of the elements and computes the mean. """ def __init__(self): self.s=0. self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): self.s+=abs(v) self.N+=1 def finalize(self): if self.N==0: return None return self.s/float(self.N) class geometric_mean: """ Takes the absolute value of the elements and computes the mean. Modeled after scipy.stats.gmean. If x contains any values < 0. return nan, if """ def __init__(self): self.s=0. self.N=0 self.ret_value = -1 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): if v<0: self.ret_value=None elif v==0 and self.ret_value!=None: self.ret_value=0. else: self.s+=log(v) self.N+=1 def finalize(self): if self.N==0: return None if self.ret_value != -1: return self.ret_value return exp(self.s/float(self.N)) class median: """ Returns the median of the elements. """ def __init__(self): self.sequence=[] def step(self, value): if isfloat(value): v=float(value) if not isnan(v): self.sequence.append(v) def finalize(self): N=len(self.sequence) if N==0: return None if N%2==0: return sum(sorted(self.sequence)[int(N/2-1):int(N/2)+1])/2. else: return sorted(self.sequence)[int(N/2)] class mode: """ Returns the mode of the elements. 
""" def __init__(self): # importing Counter here means it doesn't pollute the namespace. from collections import Counter self.counter=Counter() def step(self, value): if isfloat(value): v=float(value) if not isnan(v): self.counter[v]+=1 def finalize(self): if self.counter=={}: return None return self.counter.most_common()[0][0] class var: """ The variance is calculated using the "n-1" method. Estimates variance based on a sample. The variance is a measure of how widely values are dispersed from the average value (the mean). Assumes that its arguments are a sample of the population. If your data represents the entire population, then compute the standard deviation using VARP. s^2 = \frac{1}{N-1} \sum_{i=1}^N (x_i - \overline{x})^2, """ def __init__(self): self.x=[0.,-1.,-1.,-1.,-1.] def step(self, value): [n,oldM,newM,oldS,newS]=self.x if isfloat(value): v=float(value) if not isnan(v): n+=1 if (n == 1): oldM = newM = v oldS = 0.0 else: newM = oldM + (v - oldM)/n newS = oldS + (v - oldM)*(v - newM) # set up for next iteration oldM = copy(newM) oldS = copy(newS) self.x=[n,oldM,newM,oldS,newS] def finalize(self): [n,oldM,newM,oldS,newS]=self.x if n<2: return None return newS/(n-1.) class varp: """ The variance is calculated using the "n" method. Calculates variance based on the entire population given as arguments. The variance is a measure of how widely values are dispersed from the average value (the mean). Assumes that its arguments are the entire population. If your data represents a sample of the population, then compute the variance using VAR. {s_N}^2 = \frac{1}{N} \sum_{i=1}^N (x_i - \overline{x})^2, """ def __init__(self): self.x=[0.,-1.,-1.,-1.,-1.] def step(self, value): [n,oldM,newM,oldS,newS]=self.x if isfloat(value): v=float(value) if not isnan(v): n+=1 if (n == 1): oldM = newM = v oldS = 0.0 else: newM = oldM + (v - oldM)/n newS = oldS + (v - oldM)*(v - newM) # set up for next iteration oldM = copy(newM) oldS = copy(newS) self.x=[n,oldM,newM,oldS,newS] def finalize(self): [n,oldM,newM,oldS,newS]=self.x if n==0: return None if n==1: return 0. return newS/float(n) class stdev: """ The standard deviation is calculated using the "n-1" method. Estimates standard deviation based on a sample. The standard deviation is a measure of how widely values are dispersed from the average value (the mean). Assumes that its arguments are a sample of the population. If your data represents the entire population, then compute the standard deviation using STDEVP. s^2 = \sqrt{\frac{1}{N-1} \sum_{i=1}^N (x_i - \overline{x})^2}, """ def __init__(self): self.x=[0.,-1.,-1.,-1.,-1.] def step(self, value): [n,oldM,newM,oldS,newS]=self.x if isfloat(value): v=float(value) if not isnan(v): n+=1 if (n == 1): oldM = newM = v oldS = 0.0 else: newM = oldM + (v - oldM)/n newS = oldS + (v - oldM)*(v - newM) # set up for next iteration oldM = copy(newM) oldS = copy(newS) self.x=[n,oldM,newM,oldS,newS] def finalize(self): [n,oldM,newM,oldS,newS]=self.x if n<2: return None return sqrt(newS/(n-1.)) class stdevp: """ The standard deviation is calculated using the "n" method. Calculates standard deviation based on the entire population given as arguments. The standard deviation is a measure of how widely values are dispersed from the average value (the mean). Assumes that its arguments are the entire population. If your data represents a sample of the population, then compute the standard deviation using STDEV. s_N = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - \overline{x})^2}. 
""" def __init__(self): self.x=[0.,-1.,-1.,-1.,-1.] def step(self, value): [n,oldM,newM,oldS,newS]=self.x if isfloat(value): v=float(value) if not isnan(v): n+=1 if (n == 1): oldM = newM = v oldS = 0.0 else: newM = oldM + (v - oldM)/n newS = oldS + (v - oldM)*(v - newM) # set up for next iteration oldM = copy(newM) oldS = copy(newS) self.x=[n,oldM,newM,oldS,newS] def finalize(self): [n,oldM,newM,oldS,newS]=self.x if n==0: return None if n==1: return 0. return sqrt(newS/float(n)) class sem: """ The standard error of the mean (SEM) is the standard deviation of the sample mean estimate of a population mean. SEM is estimated by the sample estimate of the population standard deviation (sample standard deviation) divided by the square root of the sample size. SE_\bar{x}\ = \frac{s}{\sqrt{n}}, where {s} is the sample standard deviation """ def __init__(self): self.x=[0.,-1.,-1.,-1.,-1.] def step(self, value): [n,oldM,newM,oldS,newS]=self.x if isfloat(value): v=float(value) if not isnan(v): n+=1 if (n == 1): oldM = newM = v oldS = 0.0 else: newM = oldM + (v - oldM)/n newS = oldS + (v - oldM)*(v - newM) # set up for next iteration oldM = copy(newM) oldS = copy(newS) self.x=[n,oldM,newM,oldS,newS] def finalize(self): [n,oldM,newM,oldS,newS]=self.x if n<2: return None return sqrt(newS/(n-1.))/sqrt(n) class ci: """ 95% confidence interval based on the standard error of the mean. The confidence interval is estimated as 1.96*SEM. The lower bound can be computed as mean-ci. The upper bound can be computed as mean+ci. CI=1.96*SE_\bar{x}\ """ def __init__(self): self.x=[0.,-1.,-1.,-1.,-1.] def step(self, value): [n,oldM,newM,oldS,newS]=self.x if isfloat(value): v=float(value) if not isnan(v): n+=1 if (n == 1): oldM = newM = v oldS = 0.0 else: newM = oldM + (v - oldM)/n newS = oldS + (v - oldM)*(v - newM) # set up for next iteration oldM = copy(newM) oldS = copy(newS) self.x=[n,oldM,newM,oldS,newS] def finalize(self): [n,oldM,newM,oldS,newS]=self.x if n<2: return None return sqrt(newS/(n-1.))/sqrt(n)*1.96 class rms: """ The root mean square (abbreviated RMS or rms), also known as the quadratic mean, is a statistical measure of the magnitude of a varying quantity. x_{\mathrm{rms}} = \sqrt {{{x_1}^2 + {x_2}^2 + \cdots + {x_n}^2} \over n} """ def __init__(self): self.ss=0. self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): self.ss+=v**2. self.N+=1 def finalize(self): if self.N==0: return None return sqrt(self.ss/float(self.N)) class prod: """ Return the product of the elements """ def __init__(self): self.p=1. self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): self.p*=v self.N+=1 def finalize(self): if self.N==0: return None return self.p class skewp: """ skewness population estimate is based on the cumulants calculated from the raw moments. G_1 = \frac{k_3}{k_2^{3/2}}, where {k_3} and {k_2} are the 3rd and 2nd order cumulants respectively. see also: http://mathworld.wolfram.com/Skewness.html http://mathworld.wolfram.com/RawMoment.html http://mathworld.wolfram.com/Cumulant.html http://www.tc3.edu/instruct/sbrown/stat/shape.htm#SkewnessCompute """ def __init__(self): self.s1=0. self.s2=0. self.s3=0. 
self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): ov=copy(v) self.s1+=v v*=ov self.s2+=v v*=ov self.s3+=v self.N+=1 def finalize(self): if self.N<3: return None self.N=float(self.N) # calculate unbiased raw moments m1=self.s1/self.N m2=self.s2/self.N m3=self.s3/self.N # from the raw moments calculate cumulants k1 = m1 k2 = m2 - m1**2 k3 = 2.*m1**3. - 3.*m1*m2 + m3 num=k3 den=k2**1.5 if den==0: return None else: return num/den class skew: """ skewness sample estimate is based on the cumulants calculated from the raw moments. G_1 = \frac{\sqrt{N(N-1)}}{N-2} \frac{k_3}{k_2^{3/2}}, where {k_3} and {k_2} are the 3rd and 2nd order cumulants respectively. see also: http://mathworld.wolfram.com/Skewness.html http://mathworld.wolfram.com/RawMoment.html http://mathworld.wolfram.com/Cumulant.html http://www.tc3.edu/instruct/sbrown/stat/shape.htm#SkewnessCompute """ def __init__(self): self.s1=0. self.s2=0. self.s3=0. self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): ov=copy(v) self.s1+=v v*=ov self.s2+=v v*=ov self.s3+=v self.N+=1 def finalize(self): if self.N<3: return None self.N=float(self.N) # calculate unbiased raw moments m1=self.s1/self.N m2=self.s2/self.N m3=self.s3/self.N # from the raw moments calculate cumulants k1 = m1 k2 = m2 - m1**2 k3 = 2.*m1**3. - 3.*m1*m2 + m3 num=sqrt(self.N*(self.N-1.))/(self.N-2.)*k3 den=k2**1.5 if den==0: return None else: return num/den class kurtp: """ kurtosis population estimate is based on the cumulants calculated from the raw moments. G_2 = \frac{k_4}{k_{2}^2}, where {k_4} and {k_2} are the 4th and 2nd order cumulants respectively. see also: http://mathworld.wolfram.com/Kurtosis.html http://mathworld.wolfram.com/RawMoment.html http://mathworld.wolfram.com/Cumulant.html """ def __init__(self): self.s1=0. self.s2=0. self.s3=0. self.s4=0. self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): ov=copy(v) self.s1+=v v*=ov self.s2+=v v*=ov self.s3+=v v*=ov self.s4+=v self.N+=1 def finalize(self): if self.N<3: return None self.N=float(self.N) # calculate unbiased raw moments m1=self.s1/self.N m2=self.s2/self.N m3=self.s3/self.N m4=self.s4/self.N # from the raw moments calculate cumulants ## k1 = m1 k2 = m2 - m1**2 ## k3 = 2.*m1**3. - 3.*m1*m2 + m3 k4 = -6.*m1**4 + 12.*(m1**2)*m2 -3.*m2**2. - 4.*m1*m3 + m4 num=k4 den=k2**2. if den==0: return None else: return num/den class kurt: """ skewness sample estimate is based on the cumulants calculated from the raw moments. g_2 = \frac{k_4}{k_{2}^2}, G_2 = \frac{N-1}{(N-2)(N-3)}[(N+1)g_2 + 6] where {k_4} and {k_2} are the 4th and 2nd order cumulants respectively. see also: http://mathworld.wolfram.com/Kurtosis.html http://mathworld.wolfram.com/RawMoment.html http://mathworld.wolfram.com/Cumulant.html http://www.tc3.edu/instruct/sbrown/stat/shape.htm#KurtosisCompute """ def __init__(self): self.s1=0. self.s2=0. self.s3=0. self.s4=0. self.N=0 def step(self, value): if isfloat(value): v=float(value) if not isnan(v): ov=copy(v) self.s1+=v v*=ov self.s2+=v v*=ov self.s3+=v v*=ov self.s4+=v self.N+=1 def finalize(self): if self.N<3: return None self.N=float(self.N) # calculate unbiased raw moments m1=self.s1/self.N m2=self.s2/self.N m3=self.s3/self.N m4=self.s4/self.N # from the raw moments calculate cumulants ## k1 = m1 k2 = m2 - m1**2 ## k3 = 2.*m1**3. - 3.*m1*m2 + m3 k4 = -6.*m1**4 + 12.*(m1**2)*m2 -3.*m2**2. - 4.*m1*m3 + m4 num=k4 den=k2**2. 
        if den==0.:
            return None
        g2=num/den
        return (self.N-1.)/((self.N-2.)*(self.N-3.))*((self.N+1.)*g2+6.)
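# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the aggregator module above): the classes
# above follow sqlite3's aggregate protocol (step()/finalize()), so they can
# be registered on a connection with create_aggregate().  Wiring them up this
# way is an assumption about how the module is meant to be consumed; the
# standalone `welford_var` class below is a minimal re-implementation of the
# same Welford-style online update used by var/stdev/sem/ci, included only so
# this example runs on its own.
# ---------------------------------------------------------------------------
import sqlite3


class welford_var:
    """Sample variance ("n-1" method) via Welford's online update."""

    def __init__(self):
        self.n = 0
        self.mean = 0.0
        self.m2 = 0.0  # running sum of squared deviations from the mean

    def step(self, value):
        if value is None:
            return
        v = float(value)
        self.n += 1
        delta = v - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (v - self.mean)

    def finalize(self):
        if self.n < 2:
            return None
        return self.m2 / (self.n - 1.0)


if __name__ == "__main__":
    con = sqlite3.connect(":memory:")
    # One argument per row is passed to step(); finalize() returns the result.
    con.create_aggregate("var", 1, welford_var)
    con.execute("CREATE TABLE t (x REAL)")
    con.executemany("INSERT INTO t VALUES (?)", [(2.0,), (4.0,), (4.0,), (5.0,)])
    print(con.execute("SELECT var(x) FROM t").fetchone()[0])  # -> 1.58333...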
import mock from olympia import amo from olympia.addons.models import Addon from olympia.amo import search from olympia.amo.tests import ESTestCaseWithAddons, TestCase from olympia.tags.models import Tag class TestESIndexing(ESTestCaseWithAddons): # This needs to be in its own class for data isolation. def test_indexed_count(self): # Did all the right addons get indexed? count = Addon.search().filter(type=1, is_disabled=False).count() # Created in the setUpClass. assert count == 4 == ( Addon.objects.filter(disabled_by_user=False, status__in=amo.VALID_ADDON_STATUSES).count()) def test_get_es_not_mocked(self): es = search.get_es() assert not issubclass(es.__class__, mock.Mock) class TestNoESIndexing(TestCase): def test_no_es(self): assert not getattr(self, 'es', False), ( 'TestCase should not have "es" attribute') def test_not_indexed(self): addon = Addon.objects.create(type=amo.ADDON_EXTENSION, status=amo.STATUS_PUBLIC) assert issubclass( Addon.search().filter(id__in=addon.id).count().__class__, mock.Mock) def test_get_es_mocked(self): es = search.get_es() assert issubclass(es.__class__, mock.Mock) class TestESWithoutMakingQueries(TestCase): # These tests test methods that don't directly call ES, so they work using # the faster TestCase class where ES is mocked. def test_clone(self): # Doing a filter creates a new ES object. qs = Addon.search() qs2 = qs.filter(type=1) assert 'bool' not in qs._build_query()['query'] assert 'filter' in qs2._build_query()['query']['bool'] def test_filter(self): qs = Addon.search().filter(type=1) assert qs._build_query()['query']['bool']['filter'] == ( [{'term': {'type': 1}}]) def test_in_filter(self): qs = Addon.search().filter(type__in=[1, 2]) assert qs._build_query()['query']['bool']['filter'] == ( [{'terms': {'type': [1, 2]}}]) def test_and(self): qs = Addon.search().filter(type=1, category__in=[1, 2]) filters = qs._build_query()['query']['bool']['filter'] # Filters: # [{'term': {'type': 1}}, {'terms': {'category': [1, 2]}}] assert len(filters) == 2 assert {'term': {'type': 1}} in filters assert {'terms': {'category': [1, 2]}} in filters def test_query(self): qs = Addon.search().query(type=1) assert qs._build_query()['query'] == ( {'term': {'type': 1}}) def test_query_match(self): qs = Addon.search().query(name__match='woo woo') assert qs._build_query()['query'] == ( {'match': {'name': 'woo woo'}}) def test_query_multiple_and_range(self): qs = Addon.search().query(type=1, status__gte=1) query = qs._build_query()['query'] # Query: # {'bool': {'must': [{'term': {'type': 1}}, # {'range': {'status': {'gte': 1}}}, ]}} assert list(query.keys()) == ['bool'] assert list(query['bool'].keys()) == ['must'] assert {'term': {'type': 1}} in query['bool']['must'] assert {'range': {'status': {'gte': 1}}} in query['bool']['must'] def test_query_fuzzy(self): fuzz = {'boost': 2, 'value': 'woo'} qs = Addon.search().query(type=1, status__fuzzy=fuzz) query = qs._build_query()['query'] # Query: # {'bool': {'must': [{'fuzzy': {'status': fuzz}}, # {'term': {'type': 1}}, ]}}) assert list(query.keys()) == ['bool'] assert list(query['bool'].keys()) == ['must'] assert {'term': {'type': 1}} in query['bool']['must'] assert {'fuzzy': {'status': fuzz}} in query['bool']['must'] def test_order_by_desc(self): qs = Addon.search().order_by('-rating') assert qs._build_query()['sort'] == [{'rating': 'desc'}] def test_order_by_asc(self): qs = Addon.search().order_by('rating') assert qs._build_query()['sort'] == ['rating'] def test_order_by_multiple(self): qs = Addon.search().order_by('-rating', 
'id') assert qs._build_query()['sort'] == [{'rating': 'desc'}, 'id'] def test_slice(self): qs = Addon.search()[5:12] assert qs._build_query()['from'] == 5 assert qs._build_query()['size'] == 7 def test_slice_stop(self): qs = Addon.search()[:6] assert qs._build_query()['size'] == 6 def test_slice_stop_zero(self): qs = Addon.search()[:0] assert qs._build_query()['size'] == 0 def test_gte(self): qs = Addon.search().filter(type__in=[1, 2], status__gte=4) filters = qs._build_query()['query']['bool']['filter'] # Filters: # [ # {'terms': {'type': [1, 2]}}, # {'range': {'status': {'gte': 4}}}, # ] assert len(filters) assert {'terms': {'type': [1, 2]}} in filters assert {'range': {'status': {'gte': 4}}} in filters def test_lte(self): qs = Addon.search().filter(type__in=[1, 2], status__lte=4) filters = qs._build_query()['query']['bool']['filter'] # Filters: # [ # {'terms': {'type': [1, 2]}}, # {'range': {'status': {'lte': 4}}}, # ] assert len(filters) == 2 assert {'terms': {'type': [1, 2]}} in filters assert {'range': {'status': {'lte': 4}}} in filters def test_gt(self): qs = Addon.search().filter(type__in=[1, 2], status__gt=4) filters = qs._build_query()['query']['bool']['filter'] # Filters: # [ # {'terms': {'type': [1, 2]}}, # {'range': {'status': {'gt': 4}}}, # ] assert len(filters) == 2 assert {'terms': {'type': [1, 2]}} in filters assert {'range': {'status': {'gt': 4}}} in filters def test_lt(self): qs = Addon.search().filter(type__in=[1, 2], status__lt=4) filters = qs._build_query()['query']['bool']['filter'] # Filters: # [ # {'range': {'status': {'lt': 4}}}, # {'terms': {'type': [1, 2]}}, # ] assert len(filters) assert {'range': {'status': {'lt': 4}}} in filters assert {'terms': {'type': [1, 2]}} in filters def test_lt2(self): qs = Addon.search().filter(status__lt=4) assert qs._build_query()['query']['bool']['filter'] == ( [{'range': {'status': {'lt': 4}}}]) def test_range(self): qs = Addon.search().filter(date__range=('a', 'b')) assert qs._build_query()['query']['bool']['filter'] == ( [{'range': {'date': {'gte': 'a', 'lte': 'b'}}}]) def test_prefix(self): qs = Addon.search().query(name__startswith='woo') assert qs._build_query()['query'] == ( {'prefix': {'name': 'woo'}}) def test_values(self): qs = Addon.search().values('name') assert qs._build_query()['_source'] == ['id', 'name'] def test_values_dict(self): qs = Addon.search().values_dict('name') assert qs._build_query()['_source'] == ['id', 'name'] def test_empty_values_dict(self): qs = Addon.search().values_dict() assert qs._build_query()['_source'] == ['id'] def test_extra_values(self): qs = Addon.search().extra(values=['name']) assert qs._build_query()['_source'] == ['id', 'name'] qs = Addon.search().values('status').extra(values=['name']) assert qs._build_query()['_source'] == ['id', 'status', 'name'] def test_extra_values_dict(self): qs = Addon.search().extra(values_dict=['name']) assert qs._build_query()['_source'] == ['id', 'name'] qs = Addon.search().values_dict('status').extra(values_dict=['name']) assert qs._build_query()['_source'] == ['id', 'status', 'name'] def test_extra_order_by(self): qs = Addon.search().extra(order_by=['-rating']) assert qs._build_query()['sort'] == [{'rating': 'desc'}] qs = Addon.search().order_by('-id').extra(order_by=['-rating']) assert qs._build_query()['sort'] == [ {'id': 'desc'}, {'rating': 'desc'}] def test_extra_query(self): qs = Addon.search().extra(query={'type': 1}) assert qs._build_query()['query'] == ( {'term': {'type': 1}}) qs = Addon.search().filter(status=1).extra(query={'type': 1}) 
filtered = qs._build_query()['query']['bool'] assert filtered['must'] == ( [{'term': {'type': 1}}]) assert filtered['filter'] == [{'term': {'status': 1}}] def test_extra_filter(self): qs = Addon.search().extra(filter={'category__in': [1, 2]}) assert qs._build_query()['query']['bool']['filter'] == ( [{'terms': {'category': [1, 2]}}]) qs = (Addon.search().filter(type=1) .extra(filter={'category__in': [1, 2]})) filters = qs._build_query()['query']['bool']['filter'] # Filters: # [{'term': {'type': 1}}, {'terms': {'category': [1, 2]}}] assert len(filters) == 2 assert {'term': {'type': 1}} in filters assert {'terms': {'category': [1, 2]}} in filters def test_source(self): qs = Addon.search().source('versions') assert qs._build_query()['_source'] == ['id', 'versions'] class TestES(ESTestCaseWithAddons): def test_getitem(self): addons = list(Addon.search()) assert addons[0] == Addon.search()[0] def test_iter(self): qs = Addon.search().filter(type=1, is_disabled=False) assert len(qs) == len(list(qs)) def test_count(self): assert Addon.search().count() == 6 def test_count_uses_cached_results(self): qs = Addon.search() qs._results_cache = mock.Mock() qs._results_cache.count = mock.sentinel.count assert qs.count() == mock.sentinel.count def test_len(self): qs = Addon.search() qs._results_cache = [1] assert len(qs) == 1 def test_values_result(self): addons = [{'id': a.id, 'slug': a.slug} for a in self._addons] qs = Addon.search().values_dict('slug').order_by('id') assert list(qs) == addons def test_values_dict_result(self): addons = [{'id': a.id, 'slug': a.slug} for a in self._addons] qs = Addon.search().values_dict('slug').order_by('id') assert list(qs) == list(addons) def test_empty_values_dict_result(self): qs = Addon.search().values_dict() assert list(qs[0].keys()) == ['id'] def test_object_result(self): qs = Addon.search().filter(id=self._addons[0].id)[:1] assert self._addons[:1] == list(qs) def test_object_result_slice(self): addon = self._addons[0] qs = Addon.search().filter(id=addon.id) assert addon == qs[0] def test_extra_bad_key(self): with self.assertRaises(AssertionError): Addon.search().extra(x=1) def test_aggregations(self): Tag(tag_text='sky').save_tag(self._addons[0]) Tag(tag_text='sky').save_tag(self._addons[1]) Tag(tag_text='sky').save_tag(self._addons[2]) Tag(tag_text='earth').save_tag(self._addons[0]) Tag(tag_text='earth').save_tag(self._addons[1]) Tag(tag_text='ocean').save_tag(self._addons[0]) self.reindex(Addon) qs = Addon.search().aggregate(tags={'terms': {'field': 'tags'}}) results = list(qs) assert len(results) == 6 assert qs.aggregations == { u'tags': [ {u'doc_count': 3, u'key': u'sky'}, {u'doc_count': 2, u'key': u'earth'}, {u'doc_count': 1, u'key': u'ocean'}]}
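# Hedged illustration (not the real olympia.amo.search implementation): the
# assertions above all reduce to a small mapping from Django-style keyword
# lookups to Elasticsearch bool-filter clauses.  The helper below sketches
# that mapping for the lookups exercised in these tests (exact term, __in,
# and the range operators); the function name and its exact behaviour are
# assumptions made for illustration only.
def build_es_filters(**lookups):
    """Translate keyword lookups into a list of ES filter clauses."""
    range_ops = ('gte', 'gt', 'lte', 'lt')
    clauses = []
    for key, value in lookups.items():
        field, _, op = key.partition('__')
        if not op:
            clauses.append({'term': {field: value}})
        elif op == 'in':
            clauses.append({'terms': {field: list(value)}})
        elif op in range_ops:
            clauses.append({'range': {field: {op: value}}})
        else:
            raise ValueError('unsupported lookup: %s' % key)
    return clauses


# Mirrors test_gte above:
#   build_es_filters(type__in=[1, 2], status__gte=4)
#   -> [{'terms': {'type': [1, 2]}}, {'range': {'status': {'gte': 4}}}]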
# -*- coding: utf-8 -*- # Copyright 2014 Rob Ruana # Licensed under the BSD License, see LICENSE file for details. """Tests for :mod:`sphinxcontrib.napoleon.docstring` module.""" import textwrap from sphinxcontrib.napoleon import Config from sphinxcontrib.napoleon.docstring import GoogleDocstring, NumpyDocstring from unittest import TestCase try: # Python >=3.3 from unittest.mock import Mock except ImportError: from mock import Mock class BaseDocstringTest(TestCase): pass class GoogleDocstringTest(BaseDocstringTest): docstrings = [( """Single line summary""", """Single line summary""" ), ( """ Single line summary Extended description """, """ Single line summary Extended description """ ), ( """ Single line summary Args: arg1(str):Extended description of arg1 """, """ Single line summary :Parameters: **arg1** (*str*) -- Extended description of arg1""" ), ( """ Single line summary Args: arg1(str):Extended description of arg1 arg2 ( int ) : Extended description of arg2 Keyword Args: kwarg1(str):Extended description of kwarg1 kwarg2 ( int ) : Extended description of kwarg2""", """ Single line summary :Parameters: * **arg1** (*str*) -- Extended description of arg1 * **arg2** (*int*) -- Extended description of arg2 :Keyword Arguments: * **kwarg1** (*str*) -- Extended description of kwarg1 * **kwarg2** (*int*) -- Extended description of kwarg2""" ), ( """ Single line summary Arguments: arg1(str):Extended description of arg1 arg2 ( int ) : Extended description of arg2 Keyword Arguments: kwarg1(str):Extended description of kwarg1 kwarg2 ( int ) : Extended description of kwarg2""", """ Single line summary :Parameters: * **arg1** (*str*) -- Extended description of arg1 * **arg2** (*int*) -- Extended description of arg2 :Keyword Arguments: * **kwarg1** (*str*) -- Extended description of kwarg1 * **kwarg2** (*int*) -- Extended description of kwarg2""" ), ( """ Single line summary Return: str:Extended description of return value """, """ Single line summary :returns: *str* -- Extended description of return value""" ), ( """ Single line summary Returns: str:Extended description of return value """, """ Single line summary :returns: *str* -- Extended description of return value""" ), ( """ Single line summary Returns: Extended description of return value """, """ Single line summary :returns: Extended description of return value""" )] def test_docstrings(self): config = Config(napoleon_use_param=False, napoleon_use_rtype=False) for docstring, expected in self.docstrings: actual = str(GoogleDocstring(textwrap.dedent(docstring), config)) expected = textwrap.dedent(expected) self.assertEqual(expected, actual) def test_parameters_with_class_reference(self): docstring = """\ Construct a new XBlock. This class should only be used by runtimes. Arguments: runtime (:class:`Runtime`): Use it to access the environment. It is available in XBlock code as ``self.runtime``. field_data (:class:`FieldData`): Interface used by the XBlock fields to access their data from wherever it is persisted. scope_ids (:class:`ScopeIds`): Identifiers needed to resolve scopes. """ actual = str(GoogleDocstring(docstring)) expected = """\ Construct a new XBlock. This class should only be used by runtimes. :param runtime: Use it to access the environment. It is available in XBlock code as ``self.runtime``. :type runtime: :class:`Runtime` :param field_data: Interface used by the XBlock fields to access their data from wherever it is persisted. :type field_data: :class:`FieldData` :param scope_ids: Identifiers needed to resolve scopes. 
:type scope_ids: :class:`ScopeIds` """ self.assertEqual(expected, actual) def test_attributes_with_class_reference(self): docstring = """\ Attributes: in_attr(:class:`numpy.ndarray`): super-dooper attribute """ actual = str(GoogleDocstring(docstring)) expected = """\ .. attribute:: in_attr :class:`numpy.ndarray` super-dooper attribute """ self.assertEqual(expected, actual) docstring = """\ Attributes: in_attr(numpy.ndarray): super-dooper attribute """ actual = str(GoogleDocstring(docstring)) expected = """\ .. attribute:: in_attr *numpy.ndarray* super-dooper attribute """ self.assertEqual(expected, actual) class NumpyDocstringTest(BaseDocstringTest): docstrings = [( """Single line summary""", """Single line summary""" ), ( """ Single line summary Extended description """, """ Single line summary Extended description """ ), ( """ Single line summary Parameters ---------- arg1:str Extended description of arg1 """, """ Single line summary :Parameters: **arg1** (*str*) -- Extended description of arg1""" ), ( """ Single line summary Parameters ---------- arg1:str Extended description of arg1 arg2 : int Extended description of arg2 Keyword Arguments ----------------- kwarg1:str Extended description of kwarg1 kwarg2 : int Extended description of kwarg2 """, """ Single line summary :Parameters: * **arg1** (*str*) -- Extended description of arg1 * **arg2** (*int*) -- Extended description of arg2 :Keyword Arguments: * **kwarg1** (*str*) -- Extended description of kwarg1 * **kwarg2** (*int*) -- Extended description of kwarg2""" ), ( """ Single line summary Return ------ str Extended description of return value """, """ Single line summary :returns: *str* -- Extended description of return value""" ), ( """ Single line summary Returns ------- str Extended description of return value """, """ Single line summary :returns: *str* -- Extended description of return value""" )] def test_docstrings(self): config = Config(napoleon_use_param=False, napoleon_use_rtype=False) for docstring, expected in self.docstrings: actual = str(NumpyDocstring(textwrap.dedent(docstring), config)) expected = textwrap.dedent(expected) self.assertEqual(expected, actual) def test_parameters_with_class_reference(self): docstring = """\ Parameters ---------- param1 : :class:`MyClass <name.space.MyClass>` instance """ config = Config(napoleon_use_param=False) actual = str(NumpyDocstring(docstring, config)) expected = """\ :Parameters: **param1** (:class:`MyClass <name.space.MyClass>` instance) """ self.assertEqual(expected, actual) config = Config(napoleon_use_param=True) actual = str(NumpyDocstring(docstring, config)) expected = """\ :type param1: :class:`MyClass <name.space.MyClass>` instance """ self.assertEqual(expected, actual) def test_parameters_without_class_reference(self): docstring = """\ Parameters ---------- param1 : MyClass instance """ config = Config(napoleon_use_param=False) actual = str(NumpyDocstring(docstring, config)) expected = """\ :Parameters: **param1** (*MyClass instance*) """ self.assertEqual(expected, actual) config = Config(napoleon_use_param=True) actual = str(NumpyDocstring(textwrap.dedent(docstring), config)) expected = """\ :type param1: MyClass instance """ self.assertEqual(expected, actual) def test_see_also_refs(self): docstring = """\ numpy.multivariate_normal(mean, cov, shape=None, spam=None) See Also -------- some, other, funcs otherfunc : relationship """ actual = str(NumpyDocstring(docstring)) expected = """\ numpy.multivariate_normal(mean, cov, shape=None, spam=None) .. 
seealso:: :obj:`some`, :obj:`other`, :obj:`funcs` \n\ :obj:`otherfunc` relationship """ self.assertEqual(expected, actual) docstring = """\ numpy.multivariate_normal(mean, cov, shape=None, spam=None) See Also -------- some, other, funcs otherfunc : relationship """ config = Config() app = Mock() actual = str(NumpyDocstring(docstring, config, app, "method")) expected = """\ numpy.multivariate_normal(mean, cov, shape=None, spam=None) .. seealso:: :meth:`some`, :meth:`other`, :meth:`funcs` \n\ :meth:`otherfunc` relationship """ self.assertEqual(expected, actual)
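# Minimal standalone usage sketch of the API exercised by the tests above:
# feeding a Google-style docstring through GoogleDocstring and printing the
# generated reStructuredText.  The sample docstring content is made up for
# illustration; the Config/GoogleDocstring calls are the same ones the tests
# in this file already use.
if __name__ == "__main__":
    sample = textwrap.dedent("""\
        Add two numbers.

        Args:
            a(int): First operand.
            b(int): Second operand.

        Returns:
            int: The sum of ``a`` and ``b``.
        """)
    config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
    print(str(GoogleDocstring(sample, config)))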
#!/usr/bin/python2.7 from time import sleep, gmtime from mocker import Mocker, ANY, MockerTestCase from unittest import main, TestCase import logging LOGGER_LOGFILE = "./LOGS/test_FS.log" logger = logging.getLogger() logger.setLevel( logging.CRITICAL ) #logger.setLevel( logging.DEBUG ) ch = logging.StreamHandler() ch.setLevel( logging.DEBUG ) formatter = logging.Formatter('%(message)s') ch.setFormatter( formatter ) logger.addHandler( ch ) fh = logging.FileHandler( LOGGER_LOGFILE ) fh.setLevel( logging.INFO ) formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') fh.setFormatter(formatter) logger.addHandler(fh) # Function which replaces output of DLib.echo def fake_echo( *args, **kwargs ): print( "Args: ", args ) print( "KWArgs: ", kwargs ) def fake_echo_silent( *args, **kwargs ): pass ################################################### # Test config storage class test_ConfigStorage( MockerTestCase ): from modules import FS listEmpty = [] listOne = ["always_success", ""] params = { 'DEFAULT_CONFIG' : "./ExecCMD.yaml", 'DEFAULT_LOG' : "./LOGS/ExecCMD_TESTING.log", #'DEFAULT_LOGLEVEL' : logging.DEBUG, 'DEFAULT_LOGLEVEL' : logging.INFO, 'DONT_EXECUTE' : False, 'LIST_ACTIONS' : False, 'ACTION' : None, } #self.execDetails = [ # {"start":"AAA", "startSec":"BBBB"}, # {"start":"AAA", "startSec":"BBBB", "finish":"CCCC", "total":"DDDD"}, # ] execList_empty = [] execList_one_good = [ ["always_success", "always_fail" ] ] execList_one_bad = [ ["always_fail", "always_fail" ] ] execList_many_bad = [ ["always_success", [] ], ["always_success", "always_fail" ], ["always_success", [] ], ["always_success", [] ], ["always_fail", [] ] ] dataNone = [ {"confAddr" : None, "actionName" : None}, {"raise" : ValueError} ] dataEmpty = [ {"confAddr" : "", "actionName" : ""}, {"raise" : ValueError} ] dataIncorrect = [ {"confAddr" : 123, "actionName" : 456}, {"raise" : TypeError} ] dataNoConfig = [ {"confAddr" : "./NOSUCHCONFIG.yaml", "actionName" : ""}, {"raise" : ValueError} ] dataGoodconfigNoaction = [ {"confAddr" : "./CONFIGS/testconf.yaml", "actionName" : ""}, {"raise" : ValueError} ] dataGoodconfigBadaction = [ {"confAddr" : "./CONFIGS/testconf.yaml", "actionName" : 123}, {"raise" : TypeError} ] dataGoodconfigWrongaction = [ {"confAddr" : "./CONFIGS/testconf.yaml", "actionName" : "thereisnosuchaction"}, {"raise" : ValueError} ] dataGoodconfigCorrectaction = [ {"confAddr" : "./CONFIGS/testconf.yaml", "actionName" : "test"}, {"isinstance" : FS.ConfigStorage } ] def prepare_replaceExecCMD(self, cmd, output): self.m_exec = self.mocker.replace('ExecLib.execCMD') self.m_exec( cmd=cmd, listoferrors=ANY ) self.mocker.result( output ) def prepare_replaceEcho(self, printParams=False): self.m_echo = self.mocker.replace('DLib.echo') self.m_echo( ANY, level=ANY ) self.mocker.count( 1, None ) if printParams: self.mocker.call( fake_echo ) else: self.mocker.call( fake_echo_silent ) ######################### #### Check procedures ######################### def setUp(self): print("replacing CommandExecutor") from modules import FS self.testObj = FS.ConfigStorage FS.logger = logger print("Done") #from ExecLib import runCommandList #self.runCommandList = runCommandList #self.m_Details = self.mocker.replace('DLib.getExecDetails') ##self.m_Details( processPeriod="START", data=ANY ) def checkValues( self, result, compareData ): print("\n\nCHECK = %s" % result ) for key, value in compareData.iteritems(): if key == "result": self.assertEqual( result, value ) continue if key == "isinstance": self.assertIsInstance( result, 
value) #self.assertEqual( isinstance(result, value), True ) continue if key == "raise": print("SHOULD RAISE ERROR!!!!!!!!!!!!") self.fail( "Missing expected data!" ) def checkProcedure( self, testData ): self.mocker.replay() print("Test with %s" % testData[0]) print("Checks %s" % testData[1]) if testData[1].has_key("raise"): print("SHOULD RAISE ERROR!!!!!!!!!!!!") with self.assertRaises( testData[1]["raise"] ): check = self.testObj( **testData[0] ) return check = self.testObj( **testData[0] ) print "CHECK = %s" % check if check and isinstance( check, dict ): for key in check: if check[ key ] != testData[1][key]: print( " . \"%s\":" % key ) print( "\t%s" % check[key] ) print( "\t%s" % testData[1][key] ) self.assertEqual( check[ key ], testData[1][key] ) self.checkValues(check, testData[1]) #self.assertEqual( check, testData[1] ) ######################### #### TESTS ######################### #def prepare_replaceExecCMD(self, cmd, output): # self.m_exec = self.mocker.replace('ExecLib.execCMD') # self.m_exec( cmd=cmd, listoferrors=ANY ) # self.mocker.result( output ) def test_init_empty(self): self.checkProcedure( self.dataEmpty ) self.checkProcedure( self.dataNone ) def test_init_incorrect(self): self.checkProcedure( self.dataIncorrect ) def test_init_noConfig(self): self.checkProcedure( self.dataNoConfig ) def test_init_GoodConfig_BadActions(self): self.checkProcedure( self.dataGoodconfigNoaction ) self.checkProcedure( self.dataGoodconfigBadaction ) self.checkProcedure( self.dataGoodconfigWrongaction ) def test_init_GoodConfig_GoodAction(self): self.checkProcedure( self.dataGoodconfigCorrectaction )
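# Hedged sketch (not the real modules.FS implementation): the data tables
# above pin down a validation contract for ConfigStorage.__init__ -- ValueError
# for empty/None arguments, a missing config file, or an unknown action name,
# and TypeError for non-string arguments.  A minimal class honouring that
# contract could look like the one below; the yaml loading and the assumption
# that actions are top-level keys of the config are illustrative only.
import os

import yaml


class MinimalConfigStorage(object):
    def __init__(self, confAddr, actionName):
        if confAddr in (None, "") or actionName in (None, ""):
            raise ValueError("confAddr and actionName must be non-empty")
        if not isinstance(confAddr, str) or not isinstance(actionName, str):
            raise TypeError("confAddr and actionName must be strings")
        if not os.path.isfile(confAddr):
            raise ValueError("no such config file: %s" % confAddr)
        with open(confAddr) as handle:
            self.config = yaml.safe_load(handle) or {}
        if actionName not in self.config:
            raise ValueError("unknown action: %s" % actionName)
        self.action = self.config[actionName]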
from SPARQLWrapper import SPARQLWrapper, JSON import numpy as np import math import time from database_interface.data_interface.edge_query_result import EdgeQueryResult from database_interface.search_filters.prefix_filter import PrefixFilter class FreebaseInterface: endpoint = None prefix = None max_entities_per_query = 200 name_relation = None def __init__(self, ): self.endpoint = "http://localhost:8890/sparql" self.prefix = "http://rdf.freebase.com/ns/" self.name_relation = "http://www.w3.org/2000/01/rdf-schema#label" self.frontier_filter = PrefixFilter("http://rdf.freebase.com/ns/") """ Construct a query to retrieve property fields associated to a set of vertices """ def construct_property_query(self, center_vertices, forward=True): center = "s" if forward else "o" other = "o" if forward else "s" query_string = "PREFIX ns: <" + self.prefix + ">\n" query_string += "PREFIX rdf: <http://www.w3.org/2000/01/rdf-schema#>\n" query_string += "select * where {\n" query_string += "?s ?r ?o .\n" query_string += "values ?" + center + " {" + " ".join( ["ns:" + v.split("/ns/")[-1] for v in center_vertices]) + "}\n" query_string += "values ?r { rdf:label }\n" query_string += "FILTER (lang(?o) = \'en\')" query_string += "}" return query_string """ Construct a query to retrieve all neighbors of a set of vertices. - hyperedges: If true, retrieve event neighbors. If false, retrieve entity neighbors. - forward: If true, retrieve edges where the centroids are the subject. If false, retrieve edges where the centroids are the object. """ def construct_neighbor_query(self, center_vertices, hyperedges=True, forward=True): center = "s" if forward else "o" other = "o" if forward else "s" query_string = "PREFIX ns: <" + self.prefix + ">\n" query_string += "select * where {\n" query_string += "?s ?r ?o .\n" query_string += "values ?" + center + " {" + " ".join(["ns:" + v.split("/ns/")[-1] for v in center_vertices]) + "}\n" query_string += "filter ( " if hyperedges: query_string += "( not exists { ?" + other + " ns:type.object.name ?name } && !isLiteral(?" + other + ") && strstarts(str(?"+other+"), \"" + self.prefix + "\") )" else: query_string += "( exists { ?" + other + " ns:type.object.name ?name } || isLiteral(?" + other + ") )" query_string += "\n&& (!isLiteral(?" + other + ") || lang(?" + other + ") = 'en' || datatype(?" + other + ") != xsd:string || datatype(?" + other + ") != rdf:langString )" # Take out all schemastaging for now. 
Might consider putting some parts back in later: query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/base.schemastaging\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/key/wikipedia\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/common.topic.topic_equivalent_webpage\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/common.topic.webpage\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/type.object.key\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/base.yupgrade.user.topics\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/common.topic.description\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/common.document.text\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/type.type.instance\" )" query_string += "\n&& !strstarts(str(?r), \"http://rdf.freebase.com/ns/type.object.type\" )" query_string += " )\n" query_string += "}" return query_string """ Construct a query to retrieve all neighbors of a set of vertices """ def construct_neighbor_query_old(self, center_vertices, direction='s'): property_list = ["ns:type.object.name"] opposite_direction = "o" if direction == "s" else "s" query_string = "PREFIX ns: <" + self.prefix + ">\n" query_string += "select * where {\n" query_string += "?s ?r ?o .\n" query_string += "\n".join(["?" + opposite_direction + " " + prop + " ?prop" for prop in property_list]) query_string += "FILTER (?" + direction + " in (" + ", ".join(["ns:" + v.split("/ns/")[-1] for v in center_vertices]) + "))\n" query_string += "}" return query_string """ Retrieve the 1-neighborhood of a set of vertices in the hypergraph """ def get_adjacent_edges(self, node_identifiers, target="entities", literals_only=False): edge_query_result = EdgeQueryResult() self.retrieve_edges_in_one_direction(node_identifiers, edge_query_result, subject=True, target=target, literals_only=literals_only) self.retrieve_edges_in_one_direction(node_identifiers, edge_query_result, subject=False, target=target, literals_only=literals_only) #print("done") return edge_query_result """ Retrieve names and append the property to the hypergraph """ def retrieve_and_append_name(self, hypergraph, ingoing_edges, outgoing_edges): new_vertices = self.retrieve_new_vertices(ingoing_edges, outgoing_edges) names = self.get_properties(new_vertices, "ns:type.object.name") hypergraph.set_vertex_properties(names, "name") """ Retrieve all new, unique subject/objects """ def retrieve_new_vertices(self, ingoing_edges, outgoing_edges): outgoing_vertices = self.slice_empty(outgoing_edges, 2) ingoing_vertices = self.slice_empty(ingoing_edges, 0) new_vertices = np.concatenate((outgoing_vertices, ingoing_vertices)) new_vertices = np.unique(new_vertices) return new_vertices def slice_empty(self, outgoing_edges, slice): if outgoing_edges.shape[0] > 0: outgoing_vertices = outgoing_edges[:, slice] else: outgoing_vertices = np.array([]) return outgoing_vertices """ Retrieve properties from DB """ def get_property(self, vertices, property): db_interface = self.initialize_sparql_interface() number_of_batches = math.ceil(vertices.shape[0] / self.max_entities_per_query) result_list = [] for i,center_vertex_batch in enumerate(np.array_split(vertices, number_of_batches)): query_string = self.construct_property_query(center_vertex_batch, property) results = self.execute_query(db_interface, query_string) for j,result in 
enumerate(results["results"]["bindings"]): result_list.append([ result["s"]["value"], result["prop"]["value"]] ) result_list = np.array(result_list) return result_list """ Retrieve edges from DB going one direction. """ def retrieve_edges_in_one_direction(self, center_vertices, edge_query_result, subject=True, target="entities", literals_only=False): db_interface = self.initialize_sparql_interface() number_of_batches = math.ceil(center_vertices.shape[0] / self.max_entities_per_query) for i,center_vertex_batch in enumerate(np.array_split(center_vertices, number_of_batches)): db_interface = self.initialize_sparql_interface() if target == "entities": if not literals_only: query_string = self.construct_neighbor_query(center_vertex_batch, hyperedges=False, forward=subject) else: query_string = self.construct_property_query(center_vertex_batch, forward=subject) else: query_string = self.construct_neighbor_query(center_vertex_batch, hyperedges=True, forward=subject) #print("#", end='', flush=True) results = self.execute_query(db_interface, query_string) if results is None: print("Query failed to work five times. Skipping.") continue #print(query_string) #print(len(results["results"]["bindings"])) for j,result in enumerate(results["results"]["bindings"]): #print(result["s"]["value"]) #print(result["r"]["value"]) #print(result["o"]["value"]) # Retrieving literals only crashes SPARQL DB. So, we filter in python instead: #if literals_only: # print(result) if literals_only and subject and result["o"]["type"] != "literal": continue elif literals_only and not subject and result["s"]["type"] != "literal": continue if target == "event" and subject and not (len(result["o"]["value"]) > 28 and result["o"]["value"][28] == ""): continue elif target == "event" and object and not (len(result["s"]["value"]) > 28 and result["s"]["value"][28] == ""): continue if result["r"]["value"] == self.name_relation: edge_query_result.append_name(result["s"]["value"], result["o"]["value"]) else: edge_query_result.append_edge([ result["s"]["value"], result["r"]["value"], result["o"]["value"]], forward=subject ) if subject: edge_query_result.append_vertex(result["o"]["value"],result["o"]["type"]) else: edge_query_result.append_vertex(result["s"]["value"],result["s"]["type"]) #exit() #print("\r" + (i+1) * " "+"\r", end="", flush=True) def execute_query(self, db_interface, query_string): #print(query_string) db_interface.setQuery(query_string) retrieved = False trial_counter = 0 while not retrieved: try: results = db_interface.query().convert() retrieved = True except: trial_counter += 1 if trial_counter == 5: return None print("Query failed. Reattempting in 5 seconds...\n") print(query_string) time.sleep(5) return results def initialize_sparql_interface(self): sparql = SPARQLWrapper(self.endpoint) sparql.setReturnFormat(JSON) return sparql
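# Hedged sketch (illustration only, not part of FreebaseInterface): the
# retrieval methods above all follow the same batching pattern -- split the
# vertex array into ceil(N / max_entities_per_query) chunks with
# numpy.array_split and issue one SPARQL query per chunk, so no single VALUES
# clause grows beyond the endpoint's limits.  The helper below isolates that
# pattern; the function name and the demo vertex ids are made up.
import math

import numpy as np


def iter_vertex_batches(vertices, max_per_query=200):
    """Yield sub-arrays of at most roughly ``max_per_query`` vertices each."""
    vertices = np.asarray(vertices)
    if vertices.shape[0] == 0:
        return
    number_of_batches = math.ceil(vertices.shape[0] / max_per_query)
    for batch in np.array_split(vertices, number_of_batches):
        yield batch


# Example: 450 ids -> three batches of 150 (array_split balances the sizes).
# for batch in iter_vertex_batches(["ns:m.%05d" % i for i in range(450)]):
#     print(len(batch))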
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import hashlib import json import fixtures from oslotest import mockpatch import six from stevedore import extension from heat.common import exception from heat.common import template_format from heat.engine.cfn import functions as cfn_funcs from heat.engine.cfn import template as cfn_t from heat.engine.clients.os import nova from heat.engine import environment from heat.engine import function from heat.engine.hot import template as hot_t from heat.engine import parameters from heat.engine import rsrc_defn from heat.engine import stack from heat.engine import template from heat.tests import common from heat.tests.openstack.nova import fakes as fakes_nova from heat.tests import utils mapping_template = template_format.parse('''{ "AWSTemplateFormatVersion" : "2010-09-09", "Mappings" : { "ValidMapping" : { "TestKey" : { "TestValue" : "wibble" } }, "InvalidMapping" : { "ValueList" : [ "foo", "bar" ], "ValueString" : "baz" }, "MapList": [ "foo", { "bar" : "baz" } ], "MapString": "foobar" } }''') empty_template = template_format.parse('''{ "HeatTemplateFormatVersion" : "2012-12-12", }''') empty_template20161014 = template_format.parse('''{ "HeatTemplateFormatVersion" : "2016-10-14", }''') parameter_template = template_format.parse('''{ "HeatTemplateFormatVersion" : "2012-12-12", "Parameters" : { "foo" : { "Type" : "String" }, "blarg" : { "Type" : "String", "Default": "quux" } } }''') resource_template = template_format.parse('''{ "HeatTemplateFormatVersion" : "2012-12-12", "Resources" : { "foo" : { "Type" : "GenericResourceType" }, "blarg" : { "Type" : "GenericResourceType" } } }''') def join(raw): tmpl = template.Template(mapping_template) return function.resolve(tmpl.parse(None, raw)) class DummyClass(object): metadata = None def metadata_get(self): return self.metadata def metadata_set(self, metadata): self.metadata = metadata class TemplatePluginFixture(fixtures.Fixture): def __init__(self, templates=None): templates = templates or {} super(TemplatePluginFixture, self).__init__() self.templates = [extension.Extension(k, None, v, None) for (k, v) in templates.items()] def _get_template_extension_manager(self): return extension.ExtensionManager.make_test_instance(self.templates) def setUp(self): super(TemplatePluginFixture, self).setUp() def clear_template_classes(): template._template_classes = None clear_template_classes() self.useFixture(mockpatch.PatchObject( template, '_get_template_extension_manager', new=self._get_template_extension_manager)) self.addCleanup(clear_template_classes) class TestTemplatePluginManager(common.HeatTestCase): def test_template_NEW_good(self): class NewTemplate(template.Template): SECTIONS = (VERSION, MAPPINGS) = ('NEWTemplateFormatVersion', '__undefined__') RESOURCES = 'thingies' def param_schemata(self): pass def get_section_name(self, section): pass def parameters(self, stack_identifier, user_params): pass def validate_resource_definitions(self, stack): pass def resource_definitions(self, stack): pass def 
add_resource(self, definition, name=None): pass def __getitem__(self, section): return {} def functions(self): return {} class NewTemplatePrint(function.Function): def result(self): return 'always this' self.useFixture(TemplatePluginFixture( {'NEWTemplateFormatVersion.2345-01-01': NewTemplate})) t = {'NEWTemplateFormatVersion': '2345-01-01'} tmpl = template.Template(t) err = tmpl.validate() self.assertIsNone(err) class TestTemplateVersion(common.HeatTestCase): versions = (('heat_template_version', '2013-05-23'), ('HeatTemplateFormatVersion', '2012-12-12'), ('AWSTemplateFormatVersion', '2010-09-09')) def test_hot_version(self): tmpl = { 'heat_template_version': '2013-05-23', 'foo': 'bar', 'parameters': {} } self.assertEqual(('heat_template_version', '2013-05-23'), template.get_version(tmpl, self.versions)) def test_cfn_version(self): tmpl = { 'AWSTemplateFormatVersion': '2010-09-09', 'foo': 'bar', 'Parameters': {} } self.assertEqual(('AWSTemplateFormatVersion', '2010-09-09'), template.get_version(tmpl, self.versions)) def test_heat_cfn_version(self): tmpl = { 'HeatTemplateFormatVersion': '2012-12-12', 'foo': 'bar', 'Parameters': {} } self.assertEqual(('HeatTemplateFormatVersion', '2012-12-12'), template.get_version(tmpl, self.versions)) def test_missing_version(self): tmpl = { 'foo': 'bar', 'Parameters': {} } ex = self.assertRaises(exception.InvalidTemplateVersion, template.get_version, tmpl, self.versions) self.assertEqual('The template version is invalid: Template version ' 'was not provided', six.text_type(ex)) def test_ambiguous_version(self): tmpl = { 'AWSTemplateFormatVersion': '2010-09-09', 'HeatTemplateFormatVersion': '2012-12-12', 'foo': 'bar', 'Parameters': {} } self.assertRaises(exception.InvalidTemplateVersion, template.get_version, tmpl, self.versions) class ParserTest(common.HeatTestCase): def test_list(self): raw = ['foo', 'bar', 'baz'] parsed = join(raw) for i in six.moves.xrange(len(raw)): self.assertEqual(raw[i], parsed[i]) self.assertIsNot(raw, parsed) def test_dict(self): raw = {'foo': 'bar', 'blarg': 'wibble'} parsed = join(raw) for k in raw: self.assertEqual(raw[k], parsed[k]) self.assertIsNot(raw, parsed) def test_dict_list(self): raw = {'foo': ['bar', 'baz'], 'blarg': 'wibble'} parsed = join(raw) self.assertEqual(raw['blarg'], parsed['blarg']) for i in six.moves.xrange(len(raw['foo'])): self.assertEqual(raw['foo'][i], parsed['foo'][i]) self.assertIsNot(raw, parsed) self.assertIsNot(raw['foo'], parsed['foo']) def test_list_dict(self): raw = [{'foo': 'bar', 'blarg': 'wibble'}, 'baz', 'quux'] parsed = join(raw) for i in six.moves.xrange(1, len(raw)): self.assertEqual(raw[i], parsed[i]) for k in raw[0]: self.assertEqual(raw[0][k], parsed[0][k]) self.assertIsNot(raw, parsed) self.assertIsNot(raw[0], parsed[0]) def test_join(self): raw = {'Fn::Join': [' ', ['foo', 'bar', 'baz']]} self.assertEqual('foo bar baz', join(raw)) def test_join_none(self): raw = {'Fn::Join': [' ', ['foo', None, 'baz']]} self.assertEqual('foo baz', join(raw)) def test_join_list(self): raw = [{'Fn::Join': [' ', ['foo', 'bar', 'baz']]}, 'blarg', 'wibble'] parsed = join(raw) self.assertEqual('foo bar baz', parsed[0]) for i in six.moves.xrange(1, len(raw)): self.assertEqual(raw[i], parsed[i]) self.assertIsNot(raw, parsed) def test_join_dict_val(self): raw = {'quux': {'Fn::Join': [' ', ['foo', 'bar', 'baz']]}, 'blarg': 'wibble'} parsed = join(raw) self.assertEqual('foo bar baz', parsed['quux']) self.assertEqual(raw['blarg'], parsed['blarg']) self.assertIsNot(raw, parsed) class 
TestTemplateValidate(common.HeatTestCase): def test_template_validate_cfn_check_t_digest(self): t = { 'AWSTemplateFormatVersion': '2010-09-09', 'Description': 'foo', 'Parameters': {}, 'Mappings': {}, 'Resources': { 'server': { 'Type': 'OS::Nova::Server' } }, 'Outputs': {}, } tmpl = template.Template(t) self.assertIsNone(tmpl.t_digest) tmpl.validate() self.assertEqual( hashlib.sha256(six.text_type(t).encode('utf-8')).hexdigest(), tmpl.t_digest, 'invalid template digest') def test_template_validate_cfn_good(self): t = { 'AWSTemplateFormatVersion': '2010-09-09', 'Description': 'foo', 'Parameters': {}, 'Mappings': {}, 'Resources': { 'server': { 'Type': 'OS::Nova::Server' } }, 'Outputs': {}, } tmpl = template.Template(t) err = tmpl.validate() self.assertIsNone(err) # test with alternate version key t = { 'HeatTemplateFormatVersion': '2012-12-12', 'Description': 'foo', 'Parameters': {}, 'Mappings': {}, 'Resources': { 'server': { 'Type': 'OS::Nova::Server' } }, 'Outputs': {}, } tmpl = template.Template(t) err = tmpl.validate() self.assertIsNone(err) def test_template_validate_cfn_bad_section(self): t = { 'AWSTemplateFormatVersion': '2010-09-09', 'Description': 'foo', 'Parameteers': {}, 'Mappings': {}, 'Resources': { 'server': { 'Type': 'OS::Nova::Server' } }, 'Outputs': {}, } tmpl = template.Template(t) err = self.assertRaises(exception.InvalidTemplateSection, tmpl.validate) self.assertIn('Parameteers', six.text_type(err)) def test_template_validate_cfn_empty(self): t = template_format.parse(''' AWSTemplateFormatVersion: 2010-09-09 Parameters: Resources: Outputs: ''') tmpl = template.Template(t) err = tmpl.validate() self.assertIsNone(err) def test_template_validate_hot_check_t_digest(self): t = { 'heat_template_version': '2015-04-30', 'description': 'foo', 'parameters': {}, 'resources': { 'server': { 'type': 'OS::Nova::Server' } }, 'outputs': {}, } tmpl = template.Template(t) self.assertIsNone(tmpl.t_digest) tmpl.validate() self.assertEqual(hashlib.sha256( six.text_type(t).encode('utf-8')).hexdigest(), tmpl.t_digest, 'invalid template digest') def test_template_validate_hot_good(self): t = { 'heat_template_version': '2013-05-23', 'description': 'foo', 'parameters': {}, 'resources': { 'server': { 'type': 'OS::Nova::Server' } }, 'outputs': {}, } tmpl = template.Template(t) err = tmpl.validate() self.assertIsNone(err) def test_template_validate_hot_bad_section(self): t = { 'heat_template_version': '2013-05-23', 'description': 'foo', 'parameteers': {}, 'resources': { 'server': { 'type': 'OS::Nova::Server' } }, 'outputs': {}, } tmpl = template.Template(t) err = self.assertRaises(exception.InvalidTemplateSection, tmpl.validate) self.assertIn('parameteers', six.text_type(err)) class TemplateTest(common.HeatTestCase): def setUp(self): super(TemplateTest, self).setUp() self.ctx = utils.dummy_context() @staticmethod def resolve(snippet, template, stack=None): return function.resolve(template.parse(stack, snippet)) def test_defaults(self): empty = template.Template(empty_template) self.assertNotIn('AWSTemplateFormatVersion', empty) self.assertEqual('No description', empty['Description']) self.assertEqual({}, empty['Mappings']) self.assertEqual({}, empty['Resources']) self.assertEqual({}, empty['Outputs']) def test_aws_version(self): tmpl = template.Template(mapping_template) self.assertEqual(('AWSTemplateFormatVersion', '2010-09-09'), tmpl.version) def test_heat_version(self): tmpl = template.Template(resource_template) self.assertEqual(('HeatTemplateFormatVersion', '2012-12-12'), tmpl.version) def 
test_invalid_hot_version(self): invalid_hot_version_tmp = template_format.parse( '''{ "heat_template_version" : "2012-12-12", }''') init_ex = self.assertRaises(exception.InvalidTemplateVersion, template.Template, invalid_hot_version_tmp) valid_versions = ['2013-05-23', '2014-10-16', '2015-04-30', '2015-10-15', '2016-04-08', '2016-10-14'] ex_error_msg = ('The template version is invalid: ' '"heat_template_version: 2012-12-12". ' '"heat_template_version" should be one of: %s' % ', '.join(valid_versions)) self.assertEqual(ex_error_msg, six.text_type(init_ex)) def test_invalid_version_not_in_hot_versions(self): invalid_hot_version_tmp = template_format.parse( '''{ "heat_template_version" : "2012-12-12", }''') versions = { ('heat_template_version', '2013-05-23'): hot_t.HOTemplate20130523, ('heat_template_version', '2013-06-23'): hot_t.HOTemplate20130523 } temp_copy = copy.deepcopy(template._template_classes) template._template_classes = versions init_ex = self.assertRaises(exception.InvalidTemplateVersion, template.Template, invalid_hot_version_tmp) ex_error_msg = ('The template version is invalid: ' '"heat_template_version: 2012-12-12". ' '"heat_template_version" should be ' 'one of: 2013-05-23, 2013-06-23') self.assertEqual(ex_error_msg, six.text_type(init_ex)) template._template_classes = temp_copy def test_invalid_aws_version(self): invalid_aws_version_tmp = template_format.parse( '''{ "AWSTemplateFormatVersion" : "2012-12-12", }''') init_ex = self.assertRaises(exception.InvalidTemplateVersion, template.Template, invalid_aws_version_tmp) ex_error_msg = ('The template version is invalid: ' '"AWSTemplateFormatVersion: 2012-12-12". ' '"AWSTemplateFormatVersion" should be: 2010-09-09') self.assertEqual(ex_error_msg, six.text_type(init_ex)) def test_invalid_version_not_in_aws_versions(self): invalid_aws_version_tmp = template_format.parse( '''{ "AWSTemplateFormatVersion" : "2012-12-12", }''') versions = { ('AWSTemplateFormatVersion', '2010-09-09'): cfn_t.CfnTemplate, ('AWSTemplateFormatVersion', '2011-06-23'): cfn_t.CfnTemplate } temp_copy = copy.deepcopy(template._template_classes) template._template_classes = versions init_ex = self.assertRaises(exception.InvalidTemplateVersion, template.Template, invalid_aws_version_tmp) ex_error_msg = ('The template version is invalid: ' '"AWSTemplateFormatVersion: 2012-12-12". ' '"AWSTemplateFormatVersion" should be ' 'one of: 2010-09-09, 2011-06-23') self.assertEqual(ex_error_msg, six.text_type(init_ex)) template._template_classes = temp_copy def test_invalid_heat_version(self): invalid_heat_version_tmp = template_format.parse( '''{ "HeatTemplateFormatVersion" : "2010-09-09", }''') init_ex = self.assertRaises(exception.InvalidTemplateVersion, template.Template, invalid_heat_version_tmp) ex_error_msg = ('The template version is invalid: ' '"HeatTemplateFormatVersion: 2010-09-09". 
' '"HeatTemplateFormatVersion" should be one of: ' '2012-12-12, 2016-10-14') self.assertEqual(ex_error_msg, six.text_type(init_ex)) def test_invalid_version_not_in_heat_versions(self): invalid_heat_version_tmp = template_format.parse( '''{ "HeatTemplateFormatVersion" : "2010-09-09", }''') versions = { ('HeatTemplateFormatVersion', '2012-12-12'): cfn_t.CfnTemplate, ('HeatTemplateFormatVersion', '2014-12-12'): cfn_t.CfnTemplate } temp_copy = copy.deepcopy(template._template_classes) template._template_classes = versions init_ex = self.assertRaises(exception.InvalidTemplateVersion, template.Template, invalid_heat_version_tmp) ex_error_msg = ('The template version is invalid: ' '"HeatTemplateFormatVersion: 2010-09-09". ' '"HeatTemplateFormatVersion" should be ' 'one of: 2012-12-12, 2014-12-12') self.assertEqual(ex_error_msg, six.text_type(init_ex)) template._template_classes = temp_copy def test_invalid_template(self): scanner_error = ''' 1 Mappings: ValidMapping: TestKey: TestValue ''' parser_error = ''' Mappings: ValidMapping: TestKey: {TestKey1: "Value1" TestKey2: "Value2"} ''' self.assertRaises(ValueError, template_format.parse, scanner_error) self.assertRaises(ValueError, template_format.parse, parser_error) def test_invalid_section(self): tmpl = template.Template({'HeatTemplateFormatVersion': '2012-12-12', 'Foo': ['Bar']}) self.assertNotIn('Foo', tmpl) def test_find_in_map(self): tmpl = template.Template(mapping_template) stk = stack.Stack(self.ctx, 'test', tmpl) find = {'Fn::FindInMap': ["ValidMapping", "TestKey", "TestValue"]} self.assertEqual("wibble", self.resolve(find, tmpl, stk)) def test_find_in_invalid_map(self): tmpl = template.Template(mapping_template) stk = stack.Stack(self.ctx, 'test', tmpl) finds = ({'Fn::FindInMap': ["InvalidMapping", "ValueList", "foo"]}, {'Fn::FindInMap': ["InvalidMapping", "ValueString", "baz"]}, {'Fn::FindInMap': ["MapList", "foo", "bar"]}, {'Fn::FindInMap': ["MapString", "foo", "bar"]}) for find in finds: self.assertRaises((KeyError, TypeError), self.resolve, find, tmpl, stk) def test_bad_find_in_map(self): tmpl = template.Template(mapping_template) stk = stack.Stack(self.ctx, 'test', tmpl) finds = ({'Fn::FindInMap': "String"}, {'Fn::FindInMap': {"Dict": "String"}}, {'Fn::FindInMap': ["ShortList", "foo"]}, {'Fn::FindInMap': ["ReallyShortList"]}) for find in finds: self.assertRaises(KeyError, self.resolve, find, tmpl, stk) def test_param_refs(self): env = environment.Environment({'foo': 'bar', 'blarg': 'wibble'}) tmpl = template.Template(parameter_template, env=env) stk = stack.Stack(self.ctx, 'test', tmpl) p_snippet = {"Ref": "foo"} self.assertEqual("bar", self.resolve(p_snippet, tmpl, stk)) def test_param_ref_missing(self): env = environment.Environment({'foo': 'bar'}) tmpl = template.Template(parameter_template, env=env) stk = stack.Stack(self.ctx, 'test', tmpl) tmpl.env = environment.Environment({}) stk.parameters = parameters.Parameters(stk.identifier(), tmpl) snippet = {"Ref": "foo"} self.assertRaises(exception.UserParameterMissing, self.resolve, snippet, tmpl, stk) def test_resource_refs(self): tmpl = template.Template(resource_template) stk = stack.Stack(self.ctx, 'test', tmpl) self.m.StubOutWithMock(stk['foo'], 'FnGetRefId') stk['foo'].FnGetRefId().MultipleTimes().AndReturn('bar') self.m.ReplayAll() r_snippet = {"Ref": "foo"} self.assertEqual("bar", self.resolve(r_snippet, tmpl, stk)) self.m.VerifyAll() def test_resource_refs_param(self): tmpl = template.Template(resource_template) stk = stack.Stack(self.ctx, 'test', tmpl) p_snippet = {"Ref": 
"baz"} parsed = tmpl.parse(stk, p_snippet) self.assertIsInstance(parsed, cfn_funcs.ParamRef) def test_select_from_list(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["1", ["foo", "bar"]]} self.assertEqual("bar", self.resolve(data, tmpl)) def test_select_from_list_integer_index(self): tmpl = template.Template(empty_template) data = {"Fn::Select": [1, ["foo", "bar"]]} self.assertEqual("bar", self.resolve(data, tmpl)) def test_select_from_list_out_of_bound(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["0", ["foo", "bar"]]} self.assertEqual("foo", self.resolve(data, tmpl)) data = {"Fn::Select": ["1", ["foo", "bar"]]} self.assertEqual("bar", self.resolve(data, tmpl)) data = {"Fn::Select": ["2", ["foo", "bar"]]} self.assertEqual("", self.resolve(data, tmpl)) def test_select_from_dict(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["red", {"red": "robin", "re": "foo"}]} self.assertEqual("robin", self.resolve(data, tmpl)) def test_select_int_from_dict(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["2", {"1": "bar", "2": "foo"}]} self.assertEqual("foo", self.resolve(data, tmpl)) def test_select_from_none(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["red", None]} self.assertEqual("", self.resolve(data, tmpl)) def test_select_from_dict_not_existing(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["green", {"red": "robin", "re": "foo"}]} self.assertEqual("", self.resolve(data, tmpl)) def test_select_from_serialized_json_map(self): tmpl = template.Template(empty_template) js = json.dumps({"red": "robin", "re": "foo"}) data = {"Fn::Select": ["re", js]} self.assertEqual("foo", self.resolve(data, tmpl)) def test_select_from_serialized_json_list(self): tmpl = template.Template(empty_template) js = json.dumps(["foo", "fee", "fum"]) data = {"Fn::Select": ["0", js]} self.assertEqual("foo", self.resolve(data, tmpl)) def test_select_empty_string(self): tmpl = template.Template(empty_template) data = {"Fn::Select": ["0", '']} self.assertEqual("", self.resolve(data, tmpl)) data = {"Fn::Select": ["1", '']} self.assertEqual("", self.resolve(data, tmpl)) data = {"Fn::Select": ["one", '']} self.assertEqual("", self.resolve(data, tmpl)) def test_equals(self): tpl = template_format.parse(''' HeatTemplateFormatVersion: 2016-10-14 Parameters: env_type: Type: String Default: 'test' ''') snippet = {'Fn::Equals': [{'Ref': 'env_type'}, 'prod']} # when param 'env_type' is 'test', equals function resolve to false tmpl = template.Template(tpl) stk = stack.Stack(utils.dummy_context(), 'test_equals_false', tmpl) resolved = self.resolve(snippet, tmpl, stk) self.assertFalse(resolved) # when param 'env_type' is 'prod', equals function resolve to true tmpl = template.Template(tpl, env=environment.Environment( {'env_type': 'prod'})) stk = stack.Stack(utils.dummy_context(), 'test_equals_true', tmpl) resolved = self.resolve(snippet, tmpl, stk) self.assertTrue(resolved) def test_equals_invalid_args(self): tmpl = template.Template(empty_template20161014) snippet = {'Fn::Equals': ['test', 'prod', 'invalid']} exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl) self.assertIn('Arguments to "Fn::Equals" must be of the form: ' '[value_1, value_2]', six.text_type(exc)) # test invalid type snippet = {'Fn::Equals': {"equal": False}} exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl) self.assertIn('Arguments to "Fn::Equals" must be of the form: ' '[value_1, value_2]', 
six.text_type(exc)) def test_join(self): tmpl = template.Template(empty_template) join = {"Fn::Join": [" ", ["foo", "bar"]]} self.assertEqual("foo bar", self.resolve(join, tmpl)) def test_split_ok(self): tmpl = template.Template(empty_template) data = {"Fn::Split": [";", "foo; bar; achoo"]} self.assertEqual(['foo', ' bar', ' achoo'], self.resolve(data, tmpl)) def test_split_no_delim_in_str(self): tmpl = template.Template(empty_template) data = {"Fn::Split": [";", "foo, bar, achoo"]} self.assertEqual(['foo, bar, achoo'], self.resolve(data, tmpl)) def test_base64(self): tmpl = template.Template(empty_template) snippet = {"Fn::Base64": "foobar"} # For now, the Base64 function just returns the original text, and # does not convert to base64 (see issue #133) self.assertEqual("foobar", self.resolve(snippet, tmpl)) def test_get_azs(self): tmpl = template.Template(empty_template) snippet = {"Fn::GetAZs": ""} self.assertEqual(["nova"], self.resolve(snippet, tmpl)) def test_get_azs_with_stack(self): tmpl = template.Template(empty_template) snippet = {"Fn::GetAZs": ""} stk = stack.Stack(self.ctx, 'test_stack', template.Template(empty_template)) self.m.StubOutWithMock(nova.NovaClientPlugin, '_create') fc = fakes_nova.FakeClient() nova.NovaClientPlugin._create().AndReturn(fc) self.m.ReplayAll() self.assertEqual(["nova1"], self.resolve(snippet, tmpl, stk)) def test_replace_string_values(self): tmpl = template.Template(empty_template) snippet = {"Fn::Replace": [ {'$var1': 'foo', '%var2%': 'bar'}, '$var1 is %var2%' ]} self.assertEqual('foo is bar', self.resolve(snippet, tmpl)) def test_replace_number_values(self): tmpl = template.Template(empty_template) snippet = {"Fn::Replace": [ {'$var1': 1, '%var2%': 2}, '$var1 is not %var2%' ]} self.assertEqual('1 is not 2', self.resolve(snippet, tmpl)) snippet = {"Fn::Replace": [ {'$var1': 1.3, '%var2%': 2.5}, '$var1 is not %var2%' ]} self.assertEqual('1.3 is not 2.5', self.resolve(snippet, tmpl)) def test_replace_none_values(self): tmpl = template.Template(empty_template) snippet = {"Fn::Replace": [ {'$var1': None, '${var2}': None}, '"$var1" is "${var2}"' ]} self.assertEqual('"" is ""', self.resolve(snippet, tmpl)) def test_replace_missing_key(self): tmpl = template.Template(empty_template) snippet = {"Fn::Replace": [ {'$var1': 'foo', 'var2': 'bar'}, '"$var1" is "${var3}"' ]} self.assertEqual('"foo" is "${var3}"', self.resolve(snippet, tmpl)) def test_replace_param_values(self): env = environment.Environment({'foo': 'wibble'}) tmpl = template.Template(parameter_template, env=env) stk = stack.Stack(self.ctx, 'test_stack', tmpl) snippet = {"Fn::Replace": [ {'$var1': {'Ref': 'foo'}, '%var2%': {'Ref': 'blarg'}}, '$var1 is %var2%' ]} self.assertEqual('wibble is quux', self.resolve(snippet, tmpl, stk)) def test_member_list2map_good(self): tmpl = template.Template(empty_template) snippet = {"Fn::MemberListToMap": [ 'Name', 'Value', ['.member.0.Name=metric', '.member.0.Value=cpu', '.member.1.Name=size', '.member.1.Value=56']]} self.assertEqual({'metric': 'cpu', 'size': '56'}, self.resolve(snippet, tmpl)) def test_member_list2map_good2(self): tmpl = template.Template(empty_template) snippet = {"Fn::MemberListToMap": [ 'Key', 'Value', ['.member.2.Key=metric', '.member.2.Value=cpu', '.member.5.Key=size', '.member.5.Value=56']]} self.assertEqual({'metric': 'cpu', 'size': '56'}, self.resolve(snippet, tmpl)) def test_resource_facade(self): metadata_snippet = {'Fn::ResourceFacade': 'Metadata'} deletion_policy_snippet = {'Fn::ResourceFacade': 'DeletionPolicy'} 
update_policy_snippet = {'Fn::ResourceFacade': 'UpdatePolicy'} parent_resource = DummyClass() parent_resource.metadata_set({"foo": "bar"}) parent_resource.t = rsrc_defn.ResourceDefinition( 'parent', 'SomeType', deletion_policy=rsrc_defn.ResourceDefinition.RETAIN, update_policy={"blarg": "wibble"}) parent_resource.stack = stack.Stack(self.ctx, 'toplevel_stack', template.Template(empty_template)) stk = stack.Stack(self.ctx, 'test_stack', template.Template(empty_template), parent_resource='parent', owner_id=45) stk._parent_stack = dict(parent=parent_resource) self.assertEqual({"foo": "bar"}, self.resolve(metadata_snippet, stk.t, stk)) self.assertEqual('Retain', self.resolve(deletion_policy_snippet, stk.t, stk)) self.assertEqual({"blarg": "wibble"}, self.resolve(update_policy_snippet, stk.t, stk)) def test_resource_facade_function(self): deletion_policy_snippet = {'Fn::ResourceFacade': 'DeletionPolicy'} parent_resource = DummyClass() parent_resource.metadata_set({"foo": "bar"}) parent_resource.stack = stack.Stack(self.ctx, 'toplevel_stack', template.Template(empty_template)) del_policy = cfn_funcs.Join(parent_resource.stack, 'Fn::Join', ['eta', ['R', 'in']]) parent_resource.t = rsrc_defn.ResourceDefinition( 'parent', 'SomeType', deletion_policy=del_policy) stk = stack.Stack(self.ctx, 'test_stack', template.Template(empty_template), parent_resource='parent') stk._parent_stack = dict(parent=parent_resource) self.assertEqual('Retain', self.resolve(deletion_policy_snippet, stk.t, stk)) def test_resource_facade_invalid_arg(self): snippet = {'Fn::ResourceFacade': 'wibble'} stk = stack.Stack(self.ctx, 'test_stack', template.Template(empty_template)) error = self.assertRaises(ValueError, self.resolve, snippet, stk.t, stk) self.assertIn(list(six.iterkeys(snippet))[0], six.text_type(error)) def test_resource_facade_missing_deletion_policy(self): snippet = {'Fn::ResourceFacade': 'DeletionPolicy'} parent_resource = DummyClass() parent_resource.metadata_set({"foo": "bar"}) parent_resource.t = rsrc_defn.ResourceDefinition('parent', 'SomeType') parent_resource.stack = stack.Stack(self.ctx, 'toplevel_stack', template.Template(empty_template)) stk = stack.Stack(self.ctx, 'test_stack', template.Template(empty_template), parent_resource='parent', owner_id=78) stk._parent_stack = dict(parent=parent_resource) self.assertEqual('Delete', self.resolve(snippet, stk.t, stk)) def test_prevent_parameters_access(self): expected_description = "This can be accessed" tmpl = template.Template({ 'AWSTemplateFormatVersion': '2010-09-09', 'Description': expected_description, 'Parameters': { 'foo': {'Type': 'String', 'Required': True} } }) self.assertEqual(expected_description, tmpl['Description']) keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'Parameters') self.assertIn("can not be accessed directly", six.text_type(keyError)) def test_parameters_section_not_iterable(self): expected_description = "This can be accessed" tmpl = template.Template({ 'AWSTemplateFormatVersion': '2010-09-09', 'Description': expected_description, 'Parameters': { 'foo': {'Type': 'String', 'Required': True} } }) self.assertEqual(expected_description, tmpl['Description']) self.assertNotIn('Parameters', six.iterkeys(tmpl)) def test_add_resource(self): cfn_tpl = template_format.parse(''' AWSTemplateFormatVersion: 2010-09-09 Resources: resource1: Type: AWS::EC2::Instance Properties: property1: value1 Metadata: foo: bar DependsOn: dummy DeletionPolicy: Retain UpdatePolicy: foo: bar resource2: Type: AWS::EC2::Instance ''') source = 
template.Template(cfn_tpl) empty = template.Template(copy.deepcopy(empty_template)) stk = stack.Stack(self.ctx, 'test_stack', source) for defn in six.itervalues(source.resource_definitions(stk)): empty.add_resource(defn) self.assertEqual(cfn_tpl['Resources'], empty.t['Resources']) def test_create_empty_template_default_version(self): empty_template = template.Template.create_empty_template() self.assertEqual(hot_t.HOTemplate20150430, empty_template.__class__) self.assertEqual({}, empty_template['parameter_groups']) self.assertEqual({}, empty_template['resources']) self.assertEqual({}, empty_template['outputs']) def test_create_empty_template_returns_correct_version(self): t = template_format.parse(''' AWSTemplateFormatVersion: 2010-09-09 Parameters: Resources: Outputs: ''') aws_tmpl = template.Template(t) empty_template = template.Template.create_empty_template( version=aws_tmpl.version) self.assertEqual(aws_tmpl.__class__, empty_template.__class__) self.assertEqual({}, empty_template['Mappings']) self.assertEqual({}, empty_template['Resources']) self.assertEqual({}, empty_template['Outputs']) t = template_format.parse(''' HeatTemplateFormatVersion: 2012-12-12 Parameters: Resources: Outputs: ''') heat_tmpl = template.Template(t) empty_template = template.Template.create_empty_template( version=heat_tmpl.version) self.assertEqual(heat_tmpl.__class__, empty_template.__class__) self.assertEqual({}, empty_template['Mappings']) self.assertEqual({}, empty_template['Resources']) self.assertEqual({}, empty_template['Outputs']) t = template_format.parse(''' heat_template_version: 2015-04-30 parameter_groups: resources: outputs: ''') hot_tmpl = template.Template(t) empty_template = template.Template.create_empty_template( version=hot_tmpl.version) self.assertEqual(hot_tmpl.__class__, empty_template.__class__) self.assertEqual({}, empty_template['parameter_groups']) self.assertEqual({}, empty_template['resources']) self.assertEqual({}, empty_template['outputs']) def test_create_empty_template_from_another_template(self): res_param_template = template_format.parse('''{ "HeatTemplateFormatVersion" : "2012-12-12", "Parameters" : { "foo" : { "Type" : "String" }, "blarg" : { "Type" : "String", "Default": "quux" } }, "Resources" : { "foo" : { "Type" : "GenericResourceType" }, "blarg" : { "Type" : "GenericResourceType" } } }''') env = environment.Environment({'foo': 'bar'}) hot_tmpl = template.Template(res_param_template, env) empty_template = template.Template.create_empty_template( from_template=hot_tmpl) self.assertEqual({}, empty_template['Resources']) self.assertEqual(hot_tmpl.env, empty_template.env) class TemplateFnErrorTest(common.HeatTestCase): scenarios = [ ('select_from_list_not_int', dict(expect=TypeError, snippet={"Fn::Select": ["one", ["foo", "bar"]]})), ('select_from_dict_not_str', dict(expect=TypeError, snippet={"Fn::Select": [1, {"red": "robin", "re": "foo"}]})), ('select_from_serialized_json_wrong', dict(expect=ValueError, snippet={"Fn::Select": ["not", "no json"]})), ('select_wrong_num_args_1', dict(expect=ValueError, snippet={"Fn::Select": []})), ('select_wrong_num_args_2', dict(expect=ValueError, snippet={"Fn::Select": ["4"]})), ('select_wrong_num_args_3', dict(expect=ValueError, snippet={"Fn::Select": ["foo", {"foo": "bar"}, ""]})), ('select_wrong_num_args_4', dict(expect=TypeError, snippet={'Fn::Select': [['f'], {'f': 'food'}]})), ('split_no_delim', dict(expect=ValueError, snippet={"Fn::Split": ["foo, bar, achoo"]})), ('split_no_list', dict(expect=TypeError, snippet={"Fn::Split": 
"foo, bar, achoo"})), ('base64_list', dict(expect=TypeError, snippet={"Fn::Base64": ["foobar"]})), ('base64_dict', dict(expect=TypeError, snippet={"Fn::Base64": {"foo": "bar"}})), ('replace_list_value', dict(expect=TypeError, snippet={"Fn::Replace": [ {'$var1': 'foo', '%var2%': ['bar']}, '$var1 is %var2%']})), ('replace_list_mapping', dict(expect=TypeError, snippet={"Fn::Replace": [ ['var1', 'foo', 'var2', 'bar'], '$var1 is ${var2}']})), ('replace_dict', dict(expect=TypeError, snippet={"Fn::Replace": {}})), ('replace_missing_template', dict(expect=ValueError, snippet={"Fn::Replace": [['var1', 'foo', 'var2', 'bar']]})), ('replace_none_template', dict(expect=TypeError, snippet={"Fn::Replace": [['var2', 'bar'], None]})), ('replace_list_string', dict(expect=TypeError, snippet={"Fn::Replace": [ {'var1': 'foo', 'var2': 'bar'}, ['$var1 is ${var2}']]})), ('join_string', dict(expect=TypeError, snippet={"Fn::Join": [" ", "foo"]})), ('join_dict', dict(expect=TypeError, snippet={"Fn::Join": [" ", {"foo": "bar"}]})), ('join_wrong_num_args_1', dict(expect=ValueError, snippet={"Fn::Join": []})), ('join_wrong_num_args_2', dict(expect=ValueError, snippet={"Fn::Join": [" "]})), ('join_wrong_num_args_3', dict(expect=ValueError, snippet={"Fn::Join": [" ", {"foo": "bar"}, ""]})), ('join_string_nodelim', dict(expect=TypeError, snippet={"Fn::Join": "o"})), ('join_string_nodelim_1', dict(expect=TypeError, snippet={"Fn::Join": "oh"})), ('join_string_nodelim_2', dict(expect=TypeError, snippet={"Fn::Join": "ohh"})), ('join_dict_nodelim1', dict(expect=TypeError, snippet={"Fn::Join": {"foo": "bar"}})), ('join_dict_nodelim2', dict(expect=TypeError, snippet={"Fn::Join": {"foo": "bar", "blarg": "wibble"}})), ('join_dict_nodelim3', dict(expect=TypeError, snippet={"Fn::Join": {"foo": "bar", "blarg": "wibble", "baz": "quux"}})), ('member_list2map_no_key_or_val', dict(expect=TypeError, snippet={"Fn::MemberListToMap": [ 'Key', ['.member.2.Key=metric', '.member.2.Value=cpu', '.member.5.Key=size', '.member.5.Value=56']]})), ('member_list2map_no_list', dict(expect=TypeError, snippet={"Fn::MemberListToMap": [ 'Key', '.member.2.Key=metric']})), ('member_list2map_not_string', dict(expect=TypeError, snippet={"Fn::MemberListToMap": [ 'Name', ['Value'], ['.member.0.Name=metric', '.member.0.Value=cpu', '.member.1.Name=size', '.member.1.Value=56']]})), ] def test_bad_input(self): tmpl = template.Template(empty_template) def resolve(s): return TemplateTest.resolve(s, tmpl) error = self.assertRaises(self.expect, resolve, self.snippet) self.assertIn(list(six.iterkeys(self.snippet))[0], six.text_type(error)) class ResolveDataTest(common.HeatTestCase): def setUp(self): super(ResolveDataTest, self).setUp() self.username = 'parser_stack_test_user' self.ctx = utils.dummy_context() self.stack = stack.Stack(self.ctx, 'resolve_test_stack', template.Template(empty_template)) def resolve(self, snippet): return function.resolve(self.stack.t.parse(self.stack, snippet)) def test_join_split(self): # join snippet = {'Fn::Join': [';', ['one', 'two', 'three']]} self.assertEqual('one;two;three', self.resolve(snippet)) # join then split snippet = {'Fn::Split': [';', snippet]} self.assertEqual(['one', 'two', 'three'], self.resolve(snippet)) def test_split_join_split_join(self): # each snippet in this test encapsulates # the snippet from the previous step, leading # to increasingly nested function calls # split snippet = {'Fn::Split': [',', 'one,two,three']} self.assertEqual(['one', 'two', 'three'], self.resolve(snippet)) # split then join snippet = 
{'Fn::Join': [';', snippet]} self.assertEqual('one;two;three', self.resolve(snippet)) # split then join then split snippet = {'Fn::Split': [';', snippet]} self.assertEqual(['one', 'two', 'three'], self.resolve(snippet)) # split then join then split then join snippet = {'Fn::Join': ['-', snippet]} self.assertEqual('one-two-three', self.resolve(snippet)) def test_join_recursive(self): raw = {'Fn::Join': ['\n', [{'Fn::Join': [' ', ['foo', 'bar']]}, 'baz']]} self.assertEqual('foo bar\nbaz', self.resolve(raw)) def test_join_not_string(self): snippet = {'Fn::Join': ['\n', [{'Fn::Join': [' ', ['foo', 45]]}, 'baz']]} error = self.assertRaises(TypeError, self.resolve, snippet) self.assertIn('45', six.text_type(error)) def test_base64_replace(self): raw = {'Fn::Base64': {'Fn::Replace': [ {'foo': 'bar'}, 'Meet at the foo']}} self.assertEqual('Meet at the bar', self.resolve(raw)) def test_replace_base64(self): raw = {'Fn::Replace': [{'foo': 'bar'}, { 'Fn::Base64': 'Meet at the foo'}]} self.assertEqual('Meet at the bar', self.resolve(raw)) def test_nested_selects(self): data = { 'a': ['one', 'two', 'three'], 'b': ['een', 'twee', {'d': 'D', 'e': 'E'}] } raw = {'Fn::Select': ['a', data]} self.assertEqual(data['a'], self.resolve(raw)) raw = {'Fn::Select': ['b', data]} self.assertEqual(data['b'], self.resolve(raw)) raw = { 'Fn::Select': ['1', { 'Fn::Select': ['b', data] }] } self.assertEqual('twee', self.resolve(raw)) raw = { 'Fn::Select': ['e', { 'Fn::Select': ['2', { 'Fn::Select': ['b', data] }] }] } self.assertEqual('E', self.resolve(raw)) def test_member_list_select(self): snippet = {'Fn::Select': ['metric', {"Fn::MemberListToMap": [ 'Name', 'Value', ['.member.0.Name=metric', '.member.0.Value=cpu', '.member.1.Name=size', '.member.1.Value=56']]}]} self.assertEqual('cpu', self.resolve(snippet))
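# ---------------------------------------------------------------------------
# Editor's illustration (not Heat's implementation): a standalone sketch of the
# key/value pairing that the Fn::MemberListToMap tests above exercise. Heat's
# real code path lives in cfn_funcs; this helper only reproduces the observable
# behaviour of the test fixtures, so treat it as an explanatory sketch.
# ---------------------------------------------------------------------------
def _member_list_to_map_sketch(key_name, value_name, member_list):
    keys, values = {}, {}
    for entry in member_list:
        prefix, _, data = entry.partition('=')
        parts = prefix.split('.')       # e.g. ['', 'member', '0', 'Name']
        index, attr = parts[2], parts[3]
        if attr == key_name:
            keys[index] = data
        elif attr == value_name:
            values[index] = data
    return {keys[i]: values[i] for i in keys if i in values}


assert _member_list_to_map_sketch(
    'Name', 'Value',
    ['.member.0.Name=metric', '.member.0.Value=cpu',
     '.member.1.Name=size', '.member.1.Value=56']) == {'metric': 'cpu',
                                                       'size': '56'}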
# Global settings for core project. import os # PATH CONFIGURATION PROJECT_DIR = os.path.dirname(os.path.dirname(__file__)) PUBLIC_DIR = os.path.join(PROJECT_DIR, 'public') # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'plugin_manager.core.wsgi.application' # END PATH CONFIGURATION # DEBUG CONFIGURATION DEBUG = False TEMPLATE_DEBUG = True # END DEBUG CONFIGURATION # PAGINATION DEFAULT VALUE CONFIG NUM_RESULTS_PER_PAGE = 20 # END PAGINATION DEFAULT VALUE CONFIG # MANAGER CONFIGURATION ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS # END MANAGER CONFIGURATION # URL CONFIGURATION ROOT_URLCONF = 'plugin_manager.core.urls' # END URL CONFIGURATION # GENERAL CONFIGURATION TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Location of fixtures for the project FIXTURE_DIRS = ( os.path.join(PROJECT_DIR, 'fixtures'), ) # END GENERAL CONFIGURATION # MEDIA CONFIGURATION # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = os.path.join(PUBLIC_DIR, 'media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/media/' # END MEDIA CONFIGURATION # STATIC FILE CONFIGURATION # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static') # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_DIR, 'static'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # END STATIC FILE CONFIGURATION SOCKETIO_ENABLED = False # TEMPLATE CONFIGURATION GRAPPELLI_ADMIN_TITLE = 'Admin' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or # "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_DIR, 'templates'), ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.request', 'plugin_manager.core.context_processors.sidebar_lists', 'sekizai.context_processors.sekizai', ) # END TEMPLATE CONFIGURATION # MIDDLEWARE CONFIGURATION MIDDLEWARE_CLASSES = ( 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'stronghold.middleware.LoginRequiredMiddleware', ) # END MIDDLEWARE CONFIGURATION AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', # default ) ANONYMOUS_USER_ID = -1 # APP CONFIGURATION INSTALLED_APPS = ( # Django Core 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.humanize', 'django_extensions', 'django.contrib.admin', # 3rd Party 'corsheaders', 'grappelli', 'djcelery', 'sekizai', 'crispy_forms', 'stronghold', 'django_tables2', 'bootstrapform', # Project 'plugin_manager.accounts', 'plugin_manager.hosts', 'plugin_manager.launch_window', ) # END APP CONFIGURATION FABFILE_PATH = os.path.join(os.path.dirname(PROJECT_DIR), 'fabfile.py') # STRONGHOLD CONFIGURATION LOGIN_URL = '/login/' LOGIN_REDIRECT_URL = '/' STRONGHOLD_PUBLIC_NAMED_URLS = ( 'password_reset', 'password_reset_done', 'password_reset_complete', 'business_redirect_setup', ) STRONGHOLD_PUBLIC_URLS = ( r'^/reset/[0-9A-Za-z_\-]+/[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20}/', r'^/api/v1/.*', r'^/hosts/logs/elk*', ) # END STRONGHOLD CONFIGURATION # CRISPY CONFIGURATION CRISPY_TEMPLATE_PACK = "bootstrap3" # END CRISPY CONFIGURATION # EMAIL CONFIGURATION AUTH_USER_MODEL = 'accounts.DeployUser' # END EMAIL CONFIGURATION # EMAIL CONFIGURATION EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # EMAIL_HOST = 'localhost' # EMAIL_PORT = 25 # EMAIL_USE_TLS = False EMAIL_FROM = '[email protected]' # END EMAIL CONFIGURATION # LOGGING CONFIGURATION # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # END LOGGING CONFIGURATION CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', 'LOCATION': os.path.join(PUBLIC_DIR, '.django_cache'), } } FABRIC_TASK_CACHE_TIMEOUT = 60 * 60 * 24 # one day # celery CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' CELERY_TIMEZONE = 'UTC' # elasticsearch ELK_URL = "http://localhost" ELK_PORT = '9200' # cors #TODO enable white list CORS_ORIGIN_ALLOW_ALL = True
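# ---------------------------------------------------------------------------
# Editor's illustration, not part of the project: settings modules like the one
# above are usually treated as a shared base that thin, environment-specific
# modules override. The dotted import path below is an assumption about where
# this file lives in the package; adjust it to the real location.
# ---------------------------------------------------------------------------
# local.py (hypothetical development overrides)

from plugin_manager.core.settings.base import *  # noqa: F401,F403  (assumed path)

DEBUG = True
TEMPLATE_DEBUG = DEBUG
# The console email backend is already the default above; repeated here only
# to make the override pattern explicit.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'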
#!/usr/bin/env python # copyright 2013, y-p @ github """ Search the git history for all commits touching a named method You need the sh module to run this WARNING: this script uses git clean -f, running it on a repo with untracked files will probably erase them. Usage:: $ ./find_commits_touching_func.py (see arguments below) """ import logging import re import os import argparse from collections import namedtuple from dateutil.parser import parse try: import sh except ImportError: raise ImportError("The 'sh' package is required to run this script.") desc = """ Find all commits touching a specified function across the codebase. """.strip() argparser = argparse.ArgumentParser(description=desc) argparser.add_argument( "funcname", metavar="FUNCNAME", help="Name of function/method to search for changes on", ) argparser.add_argument( "-f", "--file-masks", metavar="f_re(,f_re)*", default=[r"\.py.?$"], help="comma separated list of regexes to match " "filenames against\ndefaults all .py? files", ) argparser.add_argument( "-d", "--dir-masks", metavar="d_re(,d_re)*", default=[], help="comma separated list of regexes to match base " "path against", ) argparser.add_argument( "-p", "--path-masks", metavar="p_re(,p_re)*", default=[], help="comma separated list of regexes to match full " "file path against", ) argparser.add_argument( "-y", "--saw-the-warning", action="store_true", default=False, help="must specify this to run, acknowledge you " "realize this will erase untracked files", ) argparser.add_argument( "--debug-level", default="CRITICAL", help="debug level of messages (DEBUG, INFO, etc...)", ) args = argparser.parse_args() lfmt = logging.Formatter(fmt="%(levelname)-8s %(message)s", datefmt="%m-%d %H:%M:%S") shh = logging.StreamHandler() shh.setFormatter(lfmt) logger = logging.getLogger("findit") logger.addHandler(shh) Hit = namedtuple("Hit", "commit path") HASH_LEN = 8 def clean_checkout(comm): h, s, d = get_commit_vitals(comm) if len(s) > 60: s = s[:60] + "..." 
    s = s.split("\n")[0]
    logger.info("CO: %s %s" % (comm, s))

    sh.git("checkout", comm, _tty_out=False)
    sh.git("clean", "-f")


def get_hits(defname, files=()):
    cs = set()
    for f in files:
        try:
            r = sh.git(
                "blame",
                "-L",
                r"/def\s*{start}/,/def/".format(start=defname),
                f,
                _tty_out=False,
            )
        except sh.ErrorReturnCode_128:
            logger.debug("no matches in %s" % f)
            continue

        lines = r.strip().splitlines()[:-1]
        # remove comment lines
        lines = [x for x in lines if not re.search(r"^\w+\s*\(.+\)\s*#", x)]
        hits = set(map(lambda x: x.split(" ")[0], lines))
        cs.update({Hit(commit=c, path=f) for c in hits})

    return cs


def get_commit_info(c, fmt, sep="\t"):
    r = sh.git(
        "log",
        "--format={}".format(fmt),
        "{}^..{}".format(c, c),
        "-n",
        "1",
        _tty_out=False,
    )
    return str(r).split(sep)


def get_commit_vitals(c, hlen=HASH_LEN):
    h, s, d = get_commit_info(c, "%H\t%s\t%ci", "\t")
    return h[:hlen], s, parse(d)


def file_filter(state, dirname, fnames):
    if args.dir_masks and not any(re.search(x, dirname) for x in args.dir_masks):
        return
    for f in fnames:
        p = os.path.abspath(os.path.join(os.path.realpath(dirname), f))
        if any(re.search(x, f) for x in args.file_masks) or any(
            re.search(x, p) for x in args.path_masks
        ):
            if os.path.isfile(p):
                state["files"].append(p)


def search(defname, head_commit="HEAD"):
    HEAD, s = get_commit_vitals("HEAD")[:2]
    logger.info("HEAD at %s: %s" % (HEAD, s))
    done_commits = set()
    files = []
    state = dict(files=files)
    # Walk the working tree and let file_filter() collect matching paths.
    # (os.walk() yields (dirpath, dirnames, filenames) tuples; the previous
    # os.path.walk-style callback invocation silently collected nothing.)
    for dirpath, _dirnames, fnames in os.walk("."):
        file_filter(state, dirpath, fnames)

    # files now holds a list of paths to files

    # seed with hits from HEAD
    allhits = set(get_hits(defname, files=files))
    q = {HEAD}
    try:
        while q:
            h = q.pop()
            clean_checkout(h)
            hits = get_hits(defname, files=files)
            for x in hits:
                prevc = get_commit_vitals(x.commit + "^")[0]
                if prevc not in done_commits:
                    q.add(prevc)
            allhits.update(hits)
            done_commits.add(h)
            logger.debug("Remaining: %s" % q)
    finally:
        logger.info("Restoring HEAD to %s" % HEAD)
        clean_checkout(HEAD)
    return allhits


def pprint_hits(hits):
    SUBJ_LEN = 50
    PATH_LEN = 20
    hits = list(hits)
    max_p = 0
    for hit in hits:
        p = hit.path.split(os.path.realpath(os.curdir) + os.path.sep)[-1]
        max_p = max(max_p, len(p))

    if max_p < PATH_LEN:
        SUBJ_LEN += PATH_LEN - max_p
        PATH_LEN = max_p

    def sorter(i):
        h, s, d = get_commit_vitals(hits[i].commit)
        return hits[i].path, d

    print(
        ("\nThese commits touched the %s method in these files "
         "on these dates:\n") % args.funcname
    )
    for i in sorted(range(len(hits)), key=sorter):
        hit = hits[i]
        h, s, d = get_commit_vitals(hit.commit)
        p = hit.path.split(os.path.realpath(os.curdir) + os.path.sep)[-1]
        fmt = "{:%d} {:10} {:<%d} {:<%d}" % (HASH_LEN, SUBJ_LEN, PATH_LEN)
        if len(s) > SUBJ_LEN:
            s = s[: SUBJ_LEN - 5] + " ..."
        print(fmt.format(h[:HASH_LEN], d.isoformat()[:10], s, p[-20:]))

    print("\n")


def main():
    if not args.saw_the_warning:
        argparser.print_help()
        print(
            """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
WARNING:
this script uses git clean -f, running it on a repo with untracked files
will probably erase them. It's recommended that you make a fresh clone and
run from its root directory.
You must specify the -y argument to ignore this warning.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
        )
        return

    if isinstance(args.file_masks, str):
        args.file_masks = args.file_masks.split(",")
    if isinstance(args.path_masks, str):
        args.path_masks = args.path_masks.split(",")
    if isinstance(args.dir_masks, str):
        args.dir_masks = args.dir_masks.split(",")

    logger.setLevel(getattr(logging, args.debug_level))

    hits = search(args.funcname)
    pprint_hits(hits)


if __name__ == "__main__":
    import sys

    sys.exit(main())
# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from manila.api.openstack import versioned_method from manila import exception from manila.i18n import _ from manila import utils # Define the minimum and maximum version of the API across all of the # REST API. The format of the version is: # X.Y where: # # - X will only be changed if a significant backwards incompatible API # change is made which affects the API as whole. That is, something # that is only very very rarely incremented. # # - Y when you make any change to the API. Note that this includes # semantic changes which may not affect the input or output formats or # even originate in the API code layer. We are not distinguishing # between backwards compatible and backwards incompatible changes in # the versioning system. It must be made clear in the documentation as # to what is a backwards compatible change and what is a backwards # incompatible one. # # You must update the API version history string below with a one or # two line description as well as update rest_api_version_history.rst REST_API_VERSION_HISTORY = """ REST API Version History: * 1.0 - Initial version. Includes all V1 APIs and extensions in Kilo. * 2.0 - Versions API updated to reflect beginning of microversions epoch. * 2.1 - Share create() doesn't ignore availability_zone field of share. * 2.2 - Snapshots become optional feature. * 2.3 - Share instances admin API * 2.4 - Consistency Group support * 2.5 - Share Migration admin API * 2.6 - Return share_type UUID instead of name in Share API * 2.7 - Rename old extension-like API URLs to core-API-like * 2.8 - Attr "is_public" can be set for share using API "manage" * 2.9 - Add export locations API * 2.10 - Field 'access_rules_status' was added to shares and share instances. * 2.11 - Share Replication support * 2.12 - Manage/unmanage snapshot API. * 2.13 - Add "cephx" auth type to allow_access * 2.14 - 'Preferred' attribute in export location metadata * 2.15 - Added Share migration 'migration_cancel', 'migration_get_progress', 'migration_complete' APIs, renamed 'migrate_share' to 'migration_start' and added notify parameter to 'migration_start'. * 2.16 - Add user_id in share show/create/manage API. * 2.17 - Added project_id and user_id fields to the JSON response of snapshot show/create/manage API. * 2.18 - Add gateway to the JSON response of share network show API. * 2.19 - Share snapshot instances admin APIs (list/show/detail/reset-status). * 2.20 - Add MTU to the JSON response of share network show API. * 2.21 - Add access_key to the response of access_list API. * 2.22 - Updated migration_start API with 'preserve-metadata', 'writable', 'nondisruptive' and 'new_share_network_id' parameters, renamed 'force_host_copy' to 'force_host_assisted_migration', removed 'notify' parameter and removed previous migrate_share API support. Updated reset_task_state API to accept 'None' value. 
* 2.23 - Added share_type to filter results of scheduler-stats/pools API. * 2.24 - Added optional create_share_from_snapshot_support extra spec, which was previously inferred from the 'snapshot_support' extra spec. Also made the 'snapshot_support' extra spec optional. * 2.25 - Added quota-show detail API. * 2.26 - Removed 'nova_net_id' parameter from share_network API. * 2.27 - Added share revert to snapshot API. * 2.28 - Added transitional states to access rules and replaced all transitional access_rules_status values of shares (share_instances) with 'syncing'. Share action API 'access_allow' now accepts rules even when a share or any of its instances may have an access_rules_status set to 'error'. * 2.29 - Updated migration_start API adding mandatory parameter 'preserve_snapshots' and changed 'preserve_metadata', 'writable', 'nondisruptive' to be mandatory as well. All previous migration_start APIs prior to this microversion are now unsupported. * 2.30 - Added cast_rules_to_readonly field to share_instances. * 2.31 - Convert consistency groups to share groups. * 2.32 - Added mountable snapshots APIs. * 2.33 - Added 'created_at' and 'updated_at' to the response of access_list API. * 2.34 - Added 'availability_zone_id' and 'consistent_snapshot_support' fields to 'share_group' object. * 2.35 - Added support to retrieve shares filtered by export_location_id and export_location_path. * 2.36 - Added like filter support in ``shares``, ``snapshots``, ``share-networks``, ``share-groups`` list APIs. * 2.37 - Added /messages APIs. * 2.38 - Support IPv6 validation in allow_access API to enable IPv6 in manila. * 2.39 - Added share-type quotas. * 2.40 - Added share group and share group snapshot quotas. * 2.41 - Added 'description' in share type create/list APIs. * 2.42 - Added ``with_count`` in share list API to get total count info. * 2.43 - Added filter search by extra spec for share type list. * 2.44 - Added 'ou' field to 'security_service' object. * 2.45 - Added access metadata for share access and also introduced the GET /share-access-rules API. The prior API to retrieve access rules will not work with API version >=2.45. * 2.46 - Added 'is_default' field to 'share_type' and 'share_group_type' objects. * 2.47 - Export locations for non-active share replicas are no longer retrievable through the export locations APIs: GET /v2/{tenant_id}/shares/{share_id}/export_locations and GET /v2/{tenant_id}/shares/{share_id}/export_locations/{ export_location_id}. A new API is introduced at this version: GET /v2/{tenant_id}/share-replicas/{ replica_id}/export-locations to allow retrieving individual replica export locations if available. * 2.48 - Added support for extra-spec "availability_zones" within Share types along with validation in the API. * 2.49 - Added Manage/Unmanage Share Server APIs. Updated Manage/Unmanage Shares and Snapshots APIs to work in ``driver_handles_shares_servers`` enabled mode. * 2.50 - Added update share type API to Share Type APIs. Through this API we can update the ``name``, ``description`` and/or ``share_type_access:is_public`` fields of the share type. * 2.51 - Added Share Network with multiple Subnets. Updated Share Networks to handle with one or more subnets in different availability zones. * 2.52 - Added 'created_before' and 'created_since' field to list messages filters, support querying user messages within the specified time period. * 2.53 - Added quota control to share replicas. 
* 2.54 - Share and share instance objects include a new field called "progress" which indicates the completion of a share creation operation as a percentage. * 2.55 - Share groups feature is no longer considered experimental. * 2.56 - Share replication feature is no longer considered experimental. * 2.57 - Added Share server migration operations: 'share_server_migration_check' 'share_server_migration_cancel' 'share_server_migration_complete' 'share_server_migration_start' 'share_server_migration_get_progress' 'share_server_reset_task_state' * 2.58 - Added 'share_groups' and 'share_group_snapshots' to the limits view. * 2.59 - Add driver ``details`` field to migration get progress. * 2.60 - API URLs no longer need to include a project_id parameter. * 2.61 - Added optional provisioning:max_share_size and provisioning:min_share_size extra specs, which can add minimum and maximum share size restrictions on a per share-type granularity. * 2.62 - Added quota control to per share size. * 2.63 - Changed the existing behavior of 'add_security_service' action on the share network's endpoint to allow the addition of security services, even when the share network is in use. Also, added new actions on the share network's endpoint: 'update_security_service', 'update_security_service_check' and 'add_security_service_check'. * 2.64 - Added 'force' field to extend share api, which can extend share directly without validation through share scheduler. * 2.65 - Added ability to set affinity scheduler hints via the share create API. * 2.66 - Added filter search by group spec for share group type list. * 2.67 - Added ability to set 'only_host' scheduler hint for the share create and share replica create API. * 2.68 - Added admin only capabilities to share metadata API * 2.69 - Added new share action to soft delete share to recycle bin or restore share from recycle bin. Also, a new parameter called `is_soft_deleted` was added so users can filter out shares in the recycle bin while listing shares. * 2.70 - Added support for multiple share network subnets in the same availability zone. Also, users can add subnets for an in-use share network. """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # minimum version of the API supported. _MIN_API_VERSION = "2.0" _MAX_API_VERSION = "2.70" DEFAULT_API_VERSION = _MIN_API_VERSION # NOTE(cyeoh): min and max versions declared as functions so we can # mock them for unittests. Do not use the constants directly anywhere # else. def min_api_version(): return APIVersionRequest(_MIN_API_VERSION) def max_api_version(): return APIVersionRequest(_MAX_API_VERSION) class APIVersionRequest(utils.ComparableMixin): """This class represents an API Version Request. This class includes convenience methods for manipulation and comparison of version numbers as needed to implement API microversions. 
""" def __init__(self, version_string=None, experimental=False): """Create an API version request object.""" self._ver_major = None self._ver_minor = None self._experimental = experimental if version_string is not None: match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self._ver_major = int(match.group(1)) self._ver_minor = int(match.group(2)) else: raise exception.InvalidAPIVersionString(version=version_string) def __str__(self): """Debug/Logging representation of object.""" params = { 'major': self._ver_major, 'minor': self._ver_minor, 'experimental': self._experimental, } return ("API Version Request Major: %(major)s, Minor: %(minor)s, " "Experimental: %(experimental)s" % params) def is_null(self): return self._ver_major is None and self._ver_minor is None def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self._ver_major, self._ver_minor @property def experimental(self): return self._experimental @experimental.setter def experimental(self, value): if type(value) != bool: msg = _('The experimental property must be a bool value.') raise exception.InvalidParameterValue(err=msg) self._experimental = value def matches_versioned_method(self, method): """Compares this version to that of a versioned method.""" if type(method) != versioned_method.VersionedMethod: msg = _('An API version request must be compared ' 'to a VersionedMethod object.') raise exception.InvalidParameterValue(err=msg) return self.matches(method.start_version, method.end_version, method.experimental) def matches(self, min_version, max_version, experimental=False): """Compares this version to the specified min/max range. Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. If min_version is null then there is no minimum limit. If max_version is null then there is no maximum limit. If self is null then raise ValueError. :param min_version: Minimum acceptable version. :param max_version: Maximum acceptable version. :param experimental: Whether to match experimental APIs. :returns: boolean """ if self.is_null(): raise ValueError # NOTE(cknight): An experimental request should still match a # non-experimental API, so the experimental check isn't just # looking for equality. if not self.experimental and experimental: return False if isinstance(min_version, str): min_version = APIVersionRequest(version_string=min_version) if isinstance(max_version, str): max_version = APIVersionRequest(version_string=max_version) if not (min_version or max_version): return True elif (min_version and max_version and max_version.is_null() and min_version.is_null()): return True elif not max_version or max_version.is_null(): return min_version <= self elif not min_version or min_version.is_null(): return self <= max_version else: return min_version <= self <= max_version def get_string(self): """Returns a string representation of this object. If this method is used to create an APIVersionRequest, the resulting object will be an equivalent request. """ if self.is_null(): raise ValueError return ("%(major)s.%(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor})
# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from jacket.storage import exception from jacket.storage import test from jacket.tests.storage.unit.volume.drivers.netapp.dataontap.performance \ import fakes as fake from jacket.storage.volume.drivers.netapp.dataontap.performance import perf_base @ddt.ddt class PerformanceLibraryTestCase(test.TestCase): def setUp(self): super(PerformanceLibraryTestCase, self).setUp() with mock.patch.object(perf_base.PerformanceLibrary, '_init_counter_info'): self.zapi_client = mock.Mock() self.perf_library = perf_base.PerformanceLibrary(self.zapi_client) self.perf_library.system_object_name = 'system' self.perf_library.avg_processor_busy_base_counter_name = ( 'cpu_elapsed_time1') def test_init(self): mock_zapi_client = mock.Mock() mock_init_counter_info = self.mock_object( perf_base.PerformanceLibrary, '_init_counter_info') library = perf_base.PerformanceLibrary(mock_zapi_client) self.assertEqual(mock_zapi_client, library.zapi_client) mock_init_counter_info.assert_called_once_with() def test_init_counter_info(self): self.perf_library._init_counter_info() self.assertIsNone(self.perf_library.system_object_name) self.assertIsNone( self.perf_library.avg_processor_busy_base_counter_name) def test_get_node_utilization_kahuna_overutilized(self): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=61.0)) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=25.0)) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertAlmostEqual(100.0, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') self.assertFalse(mock_get_average_cpu_utilization.called) @ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0}, {'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000}, {'cpu': 0.50, 'cp_time': 0, 'poll_time': 0}) @ddt.unpack def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=59.0)) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=cpu)) mock_get_total_consistency_point_time = self.mock_object( self.perf_library, '_get_total_consistency_point_time', mock.Mock(return_value=cp_time)) mock_get_consistency_point_p2_flush_time = self.mock_object( self.perf_library, '_get_consistency_point_p2_flush_time', mock.Mock(return_value=cp_time)) mock_get_total_time = self.mock_object( self.perf_library, '_get_total_time', mock.Mock(return_value=poll_time)) mock_get_adjusted_consistency_point_time = self.mock_object( self.perf_library, '_get_adjusted_consistency_point_time') result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') expected = max(min(100.0, 100.0 * cpu), 0) self.assertEqual(expected, 
result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') mock_get_average_cpu_utilization.assert_called_once_with('fake1', 'fake2') mock_get_total_consistency_point_time.assert_called_once_with('fake1', 'fake2') mock_get_consistency_point_p2_flush_time.assert_called_once_with( 'fake1', 'fake2') mock_get_total_time.assert_called_once_with('fake1', 'fake2', 'total_cp_msecs') self.assertFalse(mock_get_adjusted_consistency_point_time.called) @ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80}, {'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80}, {'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100}) @ddt.unpack def test_get_node_utilization(self, cpu, adjusted_cp_time, expected): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=59.0)) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=cpu)) mock_get_total_consistency_point_time = self.mock_object( self.perf_library, '_get_total_consistency_point_time', mock.Mock(return_value=90.0)) mock_get_consistency_point_p2_flush_time = self.mock_object( self.perf_library, '_get_consistency_point_p2_flush_time', mock.Mock(return_value=50.0)) mock_get_total_time = self.mock_object( self.perf_library, '_get_total_time', mock.Mock(return_value=10000)) mock_get_adjusted_consistency_point_time = self.mock_object( self.perf_library, '_get_adjusted_consistency_point_time', mock.Mock(return_value=adjusted_cp_time)) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertEqual(expected, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') mock_get_average_cpu_utilization.assert_called_once_with('fake1', 'fake2') mock_get_total_consistency_point_time.assert_called_once_with('fake1', 'fake2') mock_get_consistency_point_p2_flush_time.assert_called_once_with( 'fake1', 'fake2') mock_get_total_time.assert_called_once_with('fake1', 'fake2', 'total_cp_msecs') mock_get_adjusted_consistency_point_time.assert_called_once_with( 90.0, 50.0) def test_get_node_utilization_calculation_error(self): self.mock_object(self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=59.0)) self.mock_object(self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=25.0)) self.mock_object(self.perf_library, '_get_total_consistency_point_time', mock.Mock(return_value=90.0)) self.mock_object(self.perf_library, '_get_consistency_point_p2_flush_time', mock.Mock(return_value=50.0)) self.mock_object(self.perf_library, '_get_total_time', mock.Mock(return_value=10000)) self.mock_object(self.perf_library, '_get_adjusted_consistency_point_time', mock.Mock(side_effect=ZeroDivisionError)) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertEqual(perf_base.DEFAULT_UTILIZATION, result) def test_get_kahuna_utilization(self): mock_get_performance_counter = self.mock_object( self.perf_library, '_get_performance_counter_average_multi_instance', mock.Mock(return_value=[0.2, 0.3])) result = self.perf_library._get_kahuna_utilization('fake_t1', 'fake_t2') self.assertAlmostEqual(50.0, result) mock_get_performance_counter.assert_called_once_with( 'fake_t1', 'fake_t2', 'domain_busy:kahuna', 'processor_elapsed_time') def test_get_average_cpu_utilization(self): mock_get_performance_counter_average = self.mock_object( self.perf_library, '_get_performance_counter_average', mock.Mock(return_value=0.45)) result = 
self.perf_library._get_average_cpu_utilization('fake_t1', 'fake_t2') self.assertAlmostEqual(0.45, result) mock_get_performance_counter_average.assert_called_once_with( 'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1') def test_get_total_consistency_point_time(self): mock_get_performance_counter_delta = self.mock_object( self.perf_library, '_get_performance_counter_delta', mock.Mock(return_value=500)) result = self.perf_library._get_total_consistency_point_time( 'fake_t1', 'fake_t2') self.assertEqual(500, result) mock_get_performance_counter_delta.assert_called_once_with( 'fake_t1', 'fake_t2', 'total_cp_msecs') def test_get_consistency_point_p2_flush_time(self): mock_get_performance_counter_delta = self.mock_object( self.perf_library, '_get_performance_counter_delta', mock.Mock(return_value=500)) result = self.perf_library._get_consistency_point_p2_flush_time( 'fake_t1', 'fake_t2') self.assertEqual(500, result) mock_get_performance_counter_delta.assert_called_once_with( 'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush') def test_get_total_time(self): mock_find_performance_counter_timestamp = self.mock_object( self.perf_library, '_find_performance_counter_timestamp', mock.Mock(side_effect=[100, 105])) result = self.perf_library._get_total_time('fake_t1', 'fake_t2', 'fake_counter') self.assertEqual(5000, result) mock_find_performance_counter_timestamp.assert_has_calls([ mock.call('fake_t1', 'fake_counter'), mock.call('fake_t2', 'fake_counter')]) def test_get_adjusted_consistency_point_time(self): result = self.perf_library._get_adjusted_consistency_point_time( 500, 200) self.assertAlmostEqual(250, result) def test_get_performance_counter_delta(self): result = self.perf_library._get_performance_counter_delta( fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs') self.assertEqual(1482, result) def test_get_performance_counter_average(self): result = self.perf_library._get_performance_counter_average( fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', 'processor_elapsed_time', 'processor0') self.assertAlmostEqual(0.00281954360981, result) def test_get_performance_counter_average_multi_instance(self): result = ( self.perf_library._get_performance_counter_average_multi_instance( fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', 'processor_elapsed_time')) expected = [0.002819543609809441, 0.0033421611147606135] self.assertAlmostEqual(expected, result) def test_find_performance_counter_value(self): result = self.perf_library._find_performance_counter_value( fake.COUNTERS_T1, 'domain_busy:kahuna', instance_name='processor0') self.assertEqual('2712467226', result) def test_find_performance_counter_value_not_found(self): self.assertRaises( exception.NotFound, self.perf_library._find_performance_counter_value, fake.COUNTERS_T1, 'invalid', instance_name='processor0') def test_find_performance_counter_timestamp(self): result = self.perf_library._find_performance_counter_timestamp( fake.COUNTERS_T1, 'domain_busy') self.assertEqual('1453573777', result) def test_find_performance_counter_timestamp_not_found(self): self.assertRaises( exception.NotFound, self.perf_library._find_performance_counter_timestamp, fake.COUNTERS_T1, 'invalid', instance_name='processor0') def test_expand_performance_array(self): counter_info = { 'labels': ['idle', 'kahuna', 'storage', 'exempt'], 'name': 'domain_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': 
'969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', } self.perf_library._expand_performance_array('wafl', 'domain_busy', counter) modified_counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', 'domain_busy:idle': '969142314286', 'domain_busy:kahuna': '2567571412', 'domain_busy:storage': '2131582146', 'domain_busy:exempt': '5383861579', } self.assertEqual(modified_counter, counter) def test_get_base_counter_name(self): counter_info = { 'base-counter': 'cpu_elapsed_time', 'labels': [], 'name': 'avg_processor_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) result = self.perf_library._get_base_counter_name( 'system:constituent', 'avg_processor_busy') self.assertEqual('cpu_elapsed_time', result)
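# ---------------------------------------------------------------------------
# Editor's sketch, inferred purely from the test fixtures above rather than
# from the perf_base source: the expectations encode a few clamping rules for
# _get_node_utilization(). The 60% kahuna threshold is an inference (61.0
# triggers the cap in the tests, 59.0 does not), and the full consistency-
# point-adjusted branch is deliberately not reproduced here.
# ---------------------------------------------------------------------------
def _expected_node_utilization_sketch(kahuna_pct, cpu_fraction,
                                      cp_time, poll_time):
    if kahuna_pct >= 60.0:
        return 100.0                       # kahuna domain over-utilized
    if cp_time == 0 or poll_time == 0:
        # Degenerate sampling window: fall back to a clamped CPU percentage.
        return max(min(100.0, 100.0 * cpu_fraction), 0)
    raise NotImplementedError('full consistency-point path not sketched')


assert _expected_node_utilization_sketch(61.0, 0.25, 10000, 1000) == 100.0
assert _expected_node_utilization_sketch(59.0, -0.01, 10000, 0) == 0
assert _expected_node_utilization_sketch(59.0, 1.01, 0, 1000) == 100.0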
# DEAP is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 3 of # the License, or (at your option) any later version. # # DEAP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with DEAP. If not, see <http://www.gnu.org/licenses/>. # Highly modified by Jason W. Sidabras [email protected] # maximize H field in a sample # this is used to find non-obvious solutions to the planar micro resonator # turns elements to silver (1) or vacuum (0) import random from deap import base from deap import creator from deap import tools import shutil import os import re import subprocess mat_re = re.compile("MaterialValue") start_re = re.compile("begin \'ToplevelParts\'") end_re = re.compile("end \'ToplevelParts\'") slv_re = re.compile("SolveInside") from datetime import datetime startTime = datetime.now() creator.create("FitnessMax", base.Fitness, weights=(1.0,)) creator.create("Individual", list, typecode='f', fitness=creator.FitnessMax) toolbox = base.Toolbox() # Attribute generator # define 'attr_bool' to be an attribute ('gene') # which corresponds to integers sampled uniformly # from the range [0,1] (i.e. 0 or 1 with equal # probability) toolbox.register("attr_bool", random.randint, 0, 1) # Structure initializers # define 'individual' to be an individual # consisting of 2490 'attr_bool' elements ('genes') toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 1767) # define the population to be a list of individuals toolbox.register("population", tools.initRepeat, list, toolbox.individual) # colorize the solution for visual of generation def evalOneMax(individual): # Solutions results purge with shutil.rmtree file = "B:\\GA_modify.aedtresults" try: shutil.rmtree(file) except: pass files = ["B:\\tmp.fld", "B:\\GA_modify.aedt", "B:\\GA_modify.aedt.lock"] for file in files: try: os.remove(file) except: pass index = 0 list_vac = [] list_pec = [] for i in individual: if i == 1: list_pec.append("Elm_"+str(index)+"\'") index += 1 else: list_vac.append("Elm_"+str(index)+"\'") index += 1 vac_re = re.compile("|".join(list_vac)) pec_re = re.compile("|".join(list_pec)) file_out = open("GA_modify.aedt", 'wb+') with open("GA_PlanarResonator.aedt", "rb") as f: flag_start = 0 flag_vac = 0 flag_pec = 0 try: for line in f: try: line = line.decode('utf-8') except: file_out.write(line) continue if start_re.search(line): file_out.write(line.encode('utf-8')) flag_start = 1 elif end_re.search(line): file_out.write(line.encode('utf-8')) flag_start = 0 elif vac_re.search(line) and flag_start == 1: flag_vac = 1 file_out.write(line.encode('utf-8')) continue elif pec_re.search(line) and flag_start == 1: flag_pec = 1 file_out.write(line.encode('utf-8')) continue else: if flag_vac == 1 and mat_re.search(line): file_out.write(line.replace('pec', 'vacuum').encode('utf-8')) elif flag_vac == 1 and slv_re.search(line): file_out.write(line.replace('false', 'true').encode('utf-8')) flag_vac = 0 elif flag_pec == 1 and mat_re.search(line): file_out.write(line.replace('vacuum', 'pec').encode('utf-8')) elif flag_pec == 1 and slv_re.search(line): file_out.write(line.replace('true', 'false').encode('utf-8')) flag_pec = 0 
else: file_out.write(line.encode('utf-8')) except UnicodeDecodeError: print("thing") file_out.close() cmdCommand = "ansysedt.exe -ng -WaitForLicense -RunScriptAndExit Calc_output.py -BatchSave GA_modify.aedt" #specify your cmd command process = subprocess.Popen(cmdCommand.split(), stdout=subprocess.PIPE, shell=True) output, error = process.communicate() try: with open("B:\\tmp.fld", "r") as out_file: for line in out_file: try: print(float(line)) output = float(line) break except: continue return output, except: print ("No tmp.fld, failed solution?") return 0, #---------- # Operator registration #---------- # register the goal / fitness function toolbox.register("evaluate", evalOneMax) # register the crossover operator toolbox.register("mate", tools.cxTwoPoint) # register a mutation operator with a probability to # flip each attribute/gene of 0.05 toolbox.register("mutate", tools.mutFlipBit, indpb=0.05) # operator for selecting individuals for breeding the next # generation: each individual of the current generation # is replaced by the 'fittest' (best) of three individuals # drawn randomly from the current generation. toolbox.register("select", tools.selTournament, tournsize=3) #---------- def main(): random.seed(42) # create an initial population of 300 individuals (where # each individual is a list of integers) pop = toolbox.population(n=60) # CXPB is the probability with which two individuals # are crossed # # MUTPB is the probability for mutating an individual # # NGEN is the number of generations for which the # evolution runs CXPB, MUTPB, NGEN = 0.55, 0.25, 30 print("Start of evolution") # Evaluate the entire population fitnesses = list(map(toolbox.evaluate, pop)) for ind, fit in zip(pop, fitnesses): ind.fitness.values = fit print(" Evaluated %i individuals" % len(pop)) # Begin the evolution for g in range(NGEN): print("-- Generation %i --" % g) # Select the next generation individuals offspring = toolbox.select(pop, len(pop)) # Clone the selected individuals offspring = list(map(toolbox.clone, offspring)) # Apply crossover and mutation on the offspring for child1, child2 in zip(offspring[::2], offspring[1::2]): # cross two individuals with probability CXPB if random.random() < CXPB: toolbox.mate(child1, child2) # fitness values of the children # must be recalculated later del child1.fitness.values del child2.fitness.values for mutant in offspring: # mutate an individual with probability MUTPB if random.random() < MUTPB: toolbox.mutate(mutant) del mutant.fitness.values # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in offspring if not ind.fitness.valid] fitnesses = map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit print(" Evaluated %i individuals" % len(invalid_ind)) # The population is entirely replaced by the offspring pop[:] = offspring # Gather all the fitnesses in one list and print the stats fits = [ind.fitness.values[0] for ind in pop] length = len(pop) mean = sum(fits) / length sum2 = sum(x*x for x in fits) std = abs(sum2 / length - mean**2)**0.5 print(" Min %s" % min(fits)) print(" Max %s" % max(fits)) print(" Avg %s" % mean) print(" Std %s" % std) # Save progress best_ind = tools.selBest(pop, 1)[0] f = open('E:\\Dropbox\\_WorkingDir\\GA-PMR\\Solutions\\' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '_best_individual_Gen_' + str(g), 'w+') f.write("%s\n" % (best_ind)) f.write(" Max %s" % max(fits)) f.close() print("Time: " + str(datetime.now() - startTime)) print("-- End of (successful) 
evolution --") best_ind = tools.selBest(pop, 1)[0] print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values)) print(datetime.now() - startTime) # Save best individual final f = open('E:\\Dropbox\\_WorkingDir\\GA-PMR\\Solutions\\' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '_best_individual_Gen_Final', 'w+') f.write("%s\n" % (best_ind)) f.write(" Max %s" % max(fits)) f.close() if __name__ == "__main__": main()
""" This module provides more sophisticated flow tracking and provides filtering and interception facilities. """ from __future__ import absolute_import from abc import abstractmethod, ABCMeta import hashlib import Cookie import cookielib import os import re from netlib import odict, wsgi, tcp import netlib.http from . import controller, protocol, tnetstring, filt, script, version from .onboarding import app from .protocol import http, handle from .proxy.config import HostMatcher from .proxy.connection import ClientConnection, ServerConnection import urlparse class AppRegistry: def __init__(self): self.apps = {} def add(self, app, domain, port): """ Add a WSGI app to the registry, to be served for requests to the specified domain, on the specified port. """ self.apps[(domain, port)] = wsgi.WSGIAdaptor( app, domain, port, version.NAMEVERSION ) def get(self, request): """ Returns an WSGIAdaptor instance if request matches an app, or None. """ if (request.host, request.port) in self.apps: return self.apps[(request.host, request.port)] if "host" in request.headers: host = request.headers["host"][0] return self.apps.get((host, request.port), None) class ReplaceHooks: def __init__(self): self.lst = [] def set(self, r): self.clear() for i in r: self.add(*i) def add(self, fpatt, rex, s): """ add a replacement hook. fpatt: a string specifying a filter pattern. rex: a regular expression. s: the replacement string returns true if hook was added, false if the pattern could not be parsed. """ cpatt = filt.parse(fpatt) if not cpatt: return False try: re.compile(rex) except re.error: return False self.lst.append((fpatt, rex, s, cpatt)) return True def get_specs(self): """ Retrieve the hook specifcations. Returns a list of (fpatt, rex, s) tuples. """ return [i[:3] for i in self.lst] def count(self): return len(self.lst) def run(self, f): for _, rex, s, cpatt in self.lst: if cpatt(f): if f.response: f.response.replace(rex, s) else: f.request.replace(rex, s) def clear(self): self.lst = [] class SetHeaders: def __init__(self): self.lst = [] def set(self, r): self.clear() for i in r: self.add(*i) def add(self, fpatt, header, value): """ Add a set header hook. fpatt: String specifying a filter pattern. header: Header name. value: Header value string Returns True if hook was added, False if the pattern could not be parsed. """ cpatt = filt.parse(fpatt) if not cpatt: return False self.lst.append((fpatt, header, value, cpatt)) return True def get_specs(self): """ Retrieve the hook specifcations. Returns a list of (fpatt, rex, s) tuples. """ return [i[:3] for i in self.lst] def count(self): return len(self.lst) def clear(self): self.lst = [] def run(self, f): for _, header, value, cpatt in self.lst: if cpatt(f): if f.response: del f.response.headers[header] else: del f.request.headers[header] for _, header, value, cpatt in self.lst: if cpatt(f): if f.response: f.response.headers.add(header, value) else: f.request.headers.add(header, value) class StreamLargeBodies(object): def __init__(self, max_size): self.max_size = max_size def run(self, flow, is_request): r = flow.request if is_request else flow.response code = flow.response.code if flow.response else None expected_size = netlib.http.expected_http_body_size( r.headers, is_request, flow.request.method, code ) if not (0 <= expected_size <= self.max_size): # r.stream may already be a callable, which we want to preserve. 
r.stream = r.stream or True class ClientPlaybackState: def __init__(self, flows, exit): self.flows, self.exit = flows, exit self.current = None self.testing = False # Disables actual replay for testing. def count(self): return len(self.flows) def done(self): if len(self.flows) == 0 and not self.current: return True return False def clear(self, flow): """ A request has returned in some way - if this is the one we're servicing, go to the next flow. """ if flow is self.current: self.current = None def tick(self, master): if self.flows and not self.current: self.current = self.flows.pop(0).copy() if not self.testing: master.replay_request(self.current) else: self.current.reply = controller.DummyReply() master.handle_request(self.current) if self.current.response: master.handle_response(self.current) class ServerPlaybackState: def __init__( self, headers, flows, exit, nopop, ignore_params, ignore_content, ignore_payload_params, ignore_host): """ headers: Case-insensitive list of request headers that should be included in request-response matching. """ self.headers = headers self.exit = exit self.nopop = nopop self.ignore_params = ignore_params self.ignore_content = ignore_content self.ignore_payload_params = ignore_payload_params self.ignore_host = ignore_host self.fmap = {} for i in flows: if i.response: l = self.fmap.setdefault(self._hash(i), []) l.append(i) def count(self): return sum(len(i) for i in self.fmap.values()) def _hash(self, flow): """ Calculates a loose hash of the flow request. """ r = flow.request _, _, path, _, query, _ = urlparse.urlparse(r.url) queriesArray = urlparse.parse_qsl(query, keep_blank_values=True) key = [ str(r.port), str(r.scheme), str(r.method), str(path), ] if not self.ignore_content: form_contents = r.get_form() if self.ignore_payload_params and form_contents: key.extend( p for p in form_contents if p[0] not in self.ignore_payload_params ) else: key.append(str(r.content)) if not self.ignore_host: key.append(r.host) filtered = [] ignore_params = self.ignore_params or [] for p in queriesArray: if p[0] not in ignore_params: filtered.append(p) for p in filtered: key.append(p[0]) key.append(p[1]) if self.headers: hdrs = [] for i in self.headers: v = r.headers[i] # Slightly subtle: we need to convert everything to strings # to prevent a mismatch between unicode/non-unicode. v = [str(x) for x in v] hdrs.append((i, v)) key.append(hdrs) return hashlib.sha256(repr(key)).digest() def next_flow(self, request): """ Returns the next flow object, or None if no matching flow was found. """ l = self.fmap.get(self._hash(request)) if not l: return None if self.nopop: return l[0] else: return l.pop(0) class StickyCookieState: def __init__(self, flt): """ flt: Compiled filter. """ self.jar = {} self.flt = flt def ckey(self, m, f): """ Returns a (domain, port, path) tuple. """ return ( m["domain"] or f.request.host, f.request.port, m["path"] or "/" ) def domain_match(self, a, b): if cookielib.domain_match(a, b): return True elif cookielib.domain_match(a, b.strip(".")): return True return False def handle_response(self, f): for i in f.response.headers["set-cookie"]: # FIXME: We now know that Cookie.py screws up some cookies with # valid RFC 822/1123 datetime specifications for expiry. Sigh. 
c = Cookie.SimpleCookie(str(i)) for m in c.values(): k = self.ckey(m, f) if self.domain_match(f.request.host, k[0]): self.jar[k] = m def handle_request(self, f): l = [] if f.match(self.flt): for i in self.jar.keys(): match = [ self.domain_match(f.request.host, i[0]), f.request.port == i[1], f.request.path.startswith(i[2]) ] if all(match): l.append(self.jar[i].output(header="").strip()) if l: f.request.stickycookie = True f.request.headers["cookie"] = l class StickyAuthState: def __init__(self, flt): """ flt: Compiled filter. """ self.flt = flt self.hosts = {} def handle_request(self, f): host = f.request.host if "authorization" in f.request.headers: self.hosts[host] = f.request.headers["authorization"] elif f.match(self.flt): if host in self.hosts: f.request.headers["authorization"] = self.hosts[host] class FlowList(object): __metaclass__ = ABCMeta def __iter__(self): return iter(self._list) def __contains__(self, item): return item in self._list def __getitem__(self, item): return self._list[item] def __nonzero__(self): return bool(self._list) def __len__(self): return len(self._list) def index(self, f): return self._list.index(f) @abstractmethod def _add(self, f): return @abstractmethod def _update(self, f): return @abstractmethod def _remove(self, f): return class FlowView(FlowList): def __init__(self, store, filt=None): self._list = [] if not filt: filt = lambda flow: True self._build(store, filt) self.store = store self.store.views.append(self) def _close(self): self.store.views.remove(self) def _build(self, flows, filt=None): if filt: self.filt = filt self._list = list(filter(self.filt, flows)) def _add(self, f): if self.filt(f): self._list.append(f) def _update(self, f): if f not in self._list: self._add(f) elif not self.filt(f): self._remove(f) def _remove(self, f): if f in self._list: self._list.remove(f) def _recalculate(self, flows): self._build(flows) class FlowStore(FlowList): """ Responsible for handling flows in the state: Keeps a list of all flows and provides views on them. """ def __init__(self): self._list = [] self._set = set() # Used for O(1) lookups self.views = [] self._recalculate_views() def get(self, flow_id): for f in self._list: if f.id == flow_id: return f def __contains__(self, f): return f in self._set def _add(self, f): """ Adds a flow to the state. The flow to add must not be present in the state. """ self._list.append(f) self._set.add(f) for view in self.views: view._add(f) def _update(self, f): """ Notifies the state that a flow has been updated. The flow must be present in the state. """ if f in self: for view in self.views: view._update(f) def _remove(self, f): """ Deletes a flow from the state. The flow must be present in the state. """ self._list.remove(f) self._set.remove(f) for view in self.views: view._remove(f) # Expensive bulk operations def _extend(self, flows): """ Adds a list of flows to the state. The list of flows to add must not contain flows that are already in the state. """ self._list.extend(flows) self._set.update(flows) self._recalculate_views() def _clear(self): self._list = [] self._set = set() self._recalculate_views() def _recalculate_views(self): """ Expensive operation: Recalculate all the views after a bulk change. """ for view in self.views: view._recalculate(self) # Utility functions. # There are some common cases where we need to argue about all flows # irrespective of filters on the view etc (i.e. on shutdown). 
def active_count(self): c = 0 for i in self._list: if not i.response and not i.error: c += 1 return c # TODO: Should accept_all operate on views or on all flows? def accept_all(self, master): for f in self._list: f.accept_intercept(master) def kill_all(self, master): for f in self._list: f.kill(master) class State(object): def __init__(self): self.flows = FlowStore() self.view = FlowView(self.flows, None) # These are compiled filt expressions: self.intercept = None @property def limit_txt(self): return getattr(self.view.filt, "pattern", None) def flow_count(self): return len(self.flows) # TODO: All functions regarding flows that don't cause side-effects should # be moved into FlowStore. def index(self, f): return self.flows.index(f) def active_flow_count(self): return self.flows.active_count() def add_flow(self, f): """ Add a request to the state. """ self.flows._add(f) return f def update_flow(self, f): """ Add a response to the state. """ self.flows._update(f) return f def delete_flow(self, f): self.flows._remove(f) def load_flows(self, flows): self.flows._extend(flows) def set_limit(self, txt): if txt == self.limit_txt: return if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self.view._close() self.view = FlowView(self.flows, f) else: self.view._close() self.view = FlowView(self.flows, None) def set_intercept(self, txt): if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self.intercept = f else: self.intercept = None @property def intercept_txt(self): return getattr(self.intercept, "pattern", None) def clear(self): self.flows._clear() def accept_all(self, master): self.flows.accept_all(master) def backup(self, f): f.backup() self.update_flow(f) def revert(self, f): f.revert() self.update_flow(f) def killall(self, master): self.flows.kill_all(master) class FlowMaster(controller.Master): def __init__(self, server, state): controller.Master.__init__(self, server) self.state = state self.server_playback = None self.client_playback = None self.kill_nonreplay = False self.scripts = [] self.pause_scripts = False self.stickycookie_state = False self.stickycookie_txt = None self.stickyauth_state = False self.stickyauth_txt = None self.anticache = False self.anticomp = False self.stream_large_bodies = False self.refresh_server_playback = False self.replacehooks = ReplaceHooks() self.setheaders = SetHeaders() self.replay_ignore_params = False self.replay_ignore_content = None self.replay_ignore_host = False self.stream = None self.apps = AppRegistry() def start_app(self, host, port): self.apps.add( app.mapp, host, port ) def add_event(self, e, level="info"): """ level: debug, info, error """ pass def unload_scripts(self): for s in self.scripts[:]: self.unload_script(s) def unload_script(self, script): script.unload() self.scripts.remove(script) def load_script(self, command): """ Loads a script. Returns an error description if something went wrong. 
""" try: s = script.Script(command, self) except script.ScriptError as v: return v.args[0] self.scripts.append(s) def run_single_script_hook(self, script, name, *args, **kwargs): if script and not self.pause_scripts: ret = script.run(name, *args, **kwargs) if not ret[0] and ret[1]: e = "Script error:\n" + ret[1][1] self.add_event(e, "error") def run_script_hook(self, name, *args, **kwargs): for script in self.scripts: self.run_single_script_hook(script, name, *args, **kwargs) def get_ignore_filter(self): return self.server.config.check_ignore.patterns def set_ignore_filter(self, host_patterns): self.server.config.check_ignore = HostMatcher(host_patterns) def get_tcp_filter(self): return self.server.config.check_tcp.patterns def set_tcp_filter(self, host_patterns): self.server.config.check_tcp = HostMatcher(host_patterns) def set_stickycookie(self, txt): if txt: flt = filt.parse(txt) if not flt: return "Invalid filter expression." self.stickycookie_state = StickyCookieState(flt) self.stickycookie_txt = txt else: self.stickycookie_state = None self.stickycookie_txt = None def set_stream_large_bodies(self, max_size): if max_size is not None: self.stream_large_bodies = StreamLargeBodies(max_size) else: self.stream_large_bodies = False def set_stickyauth(self, txt): if txt: flt = filt.parse(txt) if not flt: return "Invalid filter expression." self.stickyauth_state = StickyAuthState(flt) self.stickyauth_txt = txt else: self.stickyauth_state = None self.stickyauth_txt = None def start_client_playback(self, flows, exit): """ flows: List of flows. """ self.client_playback = ClientPlaybackState(flows, exit) def stop_client_playback(self): self.client_playback = None def start_server_playback( self, flows, kill, headers, exit, nopop, ignore_params, ignore_content, ignore_payload_params, ignore_host): """ flows: List of flows. kill: Boolean, should we kill requests not part of the replay? ignore_params: list of parameters to ignore in server replay ignore_content: true if request content should be ignored in server replay ignore_payload_params: list of content params to ignore in server replay ignore_host: true if request host should be ignored in server replay """ self.server_playback = ServerPlaybackState( headers, flows, exit, nopop, ignore_params, ignore_content, ignore_payload_params, ignore_host) self.kill_nonreplay = kill def stop_server_playback(self): if self.server_playback.exit: self.shutdown() self.server_playback = None def do_server_playback(self, flow): """ This method should be called by child classes in the handle_request handler. Returns True if playback has taken place, None if not. 
""" if self.server_playback: rflow = self.server_playback.next_flow(flow) if not rflow: return None response = http.HTTPResponse.from_state(rflow.response.get_state()) response.is_replay = True if self.refresh_server_playback: response.refresh() flow.reply(response) if self.server_playback.count() == 0: self.stop_server_playback() return True return None def tick(self, q, timeout): if self.client_playback: e = [ self.client_playback.done(), self.client_playback.exit, self.state.active_flow_count() == 0 ] if all(e): self.shutdown() self.client_playback.tick(self) if self.client_playback.done(): self.client_playback = None return super(FlowMaster, self).tick(q, timeout) def duplicate_flow(self, f): return self.load_flow(f.copy()) def create_request(self, method, scheme, host, port, path): """ this method creates a new artificial and minimalist request also adds it to flowlist """ c = ClientConnection.from_state(dict( address=dict(address=(host, port), use_ipv6=False), clientcert=None )) s = ServerConnection.from_state(dict( address=dict(address=(host, port), use_ipv6=False), state=[], source_address=None, # source_address=dict(address=(host, port), use_ipv6=False), cert=None, sni=host, ssl_established=True )) f = http.HTTPFlow(c, s) headers = odict.ODictCaseless() req = http.HTTPRequest( "absolute", method, scheme, host, port, path, (1, 1), headers, None, None, None, None) f.request = req return self.load_flow(f) def load_flow(self, f): """ Loads a flow, and returns a new flow object. """ if self.server and self.server.config.mode == "reverse": f.request.host, f.request.port = self.server.config.mode.dst[2:] f.request.scheme = "https" if self.server.config.mode.dst[ 1] else "http" f.reply = controller.DummyReply() if f.request: self.handle_request(f) if f.response: self.handle_responseheaders(f) self.handle_response(f) if f.error: self.handle_error(f) return f def load_flows(self, fr): """ Load flows from a FlowReader object. """ cnt = 0 for i in fr.stream(): cnt += 1 self.load_flow(i) return cnt def load_flows_file(self, path): path = os.path.expanduser(path) try: f = file(path, "rb") freader = FlowReader(f) except IOError as v: raise FlowReadError(v.strerror) return self.load_flows(freader) def process_new_request(self, f): if self.stickycookie_state: self.stickycookie_state.handle_request(f) if self.stickyauth_state: self.stickyauth_state.handle_request(f) if self.anticache: f.request.anticache() if self.anticomp: f.request.anticomp() if self.server_playback: pb = self.do_server_playback(f) if not pb: if self.kill_nonreplay: f.kill(self) else: f.reply() def process_new_response(self, f): if self.stickycookie_state: self.stickycookie_state.handle_response(f) def replay_request(self, f, block=False, run_scripthooks=True): """ Returns None if successful, or error message if not. """ if f.live and run_scripthooks: return "Can't replay live request." if f.intercepted: return "Can't replay while intercepting..." if f.request.content == http.CONTENT_MISSING: return "Can't replay request with missing content..." 
if f.request: f.backup() f.request.is_replay = True if f.request.content: f.request.headers[ "Content-Length"] = [str(len(f.request.content))] f.response = None f.error = None self.process_new_request(f) rt = http.RequestReplayThread( self.server.config, f, self.masterq if run_scripthooks else False, self.should_exit ) rt.start() # pragma: no cover if block: rt.join() def handle_log(self, l): self.add_event(l.msg, l.level) l.reply() def handle_clientconnect(self, cc): self.run_script_hook("clientconnect", cc) cc.reply() def handle_clientdisconnect(self, r): self.run_script_hook("clientdisconnect", r) r.reply() def handle_serverconnect(self, sc): self.run_script_hook("serverconnect", sc) sc.reply() def handle_error(self, f): self.state.update_flow(f) self.run_script_hook("error", f) if self.client_playback: self.client_playback.clear(f) f.reply() return f def handle_request(self, f): if f.live: app = self.apps.get(f.request) if app: err = app.serve( f, f.client_conn.wfile, **{"mitmproxy.master": self} ) if err: self.add_event("Error in wsgi app. %s" % err, "error") f.reply(protocol.KILL) return if f not in self.state.flows: # don't add again on replay self.state.add_flow(f) self.replacehooks.run(f) self.setheaders.run(f) self.run_script_hook("request", f) self.process_new_request(f) return f def handle_responseheaders(self, f): self.run_script_hook("responseheaders", f) try: if self.stream_large_bodies: self.stream_large_bodies.run(f, False) except netlib.http.HttpError: f.reply(protocol.KILL) return f.reply() return f def handle_response(self, f): self.state.update_flow(f) self.replacehooks.run(f) self.setheaders.run(f) self.run_script_hook("response", f) if self.client_playback: self.client_playback.clear(f) self.process_new_response(f) if self.stream: self.stream.add(f) return f def handle_intercept(self, f): self.state.update_flow(f) def handle_accept_intercept(self, f): self.state.update_flow(f) def shutdown(self): self.unload_scripts() controller.Master.shutdown(self) if self.stream: for i in self.state.flows: if not i.response: self.stream.add(i) self.stop_stream() def start_stream(self, fp, filt): self.stream = FilteredFlowWriter(fp, filt) def stop_stream(self): self.stream.fo.close() self.stream = None def read_flows_from_paths(paths): """ Given a list of filepaths, read all flows and return a list of them. From a performance perspective, streaming would be advisable - however, if there's an error with one of the files, we want it to be raised immediately. If an error occurs, a FlowReadError will be raised. """ try: flows = [] for path in paths: path = os.path.expanduser(path) with file(path, "rb") as f: flows.extend(FlowReader(f).stream()) except IOError as e: raise FlowReadError(e.strerror) return flows class FlowWriter: def __init__(self, fo): self.fo = fo def add(self, flow): d = flow.get_state() tnetstring.dump(d, self.fo) class FlowReadError(Exception): @property def strerror(self): return self.args[0] class FlowReader: def __init__(self, fo): self.fo = fo def stream(self): """ Yields Flow objects from the dump. 
""" off = 0 try: while True: data = tnetstring.load(self.fo) if tuple(data["version"][:2]) != version.IVERSION[:2]: v = ".".join(str(i) for i in data["version"]) raise FlowReadError( "Incompatible serialized data version: %s" % v ) off = self.fo.tell() yield handle.protocols[data["type"]]["flow"].from_state(data) except ValueError as v: # Error is due to EOF if self.fo.tell() == off and self.fo.read() == '': return raise FlowReadError("Invalid data format.") class FilteredFlowWriter: def __init__(self, fo, filt): self.fo = fo self.filt = filt def add(self, f): if self.filt and not f.match(self.filt): return d = f.get_state() tnetstring.dump(d, self.fo)
""" Script to generate the MNIST+ dataset. The purpose of this dataset is to make a more challenging MNIST-like dataset, with multiple factors of variation. These factors can serve to evaluate a model's performance at learning invariant features, or its ability to disentangle factors of variation in a multi-task classification setting. The dataset is stored under $PYLEARN2_DATA_PATH. The dataset variants are created as follows. For each MNIST image, we: 1. Perform a random rotation of the image (optional) 2. Rescale the image from 28x28 to 48x48, yielding variable `image`. 3.1 Extract a random patch `textured_patch` from a fixed or random image of the Brodatz texture dataset. 3.2 Generate mask of MNIST digit outline, by thresholding MNIST digit at 0.1 3.3 Fuse MNIST digit and textured patch as follows: textured_patch[mask] <= image[mask]; image <= textured_patch; 4. Randomly select position of light source (optional) 5. Perform embossing operation, given fixed lighting position obtained in 4. """ import numpy import pickle import pylab as pl from copy import copy from optparse import OptionParser from pylearn2.datasets import mnist from pylearn2.utils import string_utils import warnings try: from PIL import Image except ImportError: warnings.warn("Couldn't import Image from PIL, so far make_mnistplus " "is only supported with PIL") OUTPUT_SIZE = 48 DOWN_SAMPLE = 1 def to_array(img): """ Convert PIL.Image to numpy.ndarray. :param img: numpy.ndarray """ return numpy.array(img.getdata()) / 255. def to_img(arr, os): """ Convert numpy.ndarray to PIL.Image :param arr: numpy.ndarray :param os: integer, size of output image. """ return Image.fromarray(arr.reshape(os, os) * 255.) def emboss(img, azi=45., ele=18., dep=2): """ Perform embossing of image `img`. :param img: numpy.ndarray, matrix representing image to emboss. :param azi: azimuth (in degrees) :param ele: elevation (in degrees) :param dep: depth, (0-100) """ # defining azimuth, elevation, and depth ele = (ele * 2 * numpy.pi) / 360. azi = (azi * 2 * numpy.pi) / 360. a = numpy.asarray(img).astype('float') # find the gradient grad = numpy.gradient(a) # (it is two arrays: grad_x and grad_y) grad_x, grad_y = grad # getting the unit incident ray gd = numpy.cos(ele) # length of projection of ray on ground plane dx = gd * numpy.cos(azi) dy = gd * numpy.sin(azi) dz = numpy.sin(ele) # adjusting the gradient by the "depth" factor # (I think this is how GIMP defines it) grad_x = grad_x * dep / 100. grad_y = grad_y * dep / 100. # finding the unit normal vectors for the image leng = numpy.sqrt(grad_x**2 + grad_y**2 + 1.) uni_x = grad_x/leng uni_y = grad_y/leng uni_z = 1./leng # take the dot product a2 = 255 * (dx*uni_x + dy*uni_y + dz*uni_z) # avoid overflow a2 = a2.clip(0, 255) # you must convert back to uint8 /before/ converting to an image return Image.fromarray(a2.astype('uint8')) def extract_patch(textid, os, downsample): """ Extract a patch of texture #textid of Brodatz dataset. :param textid: id of texture image to load. :param os: size of MNIST+ output images. :param downsample: integer, downsampling factor. 
""" temp = '${PYLEARN2_DATA_PATH}/textures/brodatz/D%i.gif' % textid fname = string_utils.preprocess(temp) img_i = Image.open(fname) img_i = img_i.resize((img_i.size[0]/downsample, img_i.size[1]/downsample), Image.BILINEAR) x = numpy.random.randint(0, img_i.size[0] - os) y = numpy.random.randint(0, img_i.size[1] - os) patch = img_i.crop((x, y, x+os, y+os)) return patch, (x, y) def gendata(enable, os, downsample, textid=None, seed=2313, verbose=False): """ Generate the MNIST+ dataset. :param enable: dictionary of flags with keys ['texture', 'azimuth', 'rotation', 'elevation'] to enable/disable a given factor of variation. :param textid: if enable['texture'], id number of the Brodatz texture to load. If textid is None, we load a random texture for each MNIST image. :param os: output size (width and height) of MNIST+ images. :param downsample: factor by which to downsample texture. :param seed: integer for seeding RNG. :param verbose: bool """ rng = numpy.random.RandomState(seed) data = mnist.MNIST('train') test = mnist.MNIST('test') data.X = numpy.vstack((data.X, test.X)) data.y = numpy.hstack((data.y, test.y)) del test output = {} output['data'] = numpy.zeros((len(data.X), os*os)) output['label'] = numpy.zeros(len(data.y)) if enable['azimuth']: output['azimuth'] = numpy.zeros(len(data.y)) if enable['elevation']: output['elevation'] = numpy.zeros(len(data.y)) if enable['rotation']: output['rotation'] = numpy.zeros(len(data.y)) if enable['texture']: output['texture_id'] = numpy.zeros(len(data.y)) output['texture_pos'] = numpy.zeros((len(data.y), 2)) for i in xrange(len(data.X)): # get MNIST image frgd_img = to_img(data.X[i], 28) frgd_img = frgd_img.convert('L') if enable['rotation']: rot = rng.randint(0, 360) output['rotation'][i] = rot frgd_img = frgd_img.rotate(rot, Image.BILINEAR) frgd_img = frgd_img.resize((os, os), Image.BILINEAR) if enable['texture']: if textid is None: # extract patch from texture database. Note that texture #14 # does not exist. textid = 14 while textid == 14: textid = rng.randint(1, 113) patch_img, (px, py) = extract_patch(textid, os, downsample) patch_arr = to_array(patch_img) # store output details output['texture_id'][i] = textid output['texture_pos'][i] = (px, py) # generate binary mask for digit outline frgd_arr = to_array(frgd_img) mask_arr = frgd_arr > 0.1 # copy contents of masked-MNIST image into background texture blend_arr = copy(patch_arr) blend_arr[mask_arr] = frgd_arr[mask_arr] # this now because the image to emboss frgd_img = to_img(blend_arr, os) azi = 45 if enable['azimuth']: azi = rng.randint(0, 360) output['azimuth'][i] = azi ele = 18. 
if enable['elevation']: ele = rng.randint(0, 60) output['elevation'][i] = ele mboss_img = emboss(frgd_img, azi=azi, ele=ele) mboss_arr = to_array(mboss_img) output['data'][i] = mboss_arr output['label'][i] = data.y[i] if verbose: pl.imshow(mboss_arr.reshape(os, os)) pl.gray() pl.show() fname = 'mnistplus' if enable['azimuth']: fname += "_azi" if enable['rotation']: fname += "_rot" if enable['texture']: fname += "_tex" fp = open(fname+'.pkl','w') pickle.dump(output, fp, protocol=pickle.HIGHEST_PROTOCOL) fp.close() if __name__ == '__main__': parser = OptionParser() parser.add_option('-v', action='store_true', dest='verbose') parser.add_option('--azimuth', action='store_true', dest='azimuth', help='Enable random azimuth for light-source used in embossing.') parser.add_option('--elevation', action='store_true', dest='elevation', help='Enable random elevation for light-source used in embossing.') parser.add_option('--rotation', action='store_true', dest='rotation', help='Randomly rotate MNIST digit prior to embossing.') parser.add_option('--texture', action='store_true', dest='texture', help='Perform joint embossing of fused {MNIST + Texture} image.') parser.add_option('--textid', action='store', type='int', dest='textid', help='If specified, use a single texture ID for all MNIST images.', default=None) parser.add_option('--output_size', action='store', type='int', dest='os', help='Integer specifying size of (square) output images.', default=OUTPUT_SIZE) parser.add_option('--downsample', action='store', type='int', dest='downsample', default=DOWN_SAMPLE, help='Downsampling factor for Brodatz textures.') (opts, args) = parser.parse_args() enable = {'texture': opts.texture, 'azimuth': opts.azimuth, 'rotation': opts.rotation, 'elevation': opts.elevation} gendata(enable=enable, os=opts.os, downsample=opts.downsample, verbose=opts.verbose, textid=opts.textid)
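# ---------------------------------------------------------------------------
# Hedged sketch (not run by the script above): emboss() only needs a 2-D
# grey-level array, so it can be sanity-checked on a synthetic ramp without
# the MNIST or Brodatz data.  The ramp image and the light angles below are
# arbitrary choices for illustration.
# ---------------------------------------------------------------------------


def _emboss_smoke_test(size=OUTPUT_SIZE):
    ramp = numpy.tile(numpy.linspace(0., 255., size), (size, 1))
    embossed = emboss(ramp, azi=45., ele=18., dep=2)
    embossed_arr = numpy.asarray(embossed)
    assert embossed_arr.shape == (size, size)
    assert embossed_arr.dtype == numpy.uint8
    return embossed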
""" This module defines handlers for storing sessions when handles sessions of users connecting to the server. There are two similar but separate stores of sessions: ServerSessionHandler - this stores generic game sessions for the game. These sessions has no knowledge about how they are connected to the world. PortalSessionHandler - this stores sessions created by twisted protocols. These are dumb connectors that handle network communication but holds no game info. """ import time from django.conf import settings from src.commands.cmdhandler import CMD_LOGINSTART from src.utils.utils import variable_from_module, is_iter, \ to_str, to_unicode, strip_control_sequences try: import cPickle as pickle except ImportError: import pickle # delayed imports _PlayerDB = None _ServerSession = None _ServerConfig = None _ScriptDB = None # AMP signals PCONN = chr(1) # portal session connect PDISCONN = chr(2) # portal session disconnect PSYNC = chr(3) # portal session sync SLOGIN = chr(4) # server session login SDISCONN = chr(5) # server session disconnect SDISCONNALL = chr(6) # server session disconnect all SSHUTD = chr(7) # server shutdown SSYNC = chr(8) # server session sync SCONN = chr(9) # server portal connection (for bots) PCONNSYNC = chr(10) # portal post-syncing session # i18n from django.utils.translation import ugettext as _ SERVERNAME = settings.SERVERNAME MULTISESSION_MODE = settings.MULTISESSION_MODE IDLE_TIMEOUT = settings.IDLE_TIMEOUT def delayed_import(): "Helper method for delayed import of all needed entities" global _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB if not _ServerSession: # we allow optional arbitrary serversession class for overloading modulename, classname = settings.SERVER_SESSION_CLASS.rsplit(".", 1) _ServerSession = variable_from_module(modulename, classname) if not _PlayerDB: from src.players.models import PlayerDB as _PlayerDB if not _ServerConfig: from src.server.models import ServerConfig as _ServerConfig if not _ScriptDB: from src.scripts.models import ScriptDB as _ScriptDB # including once to avoid warnings in Python syntax checkers _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB #----------------------------------------------------------- # SessionHandler base class #------------------------------------------------------------ class SessionHandler(object): """ This handler holds a stack of sessions. """ def __init__(self): """ Init the handler. """ self.sessions = {} def get_sessions(self, include_unloggedin=False): """ Returns the connected session objects. """ if include_unloggedin: return self.sessions.values() else: return [session for session in self.sessions.values() if session.logged_in] def get_session(self, sessid): """ Get session by sessid """ return self.sessions.get(sessid, None) def get_all_sync_data(self): """ Create a dictionary of sessdata dicts representing all sessions in store. """ return dict((sessid, sess.get_sync_data()) for sessid, sess in self.sessions.items()) def oobstruct_parser(self, oobstruct): """ Helper method for each session to use to parse oob structures (The 'oob' kwarg of the msg() method). 
Allowed input oob structures are: cmdname ((cmdname,), (cmdname,)) (cmdname,(arg, )) (cmdname,(arg1,arg2)) (cmdname,{key:val,key2:val2}) (cmdname, (args,), {kwargs}) ((cmdname, (arg1,arg2)), cmdname, (cmdname, (arg1,))) outputs an ordered structure on the form ((cmdname, (args,), {kwargs}), ...), where the two last parts of each tuple may be empty """ def _parse(oobstruct): slen = len(oobstruct) if not oobstruct: return tuple(None, (), {}) elif not hasattr(oobstruct, "__iter__"): # a singular command name, without arguments or kwargs return (oobstruct.lower(), (), {}) # regardless of number of args/kwargs, the first element must be # the function name. We will not catch this error if not, but # allow it to propagate. if slen == 1: return (oobstruct[0].lower(), (), {}) elif slen == 2: if isinstance(oobstruct[1], dict): # cmdname, {kwargs} return (oobstruct[0].lower(), (), dict(oobstruct[1])) elif isinstance(oobstruct[1], (tuple, list)): # cmdname, (args,) return (oobstruct[0].lower(), list(oobstruct[1]), {}) else: # cmdname, cmdname return ((oobstruct[0].lower(), (), {}), (oobstruct[1].lower(), (), {})) else: # cmdname, (args,), {kwargs} return (oobstruct[0].lower(), list(oobstruct[1]), dict(oobstruct[2])) if hasattr(oobstruct, "__iter__"): # differentiate between (cmdname, cmdname), # (cmdname, (args), {kwargs}) and ((cmdname,(args),{kwargs}), # (cmdname,(args),{kwargs}), ...) if oobstruct and isinstance(oobstruct[0], basestring): return (list(_parse(oobstruct)),) else: out = [] for oobpart in oobstruct: out.append(_parse(oobpart)) return (list(out),) return (_parse(oobstruct),) #------------------------------------------------------------ # Server-SessionHandler class #------------------------------------------------------------ class ServerSessionHandler(SessionHandler): """ This object holds the stack of sessions active in the game at any time. A session register with the handler in two steps, first by registering itself with the connect() method. This indicates an non-authenticated session. Whenever the session is authenticated the session together with the related player is sent to the login() method. """ # AMP communication methods def __init__(self): """ Init the handler. """ self.sessions = {} self.server = None self.server_data = {"servername": SERVERNAME} def portal_connect(self, portalsession): """ Called by Portal when a new session has connected. Creates a new, unlogged-in game session. portalsession is a dictionary of all property:value keys defining the session and which is marked to be synced. """ delayed_import() global _ServerSession, _PlayerDB, _ScriptDB sess = _ServerSession() sess.sessionhandler = self sess.load_sync_data(portalsession) if sess.logged_in and sess.uid: # this can happen in the case of auto-authenticating # protocols like SSH sess.player = _PlayerDB.objects.get_player_from_uid(sess.uid) sess.at_sync() # validate all scripts _ScriptDB.objects.validate() self.sessions[sess.sessid] = sess sess.data_in(CMD_LOGINSTART) def portal_session_sync(self, portalsessiondata): """ Called by Portal when it wants to update a single session (e.g. because of all negotiation protocols have finally replied) """ sessid = portalsessiondata.get("sessid") session = self.sessions.get(sessid) if session: session.load_sync_data(portalsessiondata) def portal_disconnect(self, sessid): """ Called by Portal when portal reports a closing of a session from the portal side. 
""" session = self.sessions.get(sessid, None) if not session: return player = session.player if player: nsess = len(self.sessions_from_player(player)) remaintext = nsess and "%i session%s remaining" % (nsess, nsess > 1 and "s" or "") or "no more sessions" session.log(_('Connection dropped: %s %s (%s)' % (session.player, session.address, remaintext))) session.at_disconnect() session.disconnect() del self.sessions[session.sessid] def portal_sessions_sync(self, portalsessions): """ Syncing all session ids of the portal with the ones of the server. This is instantiated by the portal when reconnecting. portalsessions is a dictionary {sessid: {property:value},...} defining each session and the properties in it which should be synced. """ delayed_import() global _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB for sess in self.sessions.values(): # we delete the old session to make sure to catch eventual # lingering references. del sess for sessid, sessdict in portalsessions.items(): sess = _ServerSession() sess.sessionhandler = self sess.load_sync_data(sessdict) if sess.uid: sess.player = _PlayerDB.objects.get_player_from_uid(sess.uid) self.sessions[sessid] = sess sess.at_sync() # after sync is complete we force-validate all scripts # (this also starts them) init_mode = _ServerConfig.objects.conf("server_restart_mode", default=None) _ScriptDB.objects.validate(init_mode=init_mode) _ServerConfig.objects.conf("server_restart_mode", delete=True) # announce the reconnection self.announce_all(_(" ... Server restarted.")) # server-side access methods def start_bot_session(self, protocol_path, configdict): """ This method allows the server-side to force the Portal to create a new bot session using the protocol specified by protocol_path, which should be the full python path to the class, including the class name, like "src.server.portal.irc.IRCClient". The new session will use the supplied player-bot uid to initiate an already logged-in connection. The Portal will treat this as a normal connection and henceforth so will the Server. """ data = {"protocol_path":protocol_path, "config":configdict} self.server.amp_protocol.call_remote_PortalAdmin(0, operation=SCONN, data=data) def portal_shutdown(self): """ Called by server when shutting down the portal. """ self.server.amp_protocol.call_remote_PortalAdmin(0, operation=SSHUTD, data="") def login(self, session, player, testmode=False): """ Log in the previously unloggedin session and the player we by now should know is connected to it. After this point we assume the session to be logged in one way or another. testmode - this is used by unittesting for faking login without any AMP being actually active """ # we have to check this first before uid has been assigned # this session. if not self.sessions_from_player(player): player.is_connected = True # sets up and assigns all properties on the session session.at_login(player) # player init player.at_init() # Check if this is the first time the *player* logs in if player.db.FIRST_LOGIN: player.at_first_login() del player.db.FIRST_LOGIN player.at_pre_login() if MULTISESSION_MODE == 0: # disconnect all previous sessions. 
self.disconnect_duplicate_sessions(session) nsess = len(self.sessions_from_player(player)) totalstring = "%i session%s total" % (nsess, nsess > 1 and "s" or "") session.log(_('Logged in: %s %s (%s)' % (player, session.address, totalstring))) session.logged_in = True # sync the portal to the session sessdata = session.get_sync_data() if not testmode: self.server.amp_protocol.call_remote_PortalAdmin(session.sessid, operation=SLOGIN, data=sessdata) player.at_post_login(sessid=session.sessid) def disconnect(self, session, reason=""): """ Called from server side to remove session and inform portal of this fact. """ session = self.sessions.get(session.sessid) if not session: return if hasattr(session, "player") and session.player: # only log accounts logging off nsess = len(self.sessions_from_player(session.player)) - 1 remaintext = nsess and "%i session%s remaining" % (nsess, nsess > 1 and "s" or "") or "no more sessions" session.log(_('Logged out: %s %s (%s)' % (session.player, session.address, remaintext))) session.at_disconnect() sessid = session.sessid del self.sessions[sessid] # inform portal that session should be closed. self.server.amp_protocol.call_remote_PortalAdmin(sessid, operation=SDISCONN, data=reason) def all_sessions_portal_sync(self): """ This is called by the server when it reboots. It syncs all session data to the portal. Returns a deferred! """ sessdata = self.get_all_sync_data() return self.server.amp_protocol.call_remote_PortalAdmin(0, operation=SSYNC, data=sessdata) def disconnect_all_sessions(self, reason=_("You have been disconnected.")): """ Cleanly disconnect all of the connected sessions. """ for session in self.sessions: del session # tell portal to disconnect all sessions self.server.amp_protocol.call_remote_PortalAdmin(0, operation=SDISCONNALL, data=reason) def disconnect_duplicate_sessions(self, curr_session, reason=_("Logged in from elsewhere. Disconnecting.")): """ Disconnects any existing sessions with the same user. """ uid = curr_session.uid doublet_sessions = [sess for sess in self.sessions.values() if sess.logged_in and sess.uid == uid and sess != curr_session] for session in doublet_sessions: self.disconnect(session, reason) def validate_sessions(self): """ Check all currently connected sessions (logged in and not) and see if any are dead. """ tcurr = time.time() reason = _("Idle timeout exceeded, disconnecting.") for session in (session for session in self.sessions.values() if session.logged_in and IDLE_TIMEOUT > 0 and (tcurr - session.cmd_last) > IDLE_TIMEOUT): self.disconnect(session, reason=reason) def player_count(self): """ Get the number of connected players (not sessions since a player may have more than one session depending on settings). Only logged-in players are counted here. 
""" return len(set(session.uid for session in self.sessions.values() if session.logged_in)) def session_from_sessid(self, sessid): """ Return session based on sessid, or None if not found """ if is_iter(sessid): return [self.sessions.get(sid) for sid in sessid if sid in self.sessions] return self.sessions.get(sessid) def session_from_player(self, player, sessid): """ Given a player and a session id, return the actual session object """ if is_iter(sessid): sessions = [self.sessions.get(sid) for sid in sessid] s = [sess for sess in sessions if sess and sess.logged_in and player.uid == sess.uid] return s session = self.sessions.get(sessid) return session and session.logged_in and player.uid == session.uid and session or None def sessions_from_player(self, player): """ Given a player, return all matching sessions. """ uid = player.uid return [session for session in self.sessions.values() if session.logged_in and session.uid == uid] def sessions_from_character(self, character): """ Given a game character, return any matching sessions. """ sessid = character.sessid.get() if is_iter(sessid): return [self.sessions.get(sess) for sess in sessid if sessid in self.sessions] return self.sessions.get(sessid) def announce_all(self, message): """ Send message to all connected sessions """ for sess in self.sessions.values(): self.data_out(sess, message) def data_out(self, session, text="", **kwargs): """ Sending data Server -> Portal """ text = text and to_str(to_unicode(text), encoding=session.encoding) self.server.amp_protocol.call_remote_MsgServer2Portal(sessid=session.sessid, msg=text, data=kwargs) def data_in(self, sessid, text="", **kwargs): """ Data Portal -> Server """ session = self.sessions.get(sessid, None) if session: text = text and to_unicode(strip_control_sequences(text), encoding=session.encoding) session.data_in(text=text, **kwargs) SESSIONS = ServerSessionHandler()
from django.conf import settings from django.contrib import messages as django_messages from django.core.exceptions import ObjectDoesNotExist from django.http import Http404, HttpResponseRedirect from django.urls import reverse, reverse_lazy from django.views.generic import ( CreateView, DetailView, FormView, ListView, TemplateView, UpdateView, View, ) from common.mixins import LargePanelMixin, PrivateMixin from common.views import BaseOAuth2AuthorizationView from data_import.models import DataType # TODO: move this to common from open_humans.mixins import SourcesContextMixin from .forms import ( MessageProjectMembersForm, OAuth2DataRequestProjectForm, OnSiteDataRequestProjectForm, RemoveProjectMembersForm, SelectDatatypesForm, ) from .models import ( ActivityFeed, DataRequestProject, DataRequestProjectMember, OAuth2DataRequestProject, OnSiteDataRequestProject, ) MAX_UNAPPROVED_MEMBERS = settings.MAX_UNAPPROVED_MEMBERS class CoordinatorOrActiveMixin(object): """ - Always let the coordinator view this page - Only let members view it if the project is active - Only let members view it if the project is not approved and less than MAX_UNAPPROVED_MEMBERS have joined. """ def dispatch(self, *args, **kwargs): project = self.get_object() if project.coordinator == self.request.user: return super(CoordinatorOrActiveMixin, self).dispatch(*args, **kwargs) if not project.active: raise Http404 if not project.approved and project.authorized_members > MAX_UNAPPROVED_MEMBERS: django_messages.error( self.request, ( """Sorry, "{}" has not been approved and has exceeded the {} member limit for unapproved projects.""".format( project.name, MAX_UNAPPROVED_MEMBERS ) ), ) return HttpResponseRedirect(reverse("my-member-data")) return super(CoordinatorOrActiveMixin, self).dispatch(*args, **kwargs) class ProjectMemberMixin(object): """ Add project_member and related helper methods. 
""" @property def project_member(self): project = self.get_object() project_member, _ = DataRequestProjectMember.objects.get_or_create( member=self.request.user.member, project=project ) return project_member @property def project_joined_by_member(self): return self.project_member and self.project_member.joined @property def project_authorized_by_member(self): return self.project_member and self.project_member.authorized def authorize_member(self, hidden): project = self.get_object() self.request.user.log( "direct-sharing:{0}:authorize".format(project.type), {"project-id": project.id}, ) django_messages.success( self.request, ('You have successfully joined the project "{}".'.format(project.name)), ) if ( project.approved and not ActivityFeed.objects.filter( member=self.project_member.member, project=project, action="joined-project", ).exists() ): event = ActivityFeed( member=self.project_member.member, project=project, action="joined-project", ) event.save() project_member = self.project_member # The OAuth2 projects have join and authorize in the same step if project.type == "oauth2": project_member.joined = True project_member.authorized = True project_member.revoked = False project_member.username_shared = project.request_username_access project_member.all_sources_shared = project.all_sources_access project_member.visible = not hidden # visible is the opposite of hidden project_member.erasure_requested = None project_member.save() # if this is a new DataRequestProjectMember object, the docs state that # manytomany fields should be saved separately from initial creation project_member.granted_sources.set(project.requested_sources.all()) class OnSiteDetailView(ProjectMemberMixin, CoordinatorOrActiveMixin, DetailView): """ A base DetailView for on-site projects. """ model = OnSiteDataRequestProject class JoinOnSiteDataRequestProjectView(PrivateMixin, LargePanelMixin, OnSiteDetailView): """ Display the consent form for a project. """ template_name = "private_sharing/join-on-site.html" def get_login_message(self): project = self.get_object() return 'Please log in to join "{0}"'.format(project.name) def get(self, request, *args, **kwargs): """ If the member has already accepted the consent form redirect them to the authorize page. """ if self.project_joined_by_member: return HttpResponseRedirect( reverse_lazy( "direct-sharing:authorize-on-site", kwargs={"slug": self.get_object().slug}, ) ) return super().get(request, *args, **kwargs) # pylint: disable=unused-argument def post(self, request, *args, **kwargs): project = self.get_object() project_member = self.project_member project_member.joined = True # store the consent text that the user has consented to project_member.consent_text = project.consent_text # if the user joins again after revoking the study then reset their # revoked and authorized status project_member.revoked = False project_member.authorized = False project_member.save() request.user.log("direct-sharing:on-site:consent", {"project-id": project.id}) return HttpResponseRedirect( reverse_lazy( "direct-sharing:authorize-on-site", kwargs={"slug": project.slug} ) ) class ConnectedSourcesMixin(object): """ Add context for connected/unconnected sources. 
""" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) project = self.get_object() requested_sources = project.requested_sources.all() context.update( { "project_authorized_by_member": self.project_authorized_by_member, "sources": requested_sources, } ) return context class AuthorizeOnSiteDataRequestProjectView( PrivateMixin, LargePanelMixin, ConnectedSourcesMixin, OnSiteDetailView ): """ Display the requested permissions for a project. """ template_name = "private_sharing/authorize-on-site.html" def get_login_message(self): project = self.get_object() return 'Please log in to authorize "{0}"'.format(project.name) def get(self, request, *args, **kwargs): """ If the member hasn't already accepted the consent form redirect them to the consent form page. """ # the opposite of the test in the join page if not self.project_joined_by_member: return HttpResponseRedirect( reverse_lazy( "direct-sharing:join-on-site", kwargs={"slug": self.get_object().slug}, ) ) return super().get(request, *args, **kwargs) # pylint: disable=unused-argument def post(self, request, *args, **kwargs): # repeating this because making a function for these two lines # would add more complexity than it would save. if not self.project_joined_by_member: return HttpResponseRedirect( reverse_lazy( "direct-sharing:join-on-site", kwargs={"slug": self.get_object().slug}, ) ) if self.request.POST.get("cancel") == "cancel": self.project_member.delete() return HttpResponseRedirect(reverse("home")) if self.request.POST.get("hide-membership") == "hidden_membership": hidden = True else: hidden = False self.authorize_member(hidden) project = self.get_object() if project.post_sharing_url: redirect_url = project.post_sharing_url.replace( "PROJECT_MEMBER_ID", self.project_member.project_member_id ) else: redirect_url = reverse("activity", kwargs={"slug": project.slug}) return HttpResponseRedirect(redirect_url) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update( {"project": self.get_object(), "username": self.request.user.username} ) return context class AuthorizeOAuth2ProjectView( ConnectedSourcesMixin, ProjectMemberMixin, BaseOAuth2AuthorizationView ): """ Override oauth2_provider view to add origin, context, and customize login prompt. """ template_name = "private_sharing/authorize-oauth2.html" def dispatch(self, *args, **kwargs): try: if not self.application.oauth2datarequestproject: raise Http404 except (ObjectDoesNotExist, AttributeError): raise Http404 if not self.application.oauth2datarequestproject.active: return HttpResponseRedirect(reverse("direct-sharing:authorize-inactive")) return super().dispatch(*args, **kwargs) def get_object(self): return self.application.oauth2datarequestproject def post(self, request, *args, **kwargs): """ Get whether or not the member has requested hidden membership. """ self.hidden = request.POST.get("hide-membership", None) return super().post(request, *args, **kwargs) def form_valid(self, form): """ Override the OAuth2 AuthorizationView's form_valid to authorize a project member if the user authorizes the OAuth2 request. 
""" allow = form.cleaned_data.get("allow") if allow: if self.hidden == "hidden_membership": hidden = True else: hidden = False self.authorize_member(hidden) return super().form_valid(form) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update( { "object": self.get_object(), "project": self.get_object(), # XXX: BaseOAuth2AuthorizationView doesn't provide the request # context for some reason "request": self.request, "username": self.request.user.username, } ) return context class CoordinatorOnlyView(View): """ Only let coordinators and superusers view these pages. """ def dispatch(self, *args, **kwargs): self.object = self.get_object() if self.object.coordinator.user != self.request.user: if not self.request.user.is_superuser: raise Http404 return super().dispatch(*args, **kwargs) class UpdateDataRequestProjectView( PrivateMixin, LargePanelMixin, CoordinatorOnlyView, UpdateView ): """ Base view for updating a project. """ success_url = reverse_lazy("direct-sharing:manage-projects") def get_login_message(self): project = self.get_object() return 'Please log in to edit "{0}"'.format(project.name) class CreateDataRequestProjectView(PrivateMixin, LargePanelMixin, CreateView): """ Base view for creating a project. """ login_message = "Please log in to create a project." success_url = reverse_lazy("direct-sharing:manage-projects") def form_valid(self, form): """ Override to add current user as coordinator. """ form.instance.coordinator = self.request.user.member return super().form_valid(form) class CreateOAuth2DataRequestProjectView(CreateDataRequestProjectView): """ Create an OAuth2DataRequestProject. """ template_name = "private_sharing/create-project.html" model = OAuth2DataRequestProject form_class = OAuth2DataRequestProjectForm class CreateOnSiteDataRequestProjectView(CreateDataRequestProjectView): """ Create an OnSiteDataRequestProject. """ template_name = "private_sharing/create-project.html" model = OnSiteDataRequestProject form_class = OnSiteDataRequestProjectForm class UpdateOAuth2DataRequestProjectView(UpdateDataRequestProjectView): """ Update an OAuth2DataRequestProject. """ template_name = "private_sharing/update-project.html" model = OAuth2DataRequestProject form_class = OAuth2DataRequestProjectForm def get_initial(self): """ Populate the form with common DataRequestProject bits """ initial = super().get_initial() initial["enrollment_url"] = self.object.enrollment_url initial["redirect_url"] = self.object.redirect_url initial["deauth_webhook"] = self.object.deauth_webhook return initial class UpdateOnSiteDataRequestProjectView(UpdateDataRequestProjectView): """ Update an OnSiteDataRequestProject. """ template_name = "private_sharing/update-project.html" model = OnSiteDataRequestProject form_class = OnSiteDataRequestProjectForm def get_initial(self): """ Populate the form with common DataRequestProject bits """ initial = super().get_initial() initial["consent_text"] = self.object.consent_text initial["post_sharing_url"] = self.object.post_sharing_url return initial class RefreshTokenMixin(object): """ A mixin that adds a POST handler for refreshing a project's token. """ # pylint: disable=unused-argument def post(self, request, *args, **kwargs): if self.request.POST.get("refresh_token") == "refresh_token": self.object.refresh_token() return self.get(request, *args, **kwargs) class OAuth2DataRequestProjectDetailView( PrivateMixin, CoordinatorOnlyView, RefreshTokenMixin, DetailView ): """ Display an OAuth2DataRequestProject. 
""" template_name = "private_sharing/project-detail.html" model = OAuth2DataRequestProject def get_login_message(self): project = self.get_object() msg = 'Please log in to view project information for "{0}".'.format( project.name ) return msg class OnSiteDataRequestProjectDetailView( PrivateMixin, CoordinatorOnlyView, RefreshTokenMixin, DetailView ): """ Display an OnSiteDataRequestProject. """ template_name = "private_sharing/project-detail.html" model = OnSiteDataRequestProject def get_login_message(self): project = self.get_object() msg = 'Please log in to view project information for "{0}".'.format( project.name ) return msg class ManageDataRequestActivitiesView(PrivateMixin, TemplateView): """ A view for listing all data request activities for the current user. """ login_message = "Please log in to manage your projects." template_name = "private_sharing/manage.html" def get_context_data(self, **kwargs): context = super(ManageDataRequestActivitiesView, self).get_context_data( **kwargs ) query = {"coordinator__user": self.request.user} oauth2 = OAuth2DataRequestProject.objects.filter(**query) onsite = OnSiteDataRequestProject.objects.filter(**query) context.update({"onsite": onsite, "oauth2": oauth2}) return context class InDevelopmentView(TemplateView): """ Add in-development projects to template context. """ template_name = "private_sharing/in-development.html" def get_context_data(self, **kwargs): context = super(InDevelopmentView, self).get_context_data(**kwargs) context.update( {"projects": DataRequestProject.objects.filter(approved=False, active=True)} ) return context class OverviewView(SourcesContextMixin, TemplateView): """ Add current sources to template context. """ template_name = "direct-sharing/overview.html" class ProjectLeaveView(PrivateMixin, DetailView): """ Let a member remove themselves from a project. """ template_name = "private_sharing/leave-project.html" model = DataRequestProjectMember # pylint: disable=unused-argument def post(self, *args, **kwargs): project_member = self.get_object() remove_datafiles = self.request.POST.get("remove_datafiles", "off") == "on" erasure_requested = self.request.POST.get("erasure_requested", "off") == "on" done_by = "self" project_member.leave_project( remove_datafiles=remove_datafiles, done_by=done_by, erasure_requested=erasure_requested, ) if "next" in self.request.GET: return HttpResponseRedirect(self.request.GET["next"]) else: return HttpResponseRedirect(reverse("my-member-connections")) class BaseProjectMembersView(PrivateMixin, CoordinatorOnlyView, DetailView, FormView): """ Base class for views for coordinators to take bulk action on proj members. """ model = DataRequestProject def get_login_message(self): project = self.get_object() return 'Please log in to work on "{0}".'.format(project.name) def get_form_kwargs(self, *args, **kwargs): kwargs = super(BaseProjectMembersView, self).get_form_kwargs(*args, **kwargs) kwargs["project"] = self.get_object() return kwargs def get_success_url(self): project = self.get_object() return reverse_lazy( "direct-sharing:detail-{}".format(project.type), kwargs={"slug": project.slug}, ) class MessageProjectMembersView(BaseProjectMembersView): """ A view for coordinators to message their project members. 
""" form_class = MessageProjectMembersForm template_name = "private_sharing/message-project-members.html" def form_valid(self, form): form.send_messages(self.get_object()) django_messages.success(self.request, "Your message was sent successfully.") return super(MessageProjectMembersView, self).form_valid(form) class RemoveProjectMembersView(BaseProjectMembersView): """ A view for coordinators to remove project members. """ form_class = RemoveProjectMembersForm template_name = "private_sharing/remove-project-members.html" def form_valid(self, form): form.remove_members(self.get_object()) django_messages.success(self.request, "Project member(s) removed.") return super(RemoveProjectMembersView, self).form_valid(form) class DataRequestProjectWithdrawnView(PrivateMixin, CoordinatorOnlyView, ListView): """ A view for coordinators to list members that have requested data removal. """ model = DataRequestProject paginate_by = 100 template_name = "private_sharing/project-withdrawn-members-view.html" def get_login_message(self): project = self.get_object() return 'Please log in to work on "{0}".'.format(project.name) def withdrawn_members(self): """ Returns a queryset with the members that have requested data erasure. """ return self.object.project_members.get_queryset().filter(revoked=True) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["object"] = self.object context["object_list"] = self.withdrawn_members() return context def get_object(self, queryset=None): """ Impliment get_object as a convenience funtion. """ slug = self.request.path.split("/")[4] if queryset is None: queryset = self.get_queryset() self.object = queryset.get(slug=slug) return self.object class SelectDatatypesView(PrivateMixin, CoordinatorOnlyView, UpdateView): """ Select the datatypes for a project. """ form_class = SelectDatatypesForm model = DataRequestProject success_url = reverse_lazy("direct-sharing:manage-projects") template_name = "private_sharing/select-datatypes.html" def dispatch(self, *args, **kwargs): """ Override dispatch to redirect if project is approved """ self.object = self.get_object() if self.object.approved: django_messages.error( self.request, ( "Sorry, {0} has been approved and the project's datatypes cannot be changed " "without re-approval.".format(self.object.name) ), ) return HttpResponseRedirect( reverse( "direct-sharing:detail-{0}".format(self.object.type), kwargs={"slug": self.object.slug}, ) ) return super().dispatch(*args, **kwargs) def get_context_data(self, *args, **kwargs): context = super().get_context_data(*args, **kwargs) datatypes_sorted = DataType.sorted_by_ancestors() try: max_depth = max([i["depth"] for i in datatypes_sorted]) except ValueError: max_depth = 0 context.update({"datatypes_sorted": datatypes_sorted, "max_depth": max_depth}) return context def get_success_url(self): return reverse_lazy( "direct-sharing:detail-{0}".format(self.object.type), args=[self.object.slug], )
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: vmware_vmkernel_facts short_description: Gathers VMKernel facts about an ESXi host description: - This module can be used to gather VMKernel facts about an ESXi host from given ESXi hostname or cluster name. version_added: '2.5' author: - Abhijeet Kasurde (@Akasurde) notes: - Tested on vSphere 6.5 requirements: - python >= 2.6 - PyVmomi options: cluster_name: description: - Name of the cluster. - VMKernel facts about each ESXi server will be returned for the given cluster. - If C(esxi_hostname) is not given, this parameter is required. esxi_hostname: description: - ESXi hostname. - VMKernel facts about this ESXi server will be returned. - If C(cluster_name) is not given, this parameter is required. extends_documentation_fragment: vmware.documentation ''' EXAMPLES = r''' - name: Gather VMKernel facts about all ESXi Host in given Cluster vmware_vmkernel_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' cluster_name: cluster_name delegate_to: localhost register: cluster_host_vmks - name: Gather VMKernel facts about ESXi Host vmware_vmkernel_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmks ''' RETURN = r''' host_vmk_facts: description: metadata about VMKernel present on given host system returned: success type: dict sample: { "10.76.33.208": [ { "device": "vmk0", "dhcp": true, "enable_ft": false, "enable_management": true, "enable_vmotion": false, "enable_vsan": false, "ipv4_address": "10.76.33.28", "ipv4_subnet_mask": "255.255.255.0", "key": "key-vim.host.VirtualNic-vmk0", "mac": "52:54:00:12:50:ce", "mtu": 1500, "portgroup": "Management Network", "stack": "defaultTcpipStack" }, ] } ''' try: from pyVmomi import vim, vmodl except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi from ansible.module_utils._text import to_native class VmkernelFactsManager(PyVmomi): def __init__(self, module): super(VmkernelFactsManager, self).__init__(module) cluster_name = self.params.get('cluster_name', None) esxi_host_name = self.params.get('esxi_hostname', None) self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) self.service_type_vmks = dict() self.get_all_vmks_by_service_type() def get_all_vmks_by_service_type(self): """ Function to return information about service types and VMKernel """ for host in self.hosts: self.service_type_vmks[host.name] = dict(vmotion=[], vsan=[], management=[], faultToleranceLogging=[]) for service_type in self.service_type_vmks[host.name].keys(): vmks_list = self.query_service_type_for_vmks(host, service_type) self.service_type_vmks[host.name][service_type] = vmks_list def query_service_type_for_vmks(self, host_system, service_type): """ Function to return list of VMKernels Args: host_system: Host system managed object service_type: Name of service type Returns: List of VMKernel which belongs to that service type """ vmks_list = [] query = 
None try: query = host_system.configManager.virtualNicManager.QueryNetConfig(service_type) except vim.fault.HostConfigFault as config_fault: self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" " host config fault : %s" % (service_type, to_native(config_fault.msg))) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" " invalid arguments : %s" % (service_type, to_native(invalid_argument.msg))) except Exception as e: self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" "%s" % (service_type, to_native(e))) if not query.selectedVnic: return vmks_list selected_vnics = [vnic for vnic in query.selectedVnic] vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics] return vnics_with_service_type def gather_host_vmk_facts(self): hosts_facts = {} for host in self.hosts: host_vmk_facts = [] host_network_system = host.config.network if host_network_system: vmks_config = host.config.network.vnic for vmk in vmks_config: host_vmk_facts.append(dict( device=vmk.device, key=vmk.key, portgroup=vmk.portgroup, ipv4_address=vmk.spec.ip.ipAddress, ipv4_subnet_mask=vmk.spec.ip.subnetMask, dhcp=vmk.spec.ip.dhcp, mac=vmk.spec.mac, mtu=vmk.spec.mtu, stack=vmk.spec.netStackInstanceKey, enable_vsan=vmk.device in self.service_type_vmks[host.name]['vsan'], enable_vmotion=vmk.device in self.service_type_vmks[host.name]['vmotion'], enable_management=vmk.device in self.service_type_vmks[host.name]['management'], enable_ft=vmk.device in self.service_type_vmks[host.name]['faultToleranceLogging'], ) ) hosts_facts[host.name] = host_vmk_facts return hosts_facts def main(): argument_spec = vmware_argument_spec() argument_spec.update( cluster_name=dict(type='str', required=False), esxi_hostname=dict(type='str', required=False), ) module = AnsibleModule( argument_spec=argument_spec, required_one_of=[ ['cluster_name', 'esxi_hostname'], ], supports_check_mode=True ) vmware_vmk_config = VmkernelFactsManager(module) module.exit_json(changed=False, host_vmk_facts=vmware_vmk_config.gather_host_vmk_facts()) if __name__ == "__main__": main()
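
# --- Illustration (appended sketch, not part of the original module) ---
# query_service_type_for_vmks() relies on query.selectedVnic holding vnic
# *keys* while query.candidateVnic holds vnic objects, so the comprehension
# keeps the device names of candidates whose key was selected.  The helper and
# the namedtuple values below are hypothetical and only isolate that mapping.
def _selected_device_names(selected_keys, candidate_vnics):
    """Return device names of candidate vnics whose key was selected."""
    return [vnic.device for vnic in candidate_vnics if vnic.key in selected_keys]

# Example (hypothetical values):
#   from collections import namedtuple
#   Vnic = namedtuple('Vnic', ['key', 'device'])
#   _selected_device_names(
#       ['key-vim.host.VirtualNic-vmk0'],
#       [Vnic('key-vim.host.VirtualNic-vmk0', 'vmk0'),
#        Vnic('key-vim.host.VirtualNic-vmk1', 'vmk1')])
#   -> ['vmk0']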
# -*- coding: utf-8 -*- import logging import unittest from stalker import (db, Repository, Project, Structure, FilenameTemplate, Status, StatusList, Task, Version) from stalker.db import DBSession from anima.dcc.base import DCCBase logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) class EnvironmentBaseTestCase(unittest.TestCase): """tests the DCCBase class """ @classmethod def setUpClass(cls): """set up the test in class level """ DBSession.remove() DBSession.configure(extension=None) @classmethod def tearDownClass(cls): """cleanup the test """ DBSession.remove() DBSession.configure(extension=None) def setUp(self): """set up the test """ db.setup({'sqlalchemy.url': 'sqlite:///:memory:'}) def test_get_version_from_full_path_with_multiple_repositories(self): """testing if the get version from full path is working fine with multiple repositories and with same version names """ repo1 = Repository( name='Test Repo 1', linux_path='/mnt/T/', windows_path='T:/', osx_path='/Volumes/T/' ) DBSession.add(repo1) repo2 = Repository( name='Test Repo 2', linux_path='/mnt/S/', windows_path='S:/', osx_path='/Volumes/S/' ) DBSession.add(repo2) task_ft = FilenameTemplate( name='Task Filename Template', target_entity_type='Task', path='$REPO{{project.repository.code}}/{{project.code}}/' '{%- for parent_task in parent_tasks -%}' '{{parent_task.nice_name}}/{%- endfor -%}', filename='{{task.nice_name}}_{{version.take_name}}' '_v{{"%03d"|format(version.version_number)}}', ) DBSession.add(task_ft) structure1 = Structure( name='Commercial Project Structure', templates=[task_ft] ) DBSession.add(structure1) status1 = Status(name='Status 1', code='STS1') status2 = Status(name='Status 2', code='STS2') status3 = Status(name='Status 3', code='STS3') DBSession.add_all([status1, status2, status3]) proj_status_list = \ StatusList.query.filter_by(target_entity_type='Project').first() task_status_list = \ StatusList.query.filter_by(target_entity_type='Task').first() version_status_list = StatusList( name='Version Statuses', target_entity_type='Version', statuses=[status1, status2, status3] ) DBSession.add(version_status_list) project1 = Project( name='Test Project 1', code='TP1', repositories=[repo1], structure=structure1, status_list=proj_status_list ) DBSession.add(project1) project2 = Project( name='Test Project 2', code='TP2', repositories=[repo2], structure=structure1, status_list=proj_status_list ) DBSession.add(project2) task1 = Task( name='Test Task 1', code='TT1', project=project1, status_list=task_status_list ) DBSession.add(task1) task2 = Task( name='Test Task 1', code='TT1', project=project2, status_list=task_status_list ) DBSession.add(task2) DBSession.commit() # now create versions version1 = Version( task=task1, status_list=version_status_list ) DBSession.add(version1) DBSession.commit() version1.update_paths() version2 = Version( task=task2, status_list=version_status_list ) DBSession.add(version2) DBSession.commit() version2.update_paths() DBSession.commit() logger.debug('version1.full_path : %s' % version1.full_path) logger.debug('version2.full_path : %s' % version2.full_path) # now try to get the versions with an DCCBase instance dcc = DCCBase() # version1 version1_found = dcc.get_version_from_full_path( '/mnt/T/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version1_found, version1) # version2 version2_found = dcc.get_version_from_full_path( '/mnt/S/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version2_found, version2) # version1 in windows version1_found = 
dcc.get_version_from_full_path( 'T:/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version1_found, version1) # version2 in windows version2_found = dcc.get_version_from_full_path( 'S:/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version2_found, version2) # version1 in linux version1_found = dcc.get_version_from_full_path( '/mnt/T/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version1_found, version1) # version2 in linux version2_found = dcc.get_version_from_full_path( '/mnt/S/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version2_found, version2) # version1 in osx version1_found = dcc.get_version_from_full_path( '/Volumes/T/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version1_found, version1) # version2 in osx version2_found = dcc.get_version_from_full_path( '/Volumes/S/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(version2_found, version2) def test_get_versions_from_path_handles_empty_and_None_path(self): """testing if no errors will be raised for a path which is None or an empty string """ dcc = DCCBase() versions = dcc.get_versions_from_path('') self.assertEqual(versions, []) versions = dcc.get_versions_from_path(None) self.assertEqual(versions, []) def test_get_versions_from_path_with_multiple_repositories(self): """testing if the get versions_from_path is working fine with multiple repositories and with same version names """ repo0 = Repository( name='Test Repo 0', linux_path='/mnt/T/with_a_very_long_path_which_will_cause_errors/', windows_path='T:/with_a_very_long_path_which_will_cause_errors/', osx_path='/Volumes/T/' 'with_a_very_long_path_which_will_cause_errors/' ) DBSession.add(repo0) repo1 = Repository( name='Test Repo 1', linux_path='/mnt/T/', windows_path='T:/', osx_path='/Volumes/T/' ) DBSession.add(repo1) repo2 = Repository( name='Test Repo 2', linux_path='/mnt/S/', windows_path='S:/', osx_path='/Volumes/S/' ) DBSession.add(repo2) task_ft = FilenameTemplate( name='Task Filename Template', target_entity_type='Task', path='$REPO{{project.repository.code}}/' '{{project.code}}/{%- for parent_task in parent_tasks -%}' '{{parent_task.nice_name}}/{%- endfor -%}', filename='{{task.nice_name}}_{{version.take_name}}' '_v{{"%03d"|format(version.version_number)}}', ) DBSession.add(task_ft) structure1 = Structure( name='Commercial Project Structure', templates=[task_ft] ) DBSession.add(structure1) status1 = Status(name='Status 1', code='STS1') status2 = Status(name='Status 2', code='STS2') status3 = Status(name='Status 3', code='STS3') DBSession.add_all([status1, status2, status3]) proj_status_list = \ StatusList.query.filter_by(target_entity_type='Project').first() task_status_list = \ StatusList.query.filter_by(target_entity_type='Task').first() project1 = Project( name='Test Project 1', code='TP1', repositories=[repo1], structure=structure1, status_list=proj_status_list ) DBSession.add(project1) project2 = Project( name='Test Project 2', code='TP2', repositories=[repo2], structure=structure1, status_list=proj_status_list ) DBSession.add(project2) task1 = Task( name='Test Task 1', code='TT1', project=project1, status_list=task_status_list ) DBSession.add(task1) task2 = Task( name='Test Task 1', code='TT1', project=project2, status_list=task_status_list ) DBSession.add(task2) DBSession.commit() # now create versions version1 = Version( task=task1 ) DBSession.add(version1) DBSession.commit() version1.update_paths() version2 = Version( task=task1 ) DBSession.add(version2) DBSession.commit() version2.update_paths() 
version3 = Version( task=task2 ) DBSession.add(version3) DBSession.commit() version3.update_paths() version4 = Version( task=task2 ) DBSession.add(version4) DBSession.commit() version4.update_paths() DBSession.commit() logger.debug('version1.full_path : %s' % version1.full_path) logger.debug('version2.full_path : %s' % version2.full_path) logger.debug('version3.full_path : %s' % version2.full_path) logger.debug('version4.full_path : %s' % version2.full_path) # now try to get the versions with an DCCBase instance dcc = DCCBase() # version1, version2 versions_found = dcc.get_versions_from_path( '/mnt/T/TP1/Test_Task_1' ) self.assertEqual(versions_found, [version1, version2]) # version3, version4 versions_found = dcc.get_versions_from_path( '/mnt/S/TP2/Test_Task_1' ) self.assertEqual(versions_found, [version3, version4]) # version1, version2 in windows versions_found = dcc.get_versions_from_path( 'T:/TP1/Test_Task_1' ) self.assertEqual(versions_found, [version1, version2]) # version3, version4 in windows versions_found = dcc.get_versions_from_path( 'S:/TP2/Test_Task_1' ) self.assertEqual(versions_found, [version3, version4]) # version1, version2 in linux versions_found = dcc.get_versions_from_path( '/mnt/T/TP1/Test_Task_1' ) self.assertEqual(versions_found, [version1, version2]) # version3, version4 in linux versions_found = dcc.get_versions_from_path( '/mnt/S/TP2/Test_Task_1' ) self.assertEqual(versions_found, [version3, version4]) # version1, version2 in osx versions_found = dcc.get_versions_from_path( '/Volumes/T/TP1/Test_Task_1' ) self.assertEqual(versions_found, [version1, version2]) # version3, version4 in linux versions_found = dcc.get_versions_from_path( '/Volumes/S/TP2/Test_Task_1' ) self.assertEqual(versions_found, [version3, version4]) def test_trim_repo_path_with_multiple_repositories(self): """testing if the trim_repo_path is working fine with multiple repositories and with same version names """ repo0 = Repository( name='Test Repo 0', linux_path='/mnt/T/with_a_very_long_path_which_will_cause_errors/', windows_path='T:/with_a_very_long_path_which_will_cause_errors/', osx_path='/Volumes/T/' 'with_a_very_long_path_which_will_cause_errors/' ) DBSession.add(repo0) repo1 = Repository( name='Test Repo 1', linux_path='/mnt/T/', windows_path='T:/', osx_path='/Volumes/T/' ) DBSession.add(repo1) repo2 = Repository( name='Test Repo 2', linux_path='/mnt/S/', windows_path='S:/', osx_path='/Volumes/S/' ) DBSession.add(repo2) task_ft = FilenameTemplate( name='Task Filename Template', target_entity_type='Task', path='{{project.code}}/{%- for parent_task in parent_tasks -%}' '{{parent_task.nice_name}}/{%- endfor -%}', filename='{{task.nice_name}}_{{version.take_name}}' '_v{{"%03d"|format(version.version_number)}}', ) DBSession.add(task_ft) structure1 = Structure( name='Commercial Project Structure', templates=[task_ft] ) DBSession.add(structure1) status1 = Status(name='Status 1', code='STS1') status2 = Status(name='Status 2', code='STS2') status3 = Status(name='Status 3', code='STS3') DBSession.add_all([status1, status2, status3]) proj_status_list = \ StatusList.query.filter_by(target_entity_type='Project').first() task_status_list = \ StatusList.query.filter_by(target_entity_type='Task').first() DBSession.add(task_status_list) project1 = Project( name='Test Project 1', code='TP1', repositories=[repo1], structure=structure1, status_list=proj_status_list ) DBSession.add(project1) project2 = Project( name='Test Project 2', code='TP2', repositories=[repo2], structure=structure1, 
status_list=proj_status_list ) DBSession.add(project2) task1 = Task( name='Test Task 1', code='TT1', project=project1, status_list=task_status_list ) DBSession.add(task1) task2 = Task( name='Test Task 1', code='TT1', project=project2, status_list=task_status_list ) DBSession.add(task2) DBSession.commit() # now create versions version1 = Version(task=task1) DBSession.add(version1) DBSession.commit() version1.update_paths() version2 = Version(task=task1) DBSession.add(version2) DBSession.commit() version2.update_paths() version3 = Version(task=task2) DBSession.add(version3) DBSession.commit() version3.update_paths() version4 = Version(task=task2) DBSession.add(version4) DBSession.commit() version4.update_paths() DBSession.commit() logger.debug('version1.full_path : %s' % version1.full_path) logger.debug('version2.full_path : %s' % version2.full_path) logger.debug('version3.full_path : %s' % version2.full_path) logger.debug('version4.full_path : %s' % version2.full_path) # now try to get the versions with an DCCBase instance dcc = DCCBase() expected_value1 = 'TP1/Test_Task_1/Test_Task_1_Main_v001' expected_value2 = 'TP2/Test_Task_1/Test_Task_1_Main_v001' # version1 native trimmed_path = dcc.trim_repo_path( '/mnt/T/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value1) # version2 native trimmed_path = dcc.trim_repo_path( '/mnt/S/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value2) # version1 windows trimmed_path = dcc.trim_repo_path( 'T:/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value1) # version2 windows trimmed_path = dcc.trim_repo_path( 'S:/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value2) # version1 linux trimmed_path = dcc.trim_repo_path( '/mnt/T/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value1) # version2 linux trimmed_path = dcc.trim_repo_path( '/mnt/S/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value2) # version1 osx trimmed_path = dcc.trim_repo_path( '/Volumes/T/TP1/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value1) # version2 osx trimmed_path = dcc.trim_repo_path( '/Volumes/S/TP2/Test_Task_1/Test_Task_1_Main_v001' ) self.assertEqual(trimmed_path, expected_value2)
# Copyright 2014 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ibis.util as util import ibis.expr.types as ir import ibis.expr.operations as ops class FormatMemo(object): # A little sanity hack to simplify the below def __init__(self): from collections import defaultdict self.formatted = {} self.aliases = {} self.ops = {} self.counts = defaultdict(lambda: 0) def __contains__(self, obj): return self._key(obj) in self.formatted def _key(self, obj): return obj._repr() def observe(self, obj, formatter=lambda x: x._repr()): key = self._key(obj) if key not in self.formatted: self.aliases[key] = 'ref_%d' % len(self.formatted) self.formatted[key] = formatter(obj) self.ops[key] = obj self.counts[key] += 1 def count(self, obj): return self.counts[self._key(obj)] def get_alias(self, obj): return self.aliases[self._key(obj)] def get_formatted(self, obj): return self.formatted[self._key(obj)] class ExprFormatter(object): """ For creating a nice tree-like representation of an expression graph for displaying in the console. TODO: detect reused DAG nodes and do not display redundant information """ def __init__(self, expr, indent_size=2, base_level=0, memo=None, memoize=True): self.expr = expr self.indent_size = indent_size self.base_level = base_level self.memoize = memoize # For tracking "extracted" objects, like tables, that we don't want to # print out more than once, and simply alias in the expression tree self.memo = memo or FormatMemo() def get_result(self): what = self.expr.op() if self.memoize: self._memoize_tables() if isinstance(what, ir.HasSchema): # This should also catch aggregations if not self.memoize and what in self.memo: text = 'Table: %s' % self.memo.get_alias(what) elif isinstance(what, ops.PhysicalTable): text = self._format_table(what) else: # Any other node type text = self._format_node(what) elif isinstance(what, ops.TableColumn): text = self._format_column(self.expr) elif isinstance(what, ir.Node): text = self._format_node(what) elif isinstance(what, ops.Literal): text = 'Literal[%s] %s' % (self._get_type_display(), str(what.value)) if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None: text = '{0} = {1}'.format(self.expr.get_name(), text) if self.memoize: alias_to_text = [(self.memo.aliases[x], self.memo.formatted[x], self.memo.ops[x]) for x in self.memo.formatted] alias_to_text.sort() # A hack to suppress printing out of a ref that is the result of # the top level expression refs = [x + '\n' + y for x, y, op in alias_to_text if not op.equals(what)] text = '\n\n'.join(refs + [text]) return self._indent(text, self.base_level) def _memoize_tables(self): table_memo_ops = (ops.Aggregation, ops.Projection, ops.SelfReference) def walk(expr): op = expr.op() def visit(arg): if isinstance(arg, list): [visit(x) for x in arg] elif isinstance(arg, ir.Expr): walk(arg) if isinstance(op, ops.PhysicalTable): self.memo.observe(op, self._format_table) elif isinstance(op, ir.Node): visit(op.args) if isinstance(op, table_memo_ops): self.memo.observe(op, self._format_node) 
elif isinstance(op, ir.HasSchema): self.memo.observe(op, self._format_table) walk(self.expr) def _indent(self, text, indents=1): return util.indent(text, self.indent_size * indents) def _format_table(self, table): # format the schema rows = ['name: {0!s}\nschema:'.format(table.name)] rows.extend([' %s : %s' % tup for tup in zip(table.schema.names, table.schema.types)]) opname = type(table).__name__ type_display = self._get_type_display(table) opline = '%s[%s]' % (opname, type_display) return '{0}\n{1}'.format(opline, self._indent('\n'.join(rows))) def _format_column(self, expr): # HACK: if column is pulled from a Filter of another table, this parent # will not be found in the memo col = expr.op() parent_op = col.parent().op() if parent_op in self.memo: table_formatted = self.memo.get_alias(parent_op) else: table_formatted = '\n' + self._indent(self._format_node(parent_op)) return ("Column[%s] '%s' from table %s" % (self.expr.type(), col.name, table_formatted)) def _format_node(self, op): formatted_args = [] def visit(what, extra_indents=0): if isinstance(what, ir.Expr): result = self._format_subexpr(what) else: result = self._indent(str(what)) if extra_indents > 0: result = util.indent(result, self.indent_size) formatted_args.append(result) arg_names = getattr(op, '_arg_names', None) if arg_names is None: for arg in op.args: if isinstance(arg, list): for x in arg: visit(x) else: visit(arg) else: for arg, name in zip(op.args, arg_names): if name is not None: name = self._indent('{0}:'.format(name)) if isinstance(arg, list): if name is not None and len(arg) > 0: formatted_args.append(name) indents = 1 else: indents = 0 for x in arg: visit(x, extra_indents=indents) else: if name is not None: formatted_args.append(name) indents = 1 else: indents = 0 visit(arg, extra_indents=indents) opname = type(op).__name__ type_display = self._get_type_display(op) opline = '%s[%s]' % (opname, type_display) return '\n'.join([opline] + formatted_args) def _format_subexpr(self, expr): formatter = ExprFormatter(expr, base_level=1, memo=self.memo, memoize=False) return formatter.get_result() def _get_type_display(self, expr=None): if expr is None: expr = self.expr if isinstance(expr, ir.Node): expr = expr.to_expr() if isinstance(expr, ir.TableExpr): return 'table' elif isinstance(expr, ir.ArrayExpr): return 'array(%s)' % expr.type() elif isinstance(expr, ir.ScalarExpr): return '%s' % expr.type() elif isinstance(expr, ir.ExprList): list_args = [self._get_type_display(arg) for arg in expr.op().args] return ', '.join(list_args) else: raise NotImplementedError
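
# --- Usage sketch (appended, not part of the original module) ---
# ExprFormatter drives the console repr of an expression tree.  The example
# below assumes the public ibis.table() constructor; the table and column
# names are made up for illustration.
def _format_example():  # pragma: no cover
    import ibis
    t = ibis.table([('a', 'int64'), ('b', 'string')], 'example_table')
    expr = t[t.a > 0].a.sum()
    # Returns an indented tree with the physical table memoized into a
    # 'ref_0' alias block above the main expression.
    return ExprFormatter(expr).get_result()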
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'ClassicRecipe' db.create_table('layerindex_classicrecipe', ( ('recipe_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['layerindex.Recipe'], unique=True, primary_key=True)), ('cover_layerbranch', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['layerindex.LayerBranch'], null=True, on_delete=models.SET_NULL, blank=True)), ('cover_pn', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('cover_status', self.gf('django.db.models.fields.CharField')(default='U', max_length=1)), ('cover_verified', self.gf('django.db.models.fields.BooleanField')(default=False)), ('cover_comment', self.gf('django.db.models.fields.TextField')(blank=True)), ('classic_category', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), )) db.send_create_signal('layerindex', ['ClassicRecipe']) # Adding field 'LayerItem.classic' db.add_column('layerindex_layeritem', 'classic', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting model 'ClassicRecipe' db.delete_table('layerindex_classicrecipe') # Deleting field 'LayerItem.classic' db.delete_column('layerindex_layeritem', 'classic') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'layerindex.bbappend': { 'Meta': {'object_name': 'BBAppend'}, 'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'filepath': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}) }, 'layerindex.bbclass': { 'Meta': {'object_name': 'BBClass'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'layerindex.branch': { 'Meta': {'object_name': 'Branch'}, 'bitbake_branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'short_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'sort_priority': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, 'layerindex.classicrecipe': { 'Meta': {'object_name': 'ClassicRecipe', '_ormbases': ['layerindex.Recipe']}, 'classic_category': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'cover_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'cover_layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'cover_pn': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'cover_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}), 'cover_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'recipe_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['layerindex.Recipe']", 'unique': 'True', 'primary_key': 'True'}) }, 'layerindex.layerbranch': { 'Meta': {'object_name': 'LayerBranch'}, 'actual_branch': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'branch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.Branch']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerItem']"}), 'vcs_last_commit': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'vcs_last_fetch': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'vcs_last_rev': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'vcs_subdir': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}) }, 
'layerindex.layerdependency': { 'Meta': {'object_name': 'LayerDependency'}, 'dependency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependents_set'", 'to': "orm['layerindex.LayerItem']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies_set'", 'to': "orm['layerindex.LayerBranch']"}) }, 'layerindex.layeritem': { 'Meta': {'object_name': 'LayerItem'}, 'classic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index_preference': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'layer_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'mailing_list_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'usage_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'vcs_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'vcs_web_file_base_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'vcs_web_tree_base_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'vcs_web_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'layerindex.layermaintainer': { 'Meta': {'object_name': 'LayerMaintainer'}, 'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'responsibility': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'}) }, 'layerindex.layernote': { 'Meta': {'object_name': 'LayerNote'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerItem']"}), 'text': ('django.db.models.fields.TextField', [], {}) }, 'layerindex.machine': { 'Meta': {'object_name': 'Machine'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'layerindex.recipe': { 'Meta': {'object_name': 'Recipe'}, 'bbclassextend': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'bugtracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'filepath': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'homepage': 
('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.LayerBranch']"}), 'license': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'pn': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'provides': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'pv': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}) }, 'layerindex.recipechange': { 'Meta': {'object_name': 'RecipeChange'}, 'bugtracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'changeset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.RecipeChangeset']"}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['layerindex.Recipe']"}), 'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, 'layerindex.recipechangeset': { 'Meta': {'object_name': 'RecipeChangeset'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'layerindex.recipefiledependency': { 'Meta': {'object_name': 'RecipeFileDependency'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layerbranch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['layerindex.LayerBranch']"}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['layerindex.Recipe']"}) } } complete_apps = ['layerindex']
import os, sys, random, math import numpy as np #from pybrain.datasets import SupervisedDataSet from time import sleep # Helper functions class obstacle(object): def __init__(self,xmin,xmax,ymin,ymax): self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax def argmax(b): maxVal = -100000000 maxData = None for i,a in enumerate(b): if a>maxVal: maxVal = a maxData = i return maxData class Space(object): def __init__(self, n, shape): self.n = n self.shape = shape def make_horizontal_wall(obs_left, obs_right,loc='top'): if loc == 'bot': return obstacle(obs_left.xmin,obs_right.xmax,min(obs_left.ymax,obs_right.ymax),min(obs_left.ymax,obs_right.ymax)+20) if loc == 'top': return obstacle(obs_left.xmin,obs_right.xmax,max(obs_left.ymin,obs_right.ymin)-20,max(obs_left.ymin,obs_right.ymin)) class Env(object): def __init__(self,viz=True): # CONSTANTS for how large the drawing window is. self.XSIZE = 480 self.YSIZE = 480 # When visualizing the learned policy, in order to speed up things, we only a fraction of all pixels on a lower resolution. Here are the parameters for that. self.MAGNIFY = 2 self.NOFPIXELSPLITS = 4 self.viz = viz # Obstacle definitions obs1 = obstacle(130,150,120,420) obs2 = obstacle(40,60,40,420) obs6 = obstacle(430,450,20,420) obs7 = obstacle(330,350,120,350) obs8 = obstacle(240,260,350,440) obs4 = make_horizontal_wall(obs2,obs6,'top') obs3 = make_horizontal_wall(obs2,obs1,'bot') obs5 = make_horizontal_wall(obs1,obs7,'top') obs10 = make_horizontal_wall(obs8,obs6,'bot') obs9 = obstacle(obs8.xmin,obs7.xmax,obs7.ymax,obs7.ymax+20) self.action_space= Space(8, (0,)) self.observation_space= Space(0, (4,) ) self.OBSTACLES = [obs1,obs2,obs3,obs4,obs5,obs6,obs7,obs8,obs9,obs10] self.obstaclePixels = [[False for i in range(0,self.YSIZE)] for j in range(0,self.XSIZE)] for obs in self.OBSTACLES: for i in range(obs.xmin,obs.xmax): for j in range(obs.ymin,obs.ymax): self.obstaclePixels[i][j] = True self.CRASH_COST = 1 self.GOAL_LINE_REWARD = 1 self.TRAIN_EVERY_NTH_STEP = 6 self.currentPos = (100.0,100.0) self.currentDir = random.random()*math.pi*2 self.currentSpeedPerStep = 1.0 self.currentRotationPerStep = 0.04 # There are multiple view of the window. 
Here, we store the current state self.displayBufferEmpty = True self.isLearning = True # Prepare screen #if self.viz: self.isPaused = False self.displayDirection = 0 self.iteration = 0 #print self.allPixelsDS def inr1(self,x,y): if self.OBSTACLES[1].xmax <= x <= self.OBSTACLES[0].xmin and self.OBSTACLES[1].ymin <= y <= self.OBSTACLES[1].ymax: return True def inr2(self,x,y): if self.OBSTACLES[3].xmin <= x <= self.OBSTACLES[3].xmax and self.OBSTACLES[3].ymax <= y <= self.OBSTACLES[4].ymin : return True def inr3(self,x,y): if self.OBSTACLES[6].xmax <= x <= self.OBSTACLES[5].xmin and self.OBSTACLES[5].ymin <= y <= self.OBSTACLES[5].ymax: return True def inr4(self,x,y): if self.OBSTACLES[9].xmin <= x <= self.OBSTACLES[9].xmax and self.OBSTACLES[8].ymax <= y <= self.OBSTACLES[9].ymin: return True def inside(self,x,y): #R1 if self.inr1(x,y): return True #R2 elif self.inr2(x,y): return True #R3 elif self.inr3(x,y): return True #R4 elif self.inr4(x,y): return True else: return False def reset(self,net=None,iteration=0,viz=True): #self.currentPos = (400.0,400.0) rand_x = random.random()*self.XSIZE rand_y = random.random()*self.YSIZE while not self.inside(rand_x,rand_y): rand_x = random.random()*self.XSIZE rand_y = random.random()*self.YSIZE self.currentPos = (.25*self.XSIZE,.1*self.YSIZE) self.currentPos = (rand_x, rand_y) self.currentDir = math.pi*.5 self.currentSpeedPerStep = 1.0 self.currentRotationPerStep = 0.04 self.iteration += 1 return np.array([self.currentPos[0]/self.XSIZE, self.currentPos[1]/self.YSIZE, math.sin(self.currentDir*0.25*math.pi)\ ,math.cos(self.currentDir*0.25*math.pi)]) #return np.array([self.currentPos[0]/self.XSIZE, self.currentPos[1]/self.YSIZE]) def step(self, action): targetDirDiscrete = action targetDir = targetDirDiscrete*math.pi*2/8.0 stepStartingPos = self.currentPos # Simulate the cars for some steps. Also draw the trajectory of the car. 
for i in range(0,self.TRAIN_EVERY_NTH_STEP): if (self.currentDir>math.pi*2): self.currentDir -= 2*math.pi elif (self.currentDir<0): self.currentDir += 2*math.pi if targetDir < self.currentDir: if ((2*math.pi - self.currentDir) + targetDir) > (self.currentDir - targetDir): self.currentDir = max(targetDir,self.currentDir-self.currentRotationPerStep) else: self.currentDir = max(targetDir,self.currentDir+self.currentRotationPerStep) else: if ((2*math.pi - targetDir) + self.currentDir) > (targetDir - self.currentDir): self.currentDir = min(targetDir,self.currentDir+self.currentRotationPerStep) else: self.currentDir = min(targetDir,self.currentDir-self.currentRotationPerStep) self.oldPos = self.currentPos self.currentPos = (self.currentPos[0]+self.currentSpeedPerStep*math.sin(self.currentDir),self.currentPos[1]+self.currentSpeedPerStep*math.cos(self.currentDir)) # hitting the border bad = -1*self.CRASH_COST good = self.GOAL_LINE_REWARD*1 #print inside(self.currentPos[0],self.currentPos[1]) R = 0 if not self.inside(self.currentPos[0],self.currentPos[1]): done = True R += 0 elif((self.currentPos[1]<self.YSIZE/2) and (self.currentPos[0]>self.XSIZE/2) and (stepStartingPos[0]<self.XSIZE/2)): R += 0 #R = 0 done = False elif ((self.currentPos[1]>self.YSIZE/2) and (self.currentPos[0]>self.XSIZE/2) and (stepStartingPos[1]<self.YSIZE/2)): R += 0 #R = 0 done = False elif ((self.currentPos[1]>self.YSIZE/2) and (self.currentPos[0]<self.XSIZE/2) and (stepStartingPos[0]>self.XSIZE/2)): R += 0 done = True else: if self.inr1(self.currentPos[0],self.currentPos[1]): R += .5*(stepStartingPos[1] - self.currentPos[1])/ self.YSIZE if self.inr2(self.currentPos[0],self.currentPos[1]): R += .7*(self.currentPos[0] - stepStartingPos[0])/ self.XSIZE if self.inr3(self.currentPos[0],self.currentPos[1]): R += (self.currentPos[1] - stepStartingPos[1])/ self.YSIZE if self.inr4(self.currentPos[0],self.currentPos[1]): R += 1.5*(stepStartingPos[0] - self.currentPos[0])/ self.XSIZE done = False S_dash = np.array([self.currentPos[0]/self.XSIZE, self.currentPos[1]/self.YSIZE,math.sin(self.currentDir*0.25*math.pi),math.cos(self.currentDir*0.25*math.pi)]) return (S_dash, R, done, {'info':'data'}) def close(self): sys.exit(0) def render(self, mode): pass
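
# --- Usage sketch (appended, not part of the original file) ---
# Env follows the familiar gym-style interface (reset()/step(action) plus an
# action_space with .n discrete headings), so a random-policy rollout looks
# like the following.  The episode length here is an arbitrary choice.
def _random_rollout(max_steps=500):
    env = Env(viz=False)
    state = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = random.randrange(env.action_space.n)  # one of 8 headings
        state, reward, done, _info = env.step(action)
        total_reward += reward
        if done:  # left the drivable area or crossed a terminating boundary
            break
    return total_reward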
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base metrics support is extended for a concrete monitoring system.""" import datetime import logging import re import sys import threading import time class Metric(object): """A metric with unique combination of name and bindings.""" @property def family(self): """The metric family this instance belongs to. Members of a family share the same name but different label bindings. """ return self.__family @property def name(self): return self.__family.name @property def labels(self): return self.__labels @property def last_modified(self): """In real seconds.""" return self.__last_modified @property def mutex(self): return self.__mutex def __init__(self, family, labels): self.__mutex = threading.Lock() self.__name = family.name self.__last_modified = None self.__family = family self.__labels = labels def touch(self, utc=None): """Update last modified time""" self.__last_modified = utc or datetime.datetime.utcnow() self.__family.registry.queue_update(self) class Counter(Metric): @property def count(self): """Returns the current [local] counter value.""" return self.__count def __init__(self, family, labels): super(Counter, self).__init__(family, labels) self.__count = 0 def inc(self, amount=1, utc=None): with self.mutex: self.__count += amount self.touch(utc=utc) class Gauge(Metric): @property def value(self): return self.__compute() def __init__(self, family, labels, compute=None): super(Gauge, self).__init__(family, labels) func = lambda: self.__value self.__value = 0 self.__compute = compute or func def track(self, func, *pos_args, **kwargs): """Add to gauge while function call is in progress.""" try: self.inc() return func(*pos_args, **kwargs) finally: self.dec() def set(self, value, utc=None): """Set the gauge to an absolute value.""" with self.mutex: self.__value = value self.touch(utc=utc) def inc(self, amount=1, utc=None): """Increment the gauge by an amount.""" with self.mutex: self.__value += amount self.touch(utc=utc) def dec(self, amount=1, utc=None): """Decrement the gauge by an amount.""" with self.mutex: self.__value -= amount self.touch(utc=utc) class Timer(Metric): """Observes how long functions take to execute.""" @property def count(self): """The number of timings captured.""" return self.__count @property def total_seconds(self): """The total time across all the captured timings.""" return self.__total def __init__(self, family, labels): super(Timer, self).__init__(family, labels) self.__count = 0 self.__total = 0 def observe(self, seconds, utc=None): """Capture a timing observation.""" with self.mutex: self.__count += 1 self.__total += seconds self.touch(utc=utc) class MetricFamily(object): """A Factory for a counter or Gauge metric with specifically bound labels.""" GAUGE = 'GAUGE' COUNTER = 'COUNTER' TIMER = 'TIMER' @property def start_time(self): """The start time values are relative to.""" return self.__registry.start_time @property def name(self): """The name for this family will be the 
name of its Metric instances.""" return self.__name @property def registry(self): """The MetricsRegistry containing this family.""" return self.__registry @property def family_type(self): """Returns the type of metrics in this family (GAUGE, COUNTER, TIMER).""" return self.__family_type @property def mutex(self): """Returns lock for this family.""" return self.__mutex @property def instance_list(self): """Return all the label binding metric variations within this family.""" return self.__instances.values() def __init__(self, registry, name, factory, family_type): self.__mutex = threading.Lock() self.__name = name self.__factory = factory self.__instances = {} self.__registry = registry self.__family_type = family_type def get(self, labels): """Returns a metric instance with bound labels.""" key = ''.join('{0}={1}'.format(key, value) for key, value in labels.items()) with self.__mutex: got = self.__instances.get(key) if got is None: got = self.__factory(self, labels) self.__instances[key] = got return got class BaseMetricsRegistry(object): """Provides base class interface for metrics management. Specific metric stores would subclass this to specialize to push into their own systems. While having this registry be abstract is overkill, it is for what feels like practical reasons where there is no easy to use system for our use case of short lived batch jobs so there's going to be a lot of maintainence here and trials of different systems making this investment more appealing. """ # pylint: disable=too-many-public-methods @staticmethod def default_determine_outcome_labels(result, base_labels): """Return the outcome labels for a set of tracking labels.""" ex_type, _, _ = sys.exc_info() labels = dict(base_labels) labels.update({ 'success': ex_type is None, 'exception_type': '' if ex_type is None else ex_type.__name__ }) return labels @staticmethod def determine_outcome_labels_from_error_result(result, base_labels): if result is None: # Call itself threw an exception before it could return the error _, result, _ = sys.exc_info() labels = dict(base_labels) labels.update({ 'success': result is None, 'exception_type': '' if result is None else result.__class__.__name__ }) return labels @property def options(self): """Configured options.""" return self.__options @property def start_time(self): """When the registry started -- values are relative to this utc time.""" return self.__start_time @property def metric_family_list(self): """Return all the metric families.""" return self.__metric_families.values() @staticmethod def __make_context_labels(options): if not hasattr(options, 'monitoring_context_labels'): return {} labels = {} matcher = re.compile(r'(\w+)=(.*)') for binding in (options.monitoring_context_labels or '').split(','): if not binding: continue try: match = matcher.match(binding) labels[match.group(1)] = match.group(2) except Exception as ex: raise ValueError( 'Invalid monitoring_context_labels binding "%s": %s' % ( binding, ex)) return labels def __init__(self, options): """Constructs registry with options from init_argument_parser.""" self.__start_time = datetime.datetime.utcnow() self.__options = options self.__pusher_thread = None self.__pusher_thread_event = threading.Event() self.__metric_families = {} self.__family_mutex = threading.Lock() self.__updated_metrics = set([]) self.__update_mutex = threading.Lock() self.__inject_labels = self.__make_context_labels(options) if self.__inject_labels: logging.debug('Injecting additional metric labels %s', self.__inject_labels) def 
_do_make_family(self, family_type, name, label_names):
        """Creates a new metric-system specific metric family.

        Args:
          family_type: MetricFamily.COUNTER, GAUGE, or TIMER
          name: [string] Metric name.
          label_names: [list of string] The labels used to distinguish instances.

        Returns:
          specialized MetricFamily for the given type and registry implementation.
        """
        raise NotImplementedError()

    def queue_update(self, metric):
        """Add metric to the list of metrics to push out."""
        with self.__update_mutex:
            self.__updated_metrics.add(metric)

    def inc_counter(self, name, labels, **kwargs):
        """Increment the implied counter and return it."""
        counter = self.get_metric(MetricFamily.COUNTER, name, labels)
        counter.inc(**kwargs)
        return counter

    def count_call(self, name, labels, func, *pos_args, **kwargs):
        """Track the number of completed calls to the given function."""
        labels = dict(labels)
        success = False
        try:
            result = func(*pos_args, **kwargs)
            success = True
            return result
        finally:
            labels['success'] = success
            # Only the outcome labels are recorded; the wrapped call's own
            # keyword arguments are not meaningful to the counter.
            self.inc_counter(name, labels)

    def set(self, name, labels, value):
        """Sets the implied gauge to the specified value."""
        gauge = self.get_metric(MetricFamily.GAUGE, name, labels)
        gauge.set(value)
        return gauge

    def track_call(self, name, labels, func, *pos_args, **kwargs):
        """Track the number of active calls to the given function."""
        gauge = self.get_metric(MetricFamily.GAUGE, name, labels)
        return gauge.track(func, *pos_args, **kwargs)

    def observe_timer(self, name, labels, seconds):
        """Add an observation to the specified timer."""
        timer = self.get_metric(MetricFamily.TIMER, name, labels)
        timer.observe(seconds)
        return timer

    def time_call(self, name, labels, label_func, time_func,
                  *pos_args, **kwargs):
        """Time a call to the given function and record it in a timer.

        The timer instance is labeled with the outcome labels returned by
        label_func(result, labels), where result is None if the call raised.
        """
        start_time = time.time()
        try:
            result = time_func(*pos_args, **kwargs)
            outcome_labels = label_func(result, labels)
            return result
        except:
            try:
                outcome_labels = label_func(None, labels)
            except Exception as ex:
                # Fall back to the base labels so the timing is still recorded
                # and the original exception propagates below.
                logging.exception('label_func failed with %s', str(ex))
                outcome_labels = dict(labels)
            raise
        finally:
            timer = self.get_metric(MetricFamily.TIMER, name, outcome_labels)
            timer.observe(time.time() - start_time)

    def lookup_family_or_none(self, name):
        return self.__metric_families.get(name)

    def __normalize_labels(self, labels):
        result = dict(self.__inject_labels)
        result.update(labels)
        return result

    def get_metric(self, family_type, name, labels):
        """Return instance in family with given name and labels.

        Returns the existing instance if present, otherwise makes a new one.
        """
        labels = self.__normalize_labels(labels)
        family = self.__metric_families.get(name)
        if family:
            if family.family_type != family_type:
                raise TypeError('{have} is not a {want}'.format(
                    have=family, want=family_type))
            return family.get(labels)

        family = self._do_make_family(family_type, name, labels.keys())
        with self.__family_mutex:
            # Another thread may have registered the family in the meantime;
            # prefer the registered instance so every caller shares it.
            family = self.__metric_families.setdefault(name, family)
        return family.get(labels)

    def track_and_time_call(
            self, name, labels, outcome_labels_func, result_func,
            *pos_args, **kwargs):
        """Call the function with the given arguments while instrumenting it.

        This will instrument both the number of calls in progress and the
        final outcome and timing of each call.
        """
        tracking_name = name + '_InProgress'
        outcome_name = name + '_Outcome'
        return self.track_call(
            tracking_name, labels,
            self.time_call, outcome_name, labels, outcome_labels_func,
            result_func, *pos_args, **kwargs)

    def start_pusher_thread(self):
        """Starts thread for pushing metrics."""
        def delay_func():
            """Helper function for the push thread."""
            # pylint: disable=broad-except
            try:
                if self.__pusher_thread:
                    self.__pusher_thread_event.wait(
                        self.options.monitoring_flush_frequency)
                return self.__pusher_thread is not None
            except Exception as ex:
                logging.error('Pusher thread delay func caught %s', ex)
                return False

        self.__pusher_thread = threading.Thread(
            name='MetricsManager', target=self.flush_every_loop,
            args=[delay_func])
        self.__pusher_thread.start()
        return True

    def stop_pusher_thread(self):
        """Stop thread for pushing metrics."""
        logging.debug('Signaling pusher thread %s', self.__pusher_thread)
        pusher_thread = self.__pusher_thread
        self.__pusher_thread = None
        self.__pusher_thread_event.set()

        # Give a chance for the thread to self-terminate before we continue.
        # It's ok if this times out, but logging is cleaner if we give it a chance.
        if pusher_thread is not None:
            pusher_thread.join(2)

    def flush_every_loop(self, ready_func):
        """Start a loop that pushes while ready_func returns true."""
        logging.debug('Starting loop to push metrics...')
        while ready_func():
            self.flush_updated_metrics()
        logging.debug('Ending loop to push metrics...')

    def _do_flush_updated_metrics(self, updated_metrics):
        """Writes metrics to the server."""
        raise NotImplementedError()

    def _do_flush_final_metrics(self):
        """Notifies that we're done updating and it is safe to push final metrics.

        This is only informative for implementations that are not incremental.
        """
        pass

    def flush_final_metrics(self):
        """Push the final metrics to the metrics server."""
        if not self.options.monitoring_enabled:
            logging.warning("Monitoring disabled -- don't push final metrics.")
            return
        self._do_flush_final_metrics()

    def flush_updated_metrics(self):
        """Push incremental metrics to the metrics server."""
        if not self.options.monitoring_enabled:
            logging.warning(
                "Monitoring disabled -- don't push incremental metrics.")
            return
        with self.__update_mutex:
            updated_metrics = self.__updated_metrics
            self.__updated_metrics = set([])
        self._do_flush_updated_metrics(updated_metrics)
# -*- encoding: utf-8 -*- from __future__ import unicode_literals from django.core.checks import Error, Warning as DjangoWarning from django.db import models from django.test.utils import override_settings from django.test.testcases import skipIfDBFeature from .base import IsolatedModelsTestCase class RelativeFieldTests(IsolatedModelsTestCase): def test_valid_foreign_key_without_accessor(self): class Target(models.Model): # There would be a clash if Model.field installed an accessor. model = models.IntegerField() class Model(models.Model): field = models.ForeignKey(Target, related_name='+') field = Model._meta.get_field('field') errors = field.check() self.assertEqual(errors, []) def test_foreign_key_to_missing_model(self): # Model names are resolved when a model is being created, so we cannot # test relative fields in isolation and we need to attach them to a # model. class Model(models.Model): foreign_key = models.ForeignKey('Rel1') field = Model._meta.get_field('foreign_key') errors = field.check() expected = [ Error( ("Field defines a relation with model 'Rel1', " "which is either not installed, or is abstract."), hint=None, obj=field, id='fields.E300', ), ] self.assertEqual(errors, expected) def test_many_to_many_to_missing_model(self): class Model(models.Model): m2m = models.ManyToManyField("Rel2") field = Model._meta.get_field('m2m') errors = field.check(from_model=Model) expected = [ Error( ("Field defines a relation with model 'Rel2', " "which is either not installed, or is abstract."), hint=None, obj=field, id='fields.E300', ), ] self.assertEqual(errors, expected) def test_many_to_many_with_useless_options(self): class Model(models.Model): name = models.CharField(max_length=20) class ModelM2M(models.Model): m2m = models.ManyToManyField(Model, null=True, validators=['']) errors = ModelM2M.check() field = ModelM2M._meta.get_field('m2m') expected = [ DjangoWarning( 'null has no effect on ManyToManyField.', hint=None, obj=field, id='fields.W340', ) ] expected.append( DjangoWarning( 'ManyToManyField does not support validators.', hint=None, obj=field, id='fields.W341', ) ) self.assertEqual(errors, expected) def test_ambiguous_relationship_model(self): class Person(models.Model): pass class Group(models.Model): field = models.ManyToManyField('Person', through="AmbiguousRelationship", related_name='tertiary') class AmbiguousRelationship(models.Model): # Too much foreign keys to Person. first_person = models.ForeignKey(Person, related_name="first") second_person = models.ForeignKey(Person, related_name="second") second_model = models.ForeignKey(Group) field = Group._meta.get_field('field') errors = field.check(from_model=Group) expected = [ Error( ("The model is used as an intermediate model by " "'invalid_models_tests.Group.field', but it has more than one " "foreign key to 'Person', which is ambiguous. 
You must specify " "which foreign key Django should use via the through_fields " "keyword argument."), hint=('If you want to create a recursive relationship, use ' 'ForeignKey("self", symmetrical=False, ' 'through="AmbiguousRelationship").'), obj=field, id='fields.E335', ), ] self.assertEqual(errors, expected) def test_relationship_model_with_foreign_key_to_wrong_model(self): class WrongModel(models.Model): pass class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="InvalidRelationship") class InvalidRelationship(models.Model): person = models.ForeignKey(Person) wrong_foreign_key = models.ForeignKey(WrongModel) # The last foreign key should point to Group model. field = Group._meta.get_field('members') errors = field.check(from_model=Group) expected = [ Error( ("The model is used as an intermediate model by " "'invalid_models_tests.Group.members', but it does not " "have a foreign key to 'Group' or 'Person'."), hint=None, obj=InvalidRelationship, id='fields.E336', ), ] self.assertEqual(errors, expected) def test_relationship_model_missing_foreign_key(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="InvalidRelationship") class InvalidRelationship(models.Model): group = models.ForeignKey(Group) # No foreign key to Person field = Group._meta.get_field('members') errors = field.check(from_model=Group) expected = [ Error( ("The model is used as an intermediate model by " "'invalid_models_tests.Group.members', but it does not have " "a foreign key to 'Group' or 'Person'."), hint=None, obj=InvalidRelationship, id='fields.E336', ), ] self.assertEqual(errors, expected) def test_missing_relationship_model(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="MissingM2MModel") field = Group._meta.get_field('members') errors = field.check(from_model=Group) expected = [ Error( ("Field specifies a many-to-many relation through model " "'MissingM2MModel', which has not been installed."), hint=None, obj=field, id='fields.E331', ), ] self.assertEqual(errors, expected) def test_symmetrical_self_referential_field(self): class Person(models.Model): # Implicit symmetrical=False. friends = models.ManyToManyField('self', through="Relationship") class Relationship(models.Model): first = models.ForeignKey(Person, related_name="rel_from_set") second = models.ForeignKey(Person, related_name="rel_to_set") field = Person._meta.get_field('friends') errors = field.check(from_model=Person) expected = [ Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', hint=None, obj=field, id='fields.E332', ), ] self.assertEqual(errors, expected) def test_too_many_foreign_keys_in_self_referential_model(self): class Person(models.Model): friends = models.ManyToManyField('self', through="InvalidRelationship", symmetrical=False) class InvalidRelationship(models.Model): first = models.ForeignKey(Person, related_name="rel_from_set_2") second = models.ForeignKey(Person, related_name="rel_to_set_2") third = models.ForeignKey(Person, related_name="too_many_by_far") field = Person._meta.get_field('friends') errors = field.check(from_model=Person) expected = [ Error( ("The model is used as an intermediate model by " "'invalid_models_tests.Person.friends', but it has more than two " "foreign keys to 'Person', which is ambiguous. 
You must specify " "which two foreign keys Django should use via the through_fields " "keyword argument."), hint='Use through_fields to specify which two foreign keys Django should use.', obj=InvalidRelationship, id='fields.E333', ), ] self.assertEqual(errors, expected) def test_symmetric_self_reference_with_intermediate_table(self): class Person(models.Model): # Explicit symmetrical=True. friends = models.ManyToManyField('self', through="Relationship", symmetrical=True) class Relationship(models.Model): first = models.ForeignKey(Person, related_name="rel_from_set") second = models.ForeignKey(Person, related_name="rel_to_set") field = Person._meta.get_field('friends') errors = field.check(from_model=Person) expected = [ Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', hint=None, obj=field, id='fields.E332', ), ] self.assertEqual(errors, expected) def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self): """Using through_fields in a m2m with an intermediate model shouldn't mask its incompatibility with symmetry.""" class Person(models.Model): # Explicit symmetrical=True. friends = models.ManyToManyField('self', symmetrical=True, through="Relationship", through_fields=('first', 'second')) class Relationship(models.Model): first = models.ForeignKey(Person, related_name="rel_from_set") second = models.ForeignKey(Person, related_name="rel_to_set") referee = models.ForeignKey(Person, related_name="referred") field = Person._meta.get_field('friends') errors = field.check(from_model=Person) expected = [ Error( 'Many-to-many fields with intermediate tables must not be symmetrical.', hint=None, obj=field, id='fields.E332', ), ] self.assertEqual(errors, expected) def test_foreign_key_to_abstract_model(self): class Model(models.Model): foreign_key = models.ForeignKey('AbstractModel') class AbstractModel(models.Model): class Meta: abstract = True field = Model._meta.get_field('foreign_key') errors = field.check() expected = [ Error( ("Field defines a relation with model 'AbstractModel', " "which is either not installed, or is abstract."), hint=None, obj=field, id='fields.E300', ), ] self.assertEqual(errors, expected) def test_m2m_to_abstract_model(self): class AbstractModel(models.Model): class Meta: abstract = True class Model(models.Model): m2m = models.ManyToManyField('AbstractModel') field = Model._meta.get_field('m2m') errors = field.check(from_model=Model) expected = [ Error( ("Field defines a relation with model 'AbstractModel', " "which is either not installed, or is abstract."), hint=None, obj=field, id='fields.E300', ), ] self.assertEqual(errors, expected) def test_unique_m2m(self): class Person(models.Model): name = models.CharField(max_length=5) class Group(models.Model): members = models.ManyToManyField('Person', unique=True) field = Group._meta.get_field('members') errors = field.check(from_model=Group) expected = [ Error( 'ManyToManyFields cannot be unique.', hint=None, obj=field, id='fields.E330', ), ] self.assertEqual(errors, expected) def test_foreign_key_to_non_unique_field(self): class Target(models.Model): bad = models.IntegerField() # No unique=True class Model(models.Model): foreign_key = models.ForeignKey('Target', to_field='bad') field = Model._meta.get_field('foreign_key') errors = field.check() expected = [ Error( "'Target.bad' must set unique=True because it is referenced by a foreign key.", hint=None, obj=field, id='fields.E311', ), ] self.assertEqual(errors, expected) def 
test_foreign_key_to_non_unique_field_under_explicit_model(self): class Target(models.Model): bad = models.IntegerField() class Model(models.Model): field = models.ForeignKey(Target, to_field='bad') field = Model._meta.get_field('field') errors = field.check() expected = [ Error( "'Target.bad' must set unique=True because it is referenced by a foreign key.", hint=None, obj=field, id='fields.E311', ), ] self.assertEqual(errors, expected) def test_foreign_object_to_non_unique_fields(self): class Person(models.Model): # Note that both fields are not unique. country_id = models.IntegerField() city_id = models.IntegerField() class MMembership(models.Model): person_country_id = models.IntegerField() person_city_id = models.IntegerField() person = models.ForeignObject(Person, from_fields=['person_country_id', 'person_city_id'], to_fields=['country_id', 'city_id']) field = MMembership._meta.get_field('person') errors = field.check() expected = [ Error( ("None of the fields 'country_id', 'city_id' on model 'Person' " "have a unique=True constraint."), hint=None, obj=field, id='fields.E310', ) ] self.assertEqual(errors, expected) def test_on_delete_set_null_on_non_nullable_field(self): class Person(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('Person', on_delete=models.SET_NULL) field = Model._meta.get_field('foreign_key') errors = field.check() expected = [ Error( 'Field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field, or change the on_delete rule.', obj=field, id='fields.E320', ), ] self.assertEqual(errors, expected) def test_on_delete_set_default_without_default_value(self): class Person(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('Person', on_delete=models.SET_DEFAULT) field = Model._meta.get_field('foreign_key') errors = field.check() expected = [ Error( 'Field specifies on_delete=SET_DEFAULT, but has no default value.', hint='Set a default value, or change the on_delete rule.', obj=field, id='fields.E321', ), ] self.assertEqual(errors, expected) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_nullable_primary_key(self): class Model(models.Model): field = models.IntegerField(primary_key=True, null=True) field = Model._meta.get_field('field') errors = field.check() expected = [ Error( 'Primary keys must not have null=True.', hint='Set null=False on the field, or remove primary_key=True argument.', obj=field, id='fields.E007', ), ] self.assertEqual(errors, expected) def test_not_swapped_model(self): class SwappableModel(models.Model): # A model that can be, but isn't swapped out. References to this # model should *not* raise any validation error. 
class Meta: swappable = 'TEST_SWAPPABLE_MODEL' class Model(models.Model): explicit_fk = models.ForeignKey(SwappableModel, related_name='explicit_fk') implicit_fk = models.ForeignKey('invalid_models_tests.SwappableModel', related_name='implicit_fk') explicit_m2m = models.ManyToManyField(SwappableModel, related_name='explicit_m2m') implicit_m2m = models.ManyToManyField( 'invalid_models_tests.SwappableModel', related_name='implicit_m2m') explicit_fk = Model._meta.get_field('explicit_fk') self.assertEqual(explicit_fk.check(), []) implicit_fk = Model._meta.get_field('implicit_fk') self.assertEqual(implicit_fk.check(), []) explicit_m2m = Model._meta.get_field('explicit_m2m') self.assertEqual(explicit_m2m.check(from_model=Model), []) implicit_m2m = Model._meta.get_field('implicit_m2m') self.assertEqual(implicit_m2m.check(from_model=Model), []) @override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement') def test_referencing_to_swapped_model(self): class Replacement(models.Model): pass class SwappedModel(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL' class Model(models.Model): explicit_fk = models.ForeignKey(SwappedModel, related_name='explicit_fk') implicit_fk = models.ForeignKey('invalid_models_tests.SwappedModel', related_name='implicit_fk') explicit_m2m = models.ManyToManyField(SwappedModel, related_name='explicit_m2m') implicit_m2m = models.ManyToManyField( 'invalid_models_tests.SwappedModel', related_name='implicit_m2m') fields = [ Model._meta.get_field('explicit_fk'), Model._meta.get_field('implicit_fk'), Model._meta.get_field('explicit_m2m'), Model._meta.get_field('implicit_m2m'), ] expected_error = Error( ("Field defines a relation with the model " "'invalid_models_tests.SwappedModel', which has been swapped out."), hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.", id='fields.E301', ) for field in fields: expected_error.obj = field errors = field.check(from_model=Model) self.assertEqual(errors, [expected_error]) class AccessorClashTests(IsolatedModelsTestCase): def test_fk_to_integer(self): self._test_accessor_clash( target=models.IntegerField(), relative=models.ForeignKey('Target')) def test_fk_to_fk(self): self._test_accessor_clash( target=models.ForeignKey('Another'), relative=models.ForeignKey('Target')) def test_fk_to_m2m(self): self._test_accessor_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target')) def test_m2m_to_integer(self): self._test_accessor_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target')) def test_m2m_to_fk(self): self._test_accessor_clash( target=models.ForeignKey('Another'), relative=models.ManyToManyField('Target')) def test_m2m_to_m2m(self): self._test_accessor_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target')) def _test_accessor_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): model_set = target class Model(models.Model): rel = relative errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.", hint=("Rename field 'Target.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E302', ), ] self.assertEqual(errors, expected) def test_clash_between_accessors(self): class Target(models.Model): pass class Model(models.Model): foreign = models.ForeignKey(Target) m2m = models.ManyToManyField(Target) errors = Model.check() expected = 
[ Error( "Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.", hint=("Add or change a related_name argument to the definition " "for 'Model.foreign' or 'Model.m2m'."), obj=Model._meta.get_field('foreign'), id='fields.E304', ), Error( "Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m' or 'Model.foreign'."), obj=Model._meta.get_field('m2m'), id='fields.E304', ), ] self.assertEqual(errors, expected) def test_m2m_to_m2m_with_inheritance(self): """ Ref #22047. """ class Target(models.Model): pass class Model(models.Model): children = models.ManyToManyField('Child', related_name="m2m_clash", related_query_name="no_clash") class Parent(models.Model): m2m_clash = models.ManyToManyField('Target') class Child(Parent): pass errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.", hint=("Rename field 'Child.m2m_clash', or add/change " "a related_name argument to the definition " "for field 'Model.children'."), obj=Model._meta.get_field('children'), id='fields.E302', ) ] self.assertEqual(errors, expected) class ReverseQueryNameClashTests(IsolatedModelsTestCase): def test_fk_to_integer(self): self._test_reverse_query_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target')) def test_fk_to_fk(self): self._test_reverse_query_name_clash( target=models.ForeignKey('Another'), relative=models.ForeignKey('Target')) def test_fk_to_m2m(self): self._test_reverse_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target')) def test_m2m_to_integer(self): self._test_reverse_query_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target')) def test_m2m_to_fk(self): self._test_reverse_query_name_clash( target=models.ForeignKey('Another'), relative=models.ManyToManyField('Target')) def test_m2m_to_m2m(self): self._test_reverse_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target')) def _test_reverse_query_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): model = target class Model(models.Model): rel = relative errors = Model.check() expected = [ Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.model'.", hint=("Rename field 'Target.model', or add/change " "a related_name argument to the definition " "for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E303', ), ] self.assertEqual(errors, expected) class ExplicitRelatedNameClashTests(IsolatedModelsTestCase): def test_fk_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', related_name='clash')) def test_fk_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey('Another'), relative=models.ForeignKey('Target', related_name='clash')) def test_fk_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', related_name='clash')) def test_m2m_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target', related_name='clash')) def test_m2m_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey('Another'), relative=models.ManyToManyField('Target', related_name='clash')) def 
test_m2m_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target', related_name='clash')) def _test_explicit_related_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.", hint=("Rename field 'Target.clash', or add/change " "a related_name argument to the definition " "for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E302', ), Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.", hint=("Rename field 'Target.clash', or add/change " "a related_name argument to the definition " "for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E303', ), ] self.assertEqual(errors, expected) class ExplicitRelatedQueryNameClashTests(IsolatedModelsTestCase): def test_fk_to_integer(self): self._test_explicit_related_query_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', related_query_name='clash')) def test_fk_to_fk(self): self._test_explicit_related_query_name_clash( target=models.ForeignKey('Another'), relative=models.ForeignKey('Target', related_query_name='clash')) def test_fk_to_m2m(self): self._test_explicit_related_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', related_query_name='clash')) def test_m2m_to_integer(self): self._test_explicit_related_query_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target', related_query_name='clash')) def test_m2m_to_fk(self): self._test_explicit_related_query_name_clash( target=models.ForeignKey('Another'), relative=models.ManyToManyField('Target', related_query_name='clash')) def test_m2m_to_m2m(self): self._test_explicit_related_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target', related_query_name='clash')) def _test_explicit_related_query_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative errors = Model.check() expected = [ Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.", hint=("Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E303', ), ] self.assertEqual(errors, expected) class SelfReferentialM2MClashTests(IsolatedModelsTestCase): def test_clash_between_accessors(self): class Model(models.Model): first_m2m = models.ManyToManyField('self', symmetrical=False) second_m2m = models.ManyToManyField('self', symmetrical=False) errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.", hint=("Add or change a related_name argument to the definition " "for 'Model.first_m2m' or 'Model.second_m2m'."), obj=Model._meta.get_field('first_m2m'), id='fields.E304', ), Error( "Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.", hint=("Add or change a related_name argument to the definition " "for 'Model.second_m2m' or 'Model.first_m2m'."), obj=Model._meta.get_field('second_m2m'), id='fields.E304', ), ] self.assertEqual(errors, expected) def test_accessor_clash(self): class Model(models.Model): 
model_set = models.ManyToManyField("self", symmetrical=False) errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.", hint=("Rename field 'Model.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.model_set'."), obj=Model._meta.get_field('model_set'), id='fields.E302', ), ] self.assertEqual(errors, expected) def test_reverse_query_name_clash(self): class Model(models.Model): model = models.ManyToManyField("self", symmetrical=False) errors = Model.check() expected = [ Error( "Reverse query name for 'Model.model' clashes with field name 'Model.model'.", hint=("Rename field 'Model.model', or add/change a related_name " "argument to the definition for field 'Model.model'."), obj=Model._meta.get_field('model'), id='fields.E303', ), ] self.assertEqual(errors, expected) def test_clash_under_explicit_related_name(self): class Model(models.Model): clash = models.IntegerField() m2m = models.ManyToManyField("self", symmetrical=False, related_name='clash') errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.", hint=("Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.m2m'."), obj=Model._meta.get_field('m2m'), id='fields.E302', ), Error( "Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.", hint=("Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.m2m'."), obj=Model._meta.get_field('m2m'), id='fields.E303', ), ] self.assertEqual(errors, expected) def test_valid_model(self): class Model(models.Model): first = models.ManyToManyField("self", symmetrical=False, related_name='first_accessor') second = models.ManyToManyField("self", symmetrical=False, related_name='second_accessor') errors = Model.check() self.assertEqual(errors, []) class SelfReferentialFKClashTests(IsolatedModelsTestCase): def test_accessor_clash(self): class Model(models.Model): model_set = models.ForeignKey("Model") errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.", hint=("Rename field 'Model.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.model_set'."), obj=Model._meta.get_field('model_set'), id='fields.E302', ), ] self.assertEqual(errors, expected) def test_reverse_query_name_clash(self): class Model(models.Model): model = models.ForeignKey("Model") errors = Model.check() expected = [ Error( "Reverse query name for 'Model.model' clashes with field name 'Model.model'.", hint=("Rename field 'Model.model', or add/change " "a related_name argument to the definition " "for field 'Model.model'."), obj=Model._meta.get_field('model'), id='fields.E303', ), ] self.assertEqual(errors, expected) def test_clash_under_explicit_related_name(self): class Model(models.Model): clash = models.CharField(max_length=10) foreign = models.ForeignKey("Model", related_name='clash') errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.", hint=("Rename field 'Model.clash', or add/change " "a related_name argument to the definition " "for field 'Model.foreign'."), obj=Model._meta.get_field('foreign'), id='fields.E302', ), Error( "Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.", hint=("Rename field 'Model.clash', or add/change " "a related_name argument 
to the definition " "for field 'Model.foreign'."), obj=Model._meta.get_field('foreign'), id='fields.E303', ), ] self.assertEqual(errors, expected) class ComplexClashTests(IsolatedModelsTestCase): # New tests should not be included here, because this is a single, # self-contained sanity check, not a test of everything. def test_complex_clash(self): class Target(models.Model): tgt_safe = models.CharField(max_length=10) clash = models.CharField(max_length=10) model = models.CharField(max_length=10) clash1_set = models.CharField(max_length=10) class Model(models.Model): src_safe = models.CharField(max_length=10) foreign_1 = models.ForeignKey(Target, related_name='id') foreign_2 = models.ForeignKey(Target, related_name='src_safe') m2m_1 = models.ManyToManyField(Target, related_name='id') m2m_2 = models.ManyToManyField(Target, related_name='src_safe') errors = Model.check() expected = [ Error( "Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.foreign_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E302', ), Error( "Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.foreign_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E303', ), Error( "Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_1' or 'Model.m2m_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E304', ), Error( "Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_1' or 'Model.m2m_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E305', ), Error( "Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.", hint=("Add or change a related_name argument " "to the definition for 'Model.foreign_2' or 'Model.m2m_2'."), obj=Model._meta.get_field('foreign_2'), id='fields.E304', ), Error( "Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_2' or 'Model.m2m_2'."), obj=Model._meta.get_field('foreign_2'), id='fields.E305', ), Error( "Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.m2m_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E302', ), Error( "Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.m2m_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E303', ), Error( "Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_1' or 'Model.foreign_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E304', ), Error( "Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.m2m_1' or 'Model.foreign_1'."), obj=Model._meta.get_field('m2m_1'), 
id='fields.E305', ), Error( "Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_2' or 'Model.foreign_2'."), obj=Model._meta.get_field('m2m_2'), id='fields.E304', ), Error( "Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_2' or 'Model.foreign_2'."), obj=Model._meta.get_field('m2m_2'), id='fields.E305', ), ] self.assertEqual(errors, expected) class M2mThroughFieldsTests(IsolatedModelsTestCase): def test_m2m_field_argument_validation(self): """ Tests that ManyToManyField accepts the ``through_fields`` kwarg only if an intermediary table is specified. """ class Fan(models.Model): pass self.assertRaisesMessage( ValueError, 'Cannot specify through_fields without a through model', models.ManyToManyField, Fan, through_fields=('f1', 'f2')) def test_invalid_order(self): """ Tests that mixing up the order of link fields to ManyToManyField.through_fields triggers validation errors. """ class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event')) class Invitation(models.Model): event = models.ForeignKey(Event) invitee = models.ForeignKey(Fan) inviter = models.ForeignKey(Fan, related_name='+') field = Event._meta.get_field('invitees') errors = field.check(from_model=Event) expected = [ Error( ("'Invitation.invitee' is not a foreign key to 'Event'."), hint="Did you mean one of the following foreign keys to 'Event': event?", obj=field, id='fields.E339'), Error( ("'Invitation.event' is not a foreign key to 'Fan'."), hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?", obj=field, id='fields.E339'), ] self.assertEqual(expected, errors) def test_invalid_field(self): """ Tests that providing invalid field names to ManyToManyField.through_fields triggers validation errors. """ class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invalid_field_1', 'invalid_field_2')) class Invitation(models.Model): event = models.ForeignKey(Event) invitee = models.ForeignKey(Fan) inviter = models.ForeignKey(Fan, related_name='+') field = Event._meta.get_field('invitees') errors = field.check(from_model=Event) expected = [ Error( ("The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'."), hint="Did you mean one of the following foreign keys to 'Event': event?", obj=field, id='fields.E338'), Error( ("The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'."), hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?", obj=field, id='fields.E338'), ] self.assertEqual(expected, errors) def test_explicit_field_names(self): """ Tests that if ``through_fields`` kwarg is given, it must specify both link fields of the intermediary table. 
""" class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee')) class Invitation(models.Model): event = models.ForeignKey(Event) invitee = models.ForeignKey(Fan) inviter = models.ForeignKey(Fan, related_name='+') field = Event._meta.get_field('invitees') errors = field.check(from_model=Event) expected = [ Error( ("Field specifies 'through_fields' but does not provide the names " "of the two link fields that should be used for the relation " "through model 'invalid_models_tests.Invitation'."), hint=("Make sure you specify 'through_fields' as " "through_fields=('field1', 'field2')"), obj=field, id='fields.E337')] self.assertEqual(expected, errors)
# encoding: utf-8 # # This file was taken from webpyte (r179): # http://code.google.com/p/webpyte/source/browse/trunk/webpyte/email_validator.py # According to the docstring, it is licensed as 'public domain' # # Modifications: # * Wed Mar 25 2009 Felix Schwarz # - Removed 'from __future__ import absolute_import to stay compatible with Python 2.3/2.4 # * Fri Mar 27 2009 Felix Schwarz # - Disabled DNS server discovery on module import # - added __all__ declaration # - modified domain validator so that domains without second-level domain will # be accepted as well. # """A method of validating e-mail addresses and mail domains. This module aims to provide the ultimate functions for: * domain validation, and * e-mail validation. Why not just use a regular expression? ====================================== http://haacked.com/archive/2007/08/21/i-knew-how-to-validate-an-email-address-until-i.aspx There are many regular expressions out there for this. The "perfect one" is several KB long and therefore unmaintainable (Perl people wrote it...). This is 2009 and domain rules are changing too. Impossible domain names have become possible, international domain names are real... So validating an e-mail address is more complex than you might think. Take a look at some of the rules: http://en.wikipedia.org/wiki/E-mail_address#RFC_specification How to do it then? ================== I believe the solution should combine simple regular expressions with imperative programming. E-mail validation is also dependent on the robustness principle: "Be conservative in what you do, be liberal in what you accept from others." http://en.wikipedia.org/wiki/Postel%27s_law This module recognizes that e-mail validation can be done in several different ways, according to purpose: 1) Most of the time you just want validation according to the standard rules. So just say: v = EmailValidator() 2) If you are creating e-mail addresses for your server or your organization, you might need to satisfy a stricter policy such as "dash is not allowed in email addresses". The EmailValidator constructor accepts a *local_part_chars* argument to help build the right regular expression for you. Example: v = EmailValidator(local_part_chars='.-+_') 3) What about typos? An erroneous dot at the end of a typed email is typical. Other common errors with the dots revolve around the @: [email protected]. These typing mistakes can be automatically corrected, saving you from doing it manually. For this you use the *fix* flag when instantiating a validator: d = DomainValidator(fix=True) domain, error_message = d.validate('.supercalifragilistic.com.br') if error_message: print 'Invalid domain: ' + domain else: print 'Valid domain: ' + domain 4) TODO: Squash the bugs in this feature! Paranoid people may wish to verify that the informed domain actually exists. For that you can pass a *lookup_dns='a'* argument to the constructor, or even *lookup_dns='mx'* to verify that the domain actually has e-mail servers. To use this feature, you need to install the *pydns* library: easy_install -UZ pydns How to use ========== The validating methods return a tuple (email, error_msg). *email* is the trimmed and perhaps fixed email. *error_msg* is an empty string when the e-mail is valid. 
Typical usage is: v = EmailValidator() # or EmailValidator(fix=True) email = raw_input('Type an email: ') email, err = v.validate(email) if err: print 'Error: ' + err else: print 'E-mail is valid: ' + email # the email, corrected There is also an EmailHarvester class to collect e-mail addresses from any text. Authors: Nando Florestan, Marco Ferreira Code written in 2009 and donated to the public domain. """ import re __all__ = ['ValidationException', 'BaseValidator', 'DomainValidator', 'EmailValidator', 'EmailHarvester'] class ValidationException(ValueError): pass class BaseValidator(object): def validate_or_raise(self, *a, **k): """Some people would condemn this whole module screaming: "Don't return success codes, use exceptions!" This method allows them to be happy, too. """ validate, err = self.validate(*a, **k) if err: raise ValidationException(err) return validate class DomainValidator(BaseValidator): """A domain name validator that is ready for internationalized domains. http://en.wikipedia.org/wiki/Internationalized_domain_name http://en.wikipedia.org/wiki/Top-level_domain """ # non_international_regex = re.compile(r"^[a-z0-9][a-z0-9\.\-]*\.[a-z]+$", #domain_pattern = r'[\w][\w\.\-]+?\.[\w]+' # fs: New domain regex that accepts domains without second-level domain also domain_pattern = r'[\w]+([\w\.\-]+\w)?' domain_regex = \ re.compile('^' + domain_pattern + '$', re.IGNORECASE | re.UNICODE) # OpenDNS has a feature that bites us. If you are using OpenDNS, and you # type in your browser a domain that does not exist, OpenDNS catches that # and presents a page. "Did you mean www.hovercraft.eels?" # For us, this feature appears as a false positive when looking up the # DNS server. So we try to work around it: false_positive_ips = ['208.67.217.132'] def __init__(self, fix=False, lookup_dns=None): self.fix = fix if lookup_dns: try: import DNS except ImportError: # pragma: no cover raise ImportError("To enable DNS lookup of domains install the PyDNS package.") lookup_dns = lookup_dns.lower() if lookup_dns not in ('a', 'mx'): raise RuntimeError("Not a valid *lookup_dns* value: " + lookup_dns) self._lookup_dns = lookup_dns def _apply_common_rules(self, part, maxlength): """This method contains the rules that must be applied to both the domain and the local part of the e-mail address. """ part = part.strip() if self.fix: part = part.strip('.') if not part: return part, 'It cannot be empty.' if len(part) > maxlength: return part, 'It cannot be longer than %i chars.' % maxlength if part[0] == '.': return part, 'It cannot start with a dot.' if part[-1] == '.': return part, 'It cannot end with a dot.' if '..' in part: return part, 'It cannot contain consecutive dots.' return part, '' def validate_domain(self, part): part, err = self._apply_common_rules(part, maxlength=255) if err: return part, 'Invalid domain: %s' % err if not self.domain_regex.search(part): return part, 'Invalid domain.' if self._lookup_dns and not self.lookup_domain(part): return part, 'Domain does not seem to exist.' return part.lower(), '' validate = validate_domain # TODO: As an option, DNS lookup on the domain: # http://mail.python.org/pipermail/python-list/2008-July/497997.html def lookup_domain(self, domain, lookup_record=None, **kw): """Looks up the DNS record for *domain* and returns: * None if it does not exist, * The IP address if looking up the "A" record, or * The list of hosts in the "MX" record. The return value, if treated as a boolean, says whether a domain exists. 
You can pass "a" or "mx" as the *lookup_record* parameter. Otherwise, the *lookup_dns* parameter from the constructor is used. "a" means verify that the domain exists. "mx" means verify that the domain exists and specifies mail servers. """ import DNS lookup_record = lookup_record.lower() if lookup_record else self._lookup_dns if lookup_record not in ('a', 'mx'): raise RuntimeError("Not a valid lookup_record value: " + lookup_record) if lookup_record == "a": request = DNS.Request(domain, **kw) try: answers = request.req().answers except (DNS.Lib.PackError, UnicodeError): # A part of the domain name is longer than 63. return False if not answers: return False result = answers[0]['data'] # This is an IP address if result in self.false_positive_ips: # pragma: no cover return False return result try: return DNS.mxlookup(domain) except UnicodeError: pass return False class EmailValidator(DomainValidator): # TODO: Implement all rules! # http://tools.ietf.org/html/rfc3696 # http://en.wikipedia.org/wiki/E-mail_address#RFC_specification # TODO: Local part in quotes? # TODO: Quoted-printable local part? def __init__(self, local_part_chars=".-+_!#$%&'/=`|~?^{}*", **k): super(EmailValidator, self).__init__(**k) # Add a backslash before the dash so it can go into the regex: self.local_part_pattern = '[a-z0-9' + local_part_chars.replace('-', r'\-') + ']+' # Regular expression for validation: self.local_part_regex = re.compile('^' + self.local_part_pattern + '$', re.IGNORECASE) def validate_local_part(self, part): part, err = self._apply_common_rules(part, maxlength=64) if err: return part, 'Invalid local part: %s' % err if not self.local_part_regex.search(part): return part, 'Invalid local part.' return part, '' # We don't go lowercase because the local part is case-sensitive. def validate_email(self, email): if not email: return email, 'The e-mail is empty.' parts = email.split('@') if len(parts) != 2: return email, 'An email address must contain a single @' local, domain = parts # Validate the domain domain, err = self.validate_domain(domain) if err: return email, "The e-mail has a problem to the right of the @: %s" % err # Validate the local part local, err = self.validate_local_part(local) if err: return email, "The email has a problem to the left of the @: %s" % err # It is valid return local + '@' + domain, '' validate = validate_email class EmailHarvester(EmailValidator): def __init__(self, *a, **k): super(EmailHarvester, self).__init__(*a, **k) # Regular expression for harvesting: self.harvest_regex = \ re.compile(self.local_part_pattern + '@' + self.domain_pattern, re.IGNORECASE | re.UNICODE) def harvest(self, text): """Iterator that yields the e-mail addresses contained in *text*.""" for match in self.harvest_regex.finditer(text): # TODO: optionally validate before yielding? # TODO: keep a list of harvested but not validated? yield match.group().replace('..', '.') # rfc822_specials = '()<>@,;:\\"[]' # is_address_valid(addr): # # First we validate the name portion (name@domain) # c = 0 # while c < len(addr): # if addr[c] == '"' and (not c or addr[c - 1] == '.' 
or addr[c - 1] == '"'): # c = c + 1 # while c < len(addr): # if addr[c] == '"': break # if addr[c] == '\\' and addr[c + 1] == ' ': # c = c + 2 # continue # if ord(addr[c]) < 32 or ord(addr[c]) >= 127: return 0 # c = c + 1 # else: return 0 # if addr[c] == '@': break # if addr[c] != '.': return 0 # c = c + 1 # continue # if addr[c] == '@': break # if ord(addr[c]) <= 32 or ord(addr[c]) >= 127: return 0 # if addr[c] in rfc822_specials: return 0 # c = c + 1 # if not c or addr[c - 1] == '.': return 0 # # # Next we validate the domain portion (name@domain) # domain = c = c + 1 # if domain >= len(addr): return 0 # count = 0 # while c < len(addr): # if addr[c] == '.': # if c == domain or addr[c - 1] == '.': return 0 # count = count + 1 # if ord(addr[c]) <= 32 or ord(addr[c]) >= 127: return 0 # if addr[c] in rfc822_specials: return 0 # c = c + 1 # # return count >= 1
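

# Illustrative usage sketch, not part of the original module.  The addresses
# below are made-up examples; fix=True only strips stray dots, as described in
# the module docstring.
if __name__ == '__main__':
    validator = EmailValidator(fix=True)
    # A trailing dot after the domain is a typical typo that fix=True repairs.
    email, err = validator.validate('someone@example.com.')
    if err:
        print('Error: ' + err)
    else:
        print('Valid: ' + email)  # prints 'Valid: someone@example.com'

    # EmailHarvester pulls addresses out of free-form text.
    harvester = EmailHarvester()
    text = 'Write to first.person@example.org or second.person@example.co.uk today.'
    print(list(harvester.harvest(text)))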
# coding=utf-8 # Copyright 2014-2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from time import time from oslo_log import log as logging from neutron.plugins.common import constants as plugin_const from neutron_lbaas.services.loadbalancer import constants as lb_const from f5_openstack_agent.lbaasv2.drivers.bigip import l7policy_service from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_service import \ LbaasServiceObject from f5_openstack_agent.lbaasv2.drivers.bigip import listener_service from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address from requests import HTTPError LOG = logging.getLogger(__name__) class LBaaSBuilder(object): # F5 LBaaS Driver using iControl for BIG-IP to # create objects (vips, pools) - not using an iApp.""" def __init__(self, conf, driver, l2_service=None): self.conf = conf self.driver = driver self.l2_service = l2_service self.service_adapter = driver.service_adapter self.listener_builder = listener_service.ListenerServiceBuilder( self.service_adapter, driver.cert_manager, conf.f5_parent_ssl_profile) self.pool_builder = pool_service.PoolServiceBuilder( self.service_adapter ) self.l7service = l7policy_service.L7PolicyService(conf) self.esd = None def init_esd(self, esd): self.esd = esd def is_esd(self, esd): return self.esd.is_esd(esd) def assure_service(self, service, traffic_group, all_subnet_hints): """Assure that a service is configured on the BIGIP.""" start_time = time() LOG.debug("assuring loadbalancers") self._assure_loadbalancer_created(service, all_subnet_hints) LOG.debug("assuring monitors") self._assure_monitors_created(service) LOG.debug("assuring pools") self._assure_pools_created(service) LOG.debug("assuring pool members") self._assure_members(service, all_subnet_hints) LOG.debug("assuring l7 policies") self._assure_l7policies_created(service) LOG.debug("assuring listeners") self._assure_listeners_created(service) LOG.debug("deleting listeners") self._assure_listeners_deleted(service) LOG.debug("deleting l7 policies") self._assure_l7policies_deleted(service) LOG.debug("deleting pools") self._assure_pools_deleted(service) LOG.debug("deleting monitors") self._assure_monitors_deleted(service) LOG.debug("deleting loadbalancers") self._assure_loadbalancer_deleted(service) LOG.debug(" _assure_service took %.5f secs" % (time() - start_time)) return all_subnet_hints @staticmethod def _set_status_as_active(svc_obj, force=False): # If forced, then set to ACTIVE else hold ERROR preserve_statuses = \ tuple([plugin_const.ERROR, plugin_const.PENDING_DELETE]) ps = svc_obj['provisioning_status'] svc_obj['provisioning_status'] = plugin_const.ACTIVE \ if ps not in preserve_statuses or force else ps @staticmethod def _set_status_as_error(svc_obj): svc_obj['provisioning_status'] = plugin_const.ERROR @staticmethod def _is_not_pending_delete(svc_obj): return svc_obj['provisioning_status'] != plugin_const.PENDING_DELETE @staticmethod def _is_pending_delete(svc_obj): return 
svc_obj['provisioning_status'] == plugin_const.PENDING_DELETE @staticmethod def _is_not_error(svc_obj): return svc_obj['provisioning_status'] != plugin_const.ERROR def _assure_loadbalancer_created(self, service, all_subnet_hints): if 'loadbalancer' not in service: return bigips = self.driver.get_config_bigips() loadbalancer = service["loadbalancer"] set_active = True if self._is_not_pending_delete(loadbalancer): vip_address = virtual_address.VirtualAddress( self.service_adapter, loadbalancer) for bigip in bigips: try: vip_address.assure(bigip) except Exception as error: LOG.error(str(error)) self._set_status_as_error(loadbalancer) set_active = False self._set_status_as_active(loadbalancer, force=set_active) if self.driver.l3_binding: loadbalancer = service["loadbalancer"] self.driver.l3_binding.bind_address( subnet_id=loadbalancer["vip_subnet_id"], ip_address=loadbalancer["vip_address"]) self._update_subnet_hints(loadbalancer["provisioning_status"], loadbalancer["vip_subnet_id"], loadbalancer["network_id"], all_subnet_hints, False) def _assure_listeners_created(self, service): if 'listeners' not in service: return listeners = service["listeners"] loadbalancer = service["loadbalancer"] networks = service.get("networks", list()) pools = service.get("pools", list()) l7policies = service.get("l7policies", list()) l7rules = service.get("l7policy_rules", list()) bigips = self.driver.get_config_bigips() for listener in listeners: error = False if self._is_not_pending_delete(listener): svc = {"loadbalancer": loadbalancer, "listener": listener, "pools": pools, "l7policies": l7policies, "l7policy_rules": l7rules, "networks": networks} # create_listener() will do an update if VS exists error = self.listener_builder.create_listener( svc, bigips) if error: loadbalancer['provisioning_status'] = \ plugin_const.ERROR listener['provisioning_status'] = plugin_const.ERROR else: listener['provisioning_status'] = plugin_const.ACTIVE if listener['admin_state_up']: listener['operating_status'] = lb_const.ONLINE def _assure_pools_created(self, service): if "pools" not in service: return pools = service.get("pools", list()) loadbalancer = service.get("loadbalancer", dict()) monitors = \ [monitor for monitor in service.get("healthmonitors", list()) if monitor['provisioning_status'] != plugin_const.PENDING_DELETE] bigips = self.driver.get_config_bigips() error = None for pool in pools: if pool['provisioning_status'] != plugin_const.PENDING_DELETE: svc = {"loadbalancer": loadbalancer, "pool": pool} svc['members'] = self._get_pool_members(service, pool['id']) svc['healthmonitors'] = monitors error = self.pool_builder.create_pool(svc, bigips) if error: pool['provisioning_status'] = plugin_const.ERROR loadbalancer['provisioning_status'] = plugin_const.ERROR else: pool['provisioning_status'] = plugin_const.ACTIVE pool['operating_status'] = lb_const.ONLINE def _get_pool_members(self, service, pool_id): """Return a list of members associated with given pool.""" members = [] for member in service['members']: if member['pool_id'] == pool_id: members.append(member) return members def _assure_monitors_created(self, service): monitors = service.get("healthmonitors", list()) loadbalancer = service.get("loadbalancer", dict()) bigips = self.driver.get_config_bigips() force_active_status = True for monitor in monitors: svc = {"loadbalancer": loadbalancer, "healthmonitor": monitor} if monitor['provisioning_status'] != plugin_const.PENDING_DELETE: if self.pool_builder.create_healthmonitor(svc, bigips): monitor['provisioning_status'] 
= plugin_const.ERROR force_active_status = False self._set_status_as_active(monitor, force=force_active_status) def _assure_monitors_deleted(self, service): monitors = service["healthmonitors"] loadbalancer = service["loadbalancer"] bigips = self.driver.get_config_bigips() for monitor in monitors: svc = {"loadbalancer": loadbalancer, "healthmonitor": monitor} if monitor['provisioning_status'] == plugin_const.PENDING_DELETE: if self.pool_builder.delete_healthmonitor(svc, bigips): monitor['provisioning_status'] = plugin_const.ERROR def _assure_members(self, service, all_subnet_hints): if not (("pools" in service) and ("members" in service)): return members = service["members"] loadbalancer = service["loadbalancer"] bigips = self.driver.get_config_bigips() # Group the members by pool. pool_to_member_map = dict() for member in members: if 'port' not in member and \ member['provisioning_status'] != plugin_const.PENDING_DELETE: LOG.warning("Member definition does not include Neutron port") pool_id = member.get('pool_id', None) if not pool_id: LOG.error("Pool member %s does not have a valid pool id", member.get('id', "NO MEMBER ID")) continue if pool_id not in pool_to_member_map: pool_to_member_map[pool_id] = list() pool_to_member_map[pool_id].append(member) # Assure members by pool for pool_id, pool_members in pool_to_member_map.iteritems(): pool = self.get_pool_by_id(service, pool_id) svc = {"loadbalancer": loadbalancer, "members": pool_members, "pool": pool} self.pool_builder.assure_pool_members(svc, bigips) pool_deleted = self._is_pending_delete(pool) for member in pool_members: if pool_deleted: member['provisioning_status'] = "PENDING_DELETE" member['parent_pool_deleted'] = True provisioning = member.get('provisioning_status') if 'missing' not in member \ and provisioning != "PENDING_DELETE": member['provisioning_status'] = "ACTIVE" elif 'missing' in member: member['provisioning_status'] = "ERROR" self._update_subnet_hints(member["provisioning_status"], member["subnet_id"], member["network_id"], all_subnet_hints, True) def _assure_loadbalancer_deleted(self, service): if (service['loadbalancer']['provisioning_status'] != plugin_const.PENDING_DELETE): return loadbalancer = service["loadbalancer"] bigips = self.driver.get_config_bigips() if self.driver.l3_binding: self.driver.l3_binding.unbind_address( subnet_id=loadbalancer["vip_subnet_id"], ip_address=loadbalancer["vip_address"]) vip_address = virtual_address.VirtualAddress( self.service_adapter, loadbalancer) for bigip in bigips: vip_address.assure(bigip, delete=True) def _assure_pools_deleted(self, service): if 'pools' not in service: return pools = service["pools"] loadbalancer = service["loadbalancer"] bigips = self.driver.get_config_bigips() service_members = service.get('members', list()) for pool in pools: pool_members = [member for member in service_members if member.get('pool_id') == pool['id']] svc = {"loadbalancer": loadbalancer, "pool": pool, "members": pool_members} # Is the pool being deleted? 
if pool['provisioning_status'] == plugin_const.PENDING_DELETE: # Delete pool error = self.pool_builder.delete_pool(svc, bigips) if error: pool['provisioning_status'] = plugin_const.ERROR def _assure_listeners_deleted(self, service): bigips = self.driver.get_config_bigips() if 'listeners' in service: listeners = service["listeners"] loadbalancer = service["loadbalancer"] for listener in listeners: error = False if listener['provisioning_status'] == \ plugin_const.PENDING_DELETE: svc = {"loadbalancer": loadbalancer, "listener": listener} error = \ self.listener_builder.delete_listener(svc, bigips) if error: listener['provisioning_status'] = plugin_const.ERROR self.listener_builder.delete_orphaned_listeners(service, bigips) @staticmethod def get_pool_by_id(service, pool_id): if pool_id and "pools" in service: pools = service["pools"] for pool in pools: if pool["id"] == pool_id: return pool return None def _update_subnet_hints(self, status, subnet_id, network_id, all_subnet_hints, is_member): bigips = self.driver.get_config_bigips() for bigip in bigips: subnet_hints = all_subnet_hints[bigip.device_name] if status != plugin_const.PENDING_DELETE: if subnet_id in subnet_hints['check_for_delete_subnets']: del subnet_hints['check_for_delete_subnets'][subnet_id] if subnet_id not in subnet_hints['do_not_delete_subnets']: subnet_hints['do_not_delete_subnets'].append(subnet_id) else: if subnet_id not in subnet_hints['do_not_delete_subnets']: subnet_hints['check_for_delete_subnets'][subnet_id] = \ {'network_id': network_id, 'subnet_id': subnet_id, 'is_for_member': is_member} def listener_exists(self, bigip, service): """Test the existence of the listener defined by service.""" try: # Throw an exception if the listener does not exist. self.listener_builder.get_listener(service, bigip) except HTTPError as err: LOG.debug("Virtual service service discovery error, %s." 
% err.message) return False return True def _assure_l7policies_created(self, service): if 'l7policies' not in service: return listener_policy_map = dict() bigips = self.driver.get_config_bigips() lbaas_service = LbaasServiceObject(service) l7policies = service['l7policies'] LOG.debug("L7 debug: processing policies: %s", l7policies) for l7policy in l7policies: LOG.debug("L7 debug: assuring policy: %s", l7policy) name = l7policy.get('name', None) if not self.esd.is_esd(name): listener_id = l7policy.get('listener_id', None) if not listener_id or listener_id in listener_policy_map: LOG.debug( "L7 debug: listener policies already added: %s", listener_id) continue listener_policy_map[listener_id] = \ self.l7service.build_policy(l7policy, lbaas_service) for listener_id, policy in listener_policy_map.items(): error = False if policy['f5_policy'].get('rules', list()): error = self.l7service.create_l7policy( policy['f5_policy'], bigips) for p in service['l7policies']: if self._is_not_pending_delete(p): if not error: self._set_status_as_active(p, force=True) else: self._set_status_as_error(p) loadbalancer = service.get('loadbalancer', {}) if not error: listener = lbaas_service.get_listener(listener_id) if listener: listener['f5_policy'] = policy['f5_policy'] else: loadbalancer['provisioning_status'] = \ plugin_const.ERROR def _assure_l7policies_deleted(self, service): if 'l7policies' not in service: return listener_policy_map = dict() bigips = self.driver.get_config_bigips() lbaas_service = LbaasServiceObject(service) l7policies = service['l7policies'] for l7policy in l7policies: name = l7policy.get('name', None) if not self.esd.is_esd(name): listener_id = l7policy.get('listener_id', None) if not listener_id or listener_id in listener_policy_map: continue listener_policy_map[listener_id] = \ self.l7service.build_policy(l7policy, lbaas_service) # Clean wrapper policy this is the legacy name of a policy loadbalancer = service.get('loadbalancer', dict()) tenant_id = loadbalancer.get('tenant_id', "") try: wrapper_policy = { 'name': 'wrapper_policy', 'partition': self.service_adapter.get_folder_name( tenant_id)} self.l7service.delete_l7policy(wrapper_policy, bigips) except HTTPError as err: if err.response.status_code != 404: LOG.error("Failed to remove wrapper policy: %s", err.message) except Exception as err: LOG.error("Failed to remove wrapper policy: %s", err.message) for _, policy in listener_policy_map.items(): error = False if not policy['f5_policy'].get('rules', list()): error = self.l7service.delete_l7policy( policy['f5_policy'], bigips) for p in policy['l7policies']: if self._is_not_pending_delete(p): if not error: self._set_status_as_active(p, force=True) else: self._set_status_as_error(p) else: if error: self._set_status_as_error(p) def get_listener_stats(self, service, stats): """Get statistics for a loadbalancer service. Sums values for stats defined in stats dictionary for all listeners defined in service object. For example, if loadbalancer has two listeners and stats defines a stat 'clientside.bitsIn' as a key, the sum of all pools' clientside.bitsIn will be returned in stats. Provisioning status is ignored -- PENDING_DELETE objects are included. :param service: defines loadbalancer and set of pools. :param stats: a dictionary that defines which stats to get. Should be initialized by caller with 0 values. :return: stats are appended to input stats dict (i.e., contains the sum of given stats for all BIG-IPs). 
""" listeners = service["listeners"] loadbalancer = service["loadbalancer"] bigips = self.driver.get_config_bigips() collected_stats = {} for stat in stats: collected_stats[stat] = 0 for listener in listeners: svc = {"loadbalancer": loadbalancer, "listener": listener} vs_stats = self.listener_builder.get_stats(svc, bigips, stats) for stat in stats: collected_stats[stat] += vs_stats[stat] return collected_stats def update_operating_status(self, service): bigip = self.driver.get_active_bigip() loadbalancer = service["loadbalancer"] status_keys = ['status.availabilityState', 'status.enabledState'] members = service["members"] for member in members: if member['provisioning_status'] == plugin_const.ACTIVE: pool = self.get_pool_by_id(service, member["pool_id"]) svc = {"loadbalancer": loadbalancer, "member": member, "pool": pool} status = self.pool_builder.get_member_status( svc, bigip, status_keys) member['operating_status'] = self.convert_operating_status( status) @staticmethod def convert_operating_status(status): """Convert object status to LBaaS operating status. status.availabilityState and status.enabledState = Operating Status available enabled ONLINE available disabled DISABLED offline - OFFLINE unknown - NO_MONITOR """ op_status = None available = status.get('status.availabilityState', '') if available == 'available': enabled = status.get('status.enabledState', '') if enabled == 'enabled': op_status = lb_const.ONLINE elif enabled == 'disabled': op_status = lb_const.DISABLED else: LOG.warning('Unexpected value %s for status.enabledState', enabled) elif available == 'offline': op_status = lb_const.OFFLINE elif available == 'unknown': op_status = lb_const.NO_MONITOR return op_status
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from code import Code from model import PropertyType import any_helper import cpp_util import schema_util class CppTypeGenerator(object): """Manages the types of properties and provides utilities for getting the C++ type out of a model.Property """ def __init__(self, root_namespace, namespace=None, cpp_namespace=None): """Creates a cpp_type_generator. The given root_namespace should be of the format extensions::api::sub. The generator will generate code suitable for use in the given namespace. """ self._type_namespaces = {} self._root_namespace = root_namespace.split('::') self._cpp_namespaces = {} if namespace and cpp_namespace: self._namespace = namespace self.AddNamespace(namespace, cpp_namespace) def AddNamespace(self, namespace, cpp_namespace): """Maps a model.Namespace to its C++ namespace name. All mappings are beneath the root namespace. """ for type_ in namespace.types: if type_ in self._type_namespaces: raise ValueError('Type %s is declared in both %s and %s' % (type_, namespace.name, self._type_namespaces[type_].name)) self._type_namespaces[type_] = namespace self._cpp_namespaces[namespace] = cpp_namespace def ExpandParams(self, params): """Returns the given parameters with PropertyType.CHOICES parameters expanded so that each choice is a separate parameter. """ expanded = [] for param in params: if param.type_ == PropertyType.CHOICES: for choice in param.choices.values(): expanded.append(choice) else: expanded.append(param) return expanded def GetAllPossibleParameterLists(self, params): """Returns all possible parameter lists for the given set of parameters. Every combination of arguments passed to any of the PropertyType.CHOICES parameters will have a corresponding parameter list returned here. """ if not params: return [[]] partial_parameter_lists = self.GetAllPossibleParameterLists(params[1:]) return [[param] + partial_list for param in self.ExpandParams(params[:1]) for partial_list in partial_parameter_lists] def GetCppNamespaceName(self, namespace): """Gets the mapped C++ namespace name for the given namespace relative to the root namespace. """ return self._cpp_namespaces[namespace] def GetRootNamespaceStart(self): """Get opening root namespace declarations. """ c = Code() for namespace in self._root_namespace: c.Append('namespace %s {' % namespace) return c def GetRootNamespaceEnd(self): """Get closing root namespace declarations. """ c = Code() for namespace in reversed(self._root_namespace): c.Append('} // %s' % namespace) return c def GetNamespaceStart(self): """Get opening self._namespace namespace declaration. """ return Code().Append('namespace %s {' % self.GetCppNamespaceName(self._namespace)) def GetNamespaceEnd(self): """Get closing self._namespace namespace declaration. """ return Code().Append('} // %s' % self.GetCppNamespaceName(self._namespace)) def GetEnumNoneValue(self, prop): """Gets the enum value in the given model.Property indicating no value has been set. """ return '%s_NONE' % prop.unix_name.upper() def GetEnumValue(self, prop, enum_value): """Gets the enum value of the given model.Property of the given type. e.g VAR_STRING """ return '%s_%s' % ( prop.unix_name.upper(), cpp_util.Classname(enum_value.upper())) def GetChoicesEnumType(self, prop): """Gets the type of the enum for the given model.Property. 
e.g VarType """ return cpp_util.Classname(prop.name) + 'Type' def GetType(self, prop, pad_for_generics=False, wrap_optional=False): """Translates a model.Property into its C++ type. If REF types from different namespaces are referenced, will resolve using self._type_namespaces. Use pad_for_generics when using as a generic to avoid operator ambiguity. Use wrap_optional to wrap the type in a scoped_ptr<T> if the Property is optional. """ cpp_type = None if prop.type_ == PropertyType.REF: dependency_namespace = self._ResolveTypeNamespace(prop.ref_type) if not dependency_namespace: raise KeyError('Cannot find referenced type: %s' % prop.ref_type) if self._namespace != dependency_namespace: cpp_type = '%s::%s' % (self._cpp_namespaces[dependency_namespace], schema_util.StripSchemaNamespace(prop.ref_type)) else: cpp_type = schema_util.StripSchemaNamespace(prop.ref_type) elif prop.type_ == PropertyType.BOOLEAN: cpp_type = 'bool' elif prop.type_ == PropertyType.INTEGER: cpp_type = 'int' elif prop.type_ == PropertyType.DOUBLE: cpp_type = 'double' elif prop.type_ == PropertyType.STRING: cpp_type = 'std::string' elif prop.type_ == PropertyType.ENUM: cpp_type = cpp_util.Classname(prop.name) elif prop.type_ == PropertyType.ADDITIONAL_PROPERTIES: cpp_type = 'base::DictionaryValue' elif prop.type_ == PropertyType.ANY: cpp_type = any_helper.ANY_CLASS elif prop.type_ == PropertyType.OBJECT: cpp_type = cpp_util.Classname(prop.name) elif prop.type_ == PropertyType.ARRAY: item_type = prop.item_type if item_type.type_ == PropertyType.REF: item_type = self.GetReferencedProperty(item_type) if item_type.type_ in ( PropertyType.REF, PropertyType.ANY, PropertyType.OBJECT): cpp_type = 'std::vector<linked_ptr<%s> > ' else: cpp_type = 'std::vector<%s> ' cpp_type = cpp_type % self.GetType( prop.item_type, pad_for_generics=True) elif prop.type_ == PropertyType.BINARY: cpp_type = 'std::string' else: raise NotImplementedError(prop.type_) # Enums aren't wrapped because C++ won't allow it. Optional enums have a # NONE value generated instead. if wrap_optional and prop.optional and prop.type_ != PropertyType.ENUM: cpp_type = 'scoped_ptr<%s> ' % cpp_type if pad_for_generics: return cpp_type return cpp_type.strip() def GenerateForwardDeclarations(self): """Returns the forward declarations for self._namespace. Use after GetRootNamespaceStart. Assumes all namespaces are relative to self._root_namespace. """ c = Code() namespace_type_dependencies = self._NamespaceTypeDependencies() for namespace in sorted(namespace_type_dependencies.keys(), key=lambda ns: ns.name): c.Append('namespace %s {' % namespace.name) for type_ in sorted(namespace_type_dependencies[namespace], key=schema_util.StripSchemaNamespace): type_name = schema_util.StripSchemaNamespace(type_) if namespace.types[type_].type_ == PropertyType.STRING: c.Append('typedef std::string %s;' % type_name) elif namespace.types[type_].type_ == PropertyType.ARRAY: c.Append('typedef std::vector<%(item_type)s> %(name)s;') c.Substitute({ 'name': type_name, 'item_type': self.GetType(namespace.types[type_].item_type, wrap_optional=True)}) else: c.Append('struct %s;' % type_name) c.Append('}') c.Concat(self.GetNamespaceStart()) for (name, type_) in self._namespace.types.items(): if not type_.functions and type_.type_ == PropertyType.OBJECT: c.Append('struct %s;' % schema_util.StripSchemaNamespace(name)) c.Concat(self.GetNamespaceEnd()) return c def GenerateIncludes(self): """Returns the #include lines for self._namespace. 
""" c = Code() for header in sorted( ['%s/%s.h' % (dependency.source_file_dir, self._cpp_namespaces[dependency]) for dependency in self._NamespaceTypeDependencies().keys()]): c.Append('#include "%s"' % header) return c def _ResolveTypeNamespace(self, ref_type): """Resolves a type, which must be explicitly qualified, to its enclosing namespace. """ if ref_type in self._type_namespaces: return self._type_namespaces[ref_type] raise KeyError(('Cannot resolve type: %s.' % ref_type) + 'Maybe it needs a namespace prefix if it comes from another namespace?') return None def GetReferencedProperty(self, prop): """Returns the property a property of type REF is referring to. If the property passed in is not of type PropertyType.REF, it will be returned unchanged. """ if prop.type_ != PropertyType.REF: return prop return self._ResolveTypeNamespace(prop.ref_type).types.get(prop.ref_type, None) def _NamespaceTypeDependencies(self): """Returns a dict containing a mapping of model.Namespace to the C++ type of type dependencies for self._namespace. """ dependencies = set() for function in self._namespace.functions.values(): for param in function.params: dependencies |= self._PropertyTypeDependencies(param) if function.callback: for param in function.callback.params: dependencies |= self._PropertyTypeDependencies(param) for type_ in self._namespace.types.values(): for prop in type_.properties.values(): dependencies |= self._PropertyTypeDependencies(prop) dependency_namespaces = dict() for dependency in dependencies: namespace = self._ResolveTypeNamespace(dependency) if namespace != self._namespace: dependency_namespaces.setdefault(namespace, []) dependency_namespaces[namespace].append(dependency) return dependency_namespaces def _PropertyTypeDependencies(self, prop): """Recursively gets all the type dependencies of a property. """ deps = set() if prop: if prop.type_ == PropertyType.REF: deps.add(prop.ref_type) elif prop.type_ == PropertyType.ARRAY: deps = self._PropertyTypeDependencies(prop.item_type) elif prop.type_ == PropertyType.OBJECT: for p in prop.properties.values(): deps |= self._PropertyTypeDependencies(p) return deps def GeneratePropertyValues(self, property, line, nodoc=False): """Generates the Code to display all value-containing properties. """ c = Code() if not nodoc: c.Comment(property.description) if property.has_value: c.Append(line % { "type": self._GetPrimitiveType(property.type_), "name": property.name, "value": property.value }) else: has_child_code = False c.Sblock('namespace %s {' % property.name) for child_property in property.properties.values(): child_code = self.GeneratePropertyValues( child_property, line, nodoc=nodoc) if child_code: has_child_code = True c.Concat(child_code) c.Eblock('} // namespace %s' % property.name) if not has_child_code: c = None return c def _GetPrimitiveType(self, type_): """Like |GetType| but only accepts and returns C++ primitive types. """ if type_ == PropertyType.BOOLEAN: return 'bool' elif type_ == PropertyType.INTEGER: return 'int' elif type_ == PropertyType.DOUBLE: return 'double' elif type_ == PropertyType.STRING: return 'char*' raise Exception(type_ + ' is not primitive')
from datetime import date, datetime, timedelta import numpy as np import pytest import pytz from pandas._libs.tslibs import iNaT, period as libperiod from pandas._libs.tslibs.ccalendar import DAYS, MONTHS from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG from pandas._libs.tslibs.parsing import DateParseError from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz from pandas.compat import iteritems, text_type from pandas.compat.numpy import np_datetime64_compat import pandas as pd from pandas import NaT, Period, Timedelta, Timestamp, offsets import pandas.core.indexes.period as period import pandas.util.testing as tm class TestPeriodConstruction(object): def test_construction(self): i1 = Period('1/1/2005', freq='M') i2 = Period('Jan 2005') assert i1 == i2 i1 = Period('2005', freq='A') i2 = Period('2005') i3 = Period('2005', freq='a') assert i1 == i2 assert i1 == i3 i4 = Period('2005', freq='M') i5 = Period('2005', freq='m') msg = r"Input has different freq=M from Period\(freq=A-DEC\)" with pytest.raises(IncompatibleFrequency, match=msg): i1 != i4 assert i4 == i5 i1 = Period.now('Q') i2 = Period(datetime.now(), freq='Q') i3 = Period.now('q') assert i1 == i2 assert i1 == i3 i1 = Period('1982', freq='min') i2 = Period('1982', freq='MIN') assert i1 == i2 i2 = Period('1982', freq=('Min', 1)) assert i1 == i2 i1 = Period(year=2005, month=3, day=1, freq='D') i2 = Period('3/1/2005', freq='D') assert i1 == i2 i3 = Period(year=2005, month=3, day=1, freq='d') assert i1 == i3 i1 = Period('2007-01-01 09:00:00.001') expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L') assert i1 == expected expected = Period(np_datetime64_compat( '2007-01-01 09:00:00.001Z'), freq='L') assert i1 == expected i1 = Period('2007-01-01 09:00:00.00101') expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U') assert i1 == expected expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'), freq='U') assert i1 == expected msg = "Must supply freq for ordinal value" with pytest.raises(ValueError, match=msg): Period(ordinal=200701) with pytest.raises(ValueError, match="Invalid frequency: X"): Period('2007-1-1', freq='X') def test_construction_bday(self): # Biz day construction, roll forward if non-weekday i1 = Period('3/10/12', freq='B') i2 = Period('3/10/12', freq='D') assert i1 == i2.asfreq('B') i2 = Period('3/11/12', freq='D') assert i1 == i2.asfreq('B') i2 = Period('3/12/12', freq='D') assert i1 == i2.asfreq('B') i3 = Period('3/10/12', freq='b') assert i1 == i3 i1 = Period(year=2012, month=3, day=10, freq='B') i2 = Period('3/12/12', freq='B') assert i1 == i2 def test_construction_quarter(self): i1 = Period(year=2005, quarter=1, freq='Q') i2 = Period('1/1/2005', freq='Q') assert i1 == i2 i1 = Period(year=2005, quarter=3, freq='Q') i2 = Period('9/1/2005', freq='Q') assert i1 == i2 i1 = Period('2005Q1') i2 = Period(year=2005, quarter=1, freq='Q') i3 = Period('2005q1') assert i1 == i2 assert i1 == i3 i1 = Period('05Q1') assert i1 == i2 lower = Period('05q1') assert i1 == lower i1 = Period('1Q2005') assert i1 == i2 lower = Period('1q2005') assert i1 == lower i1 = Period('1Q05') assert i1 == i2 lower = Period('1q05') assert i1 == lower i1 = Period('4Q1984') assert i1.year == 1984 lower = Period('4q1984') assert i1 == lower def test_construction_month(self): expected = Period('2007-01', freq='M') i1 = Period('200701', freq='M') assert i1 == expected i1 = Period('200701', freq='M') assert i1 == expected i1 = 
Period(200701, freq='M') assert i1 == expected i1 = Period(ordinal=200701, freq='M') assert i1.year == 18695 i1 = Period(datetime(2007, 1, 1), freq='M') i2 = Period('200701', freq='M') assert i1 == i2 i1 = Period(date(2007, 1, 1), freq='M') i2 = Period(datetime(2007, 1, 1), freq='M') i3 = Period(np.datetime64('2007-01-01'), freq='M') i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M') i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M') assert i1 == i2 assert i1 == i3 assert i1 == i4 assert i1 == i5 def test_period_constructor_offsets(self): assert (Period('1/1/2005', freq=offsets.MonthEnd()) == Period('1/1/2005', freq='M')) assert (Period('2005', freq=offsets.YearEnd()) == Period('2005', freq='A')) assert (Period('2005', freq=offsets.MonthEnd()) == Period('2005', freq='M')) assert (Period('3/10/12', freq=offsets.BusinessDay()) == Period('3/10/12', freq='B')) assert (Period('3/10/12', freq=offsets.Day()) == Period('3/10/12', freq='D')) assert (Period(year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)) == Period(year=2005, quarter=1, freq='Q')) assert (Period(year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)) == Period(year=2005, quarter=2, freq='Q')) assert (Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(year=2005, month=3, day=1, freq='D')) assert (Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(year=2012, month=3, day=10, freq='B')) expected = Period('2005-03-01', freq='3D') assert (Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected) assert Period(year=2005, month=3, day=1, freq='3D') == expected assert (Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(year=2012, month=3, day=10, freq='3B')) assert (Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq='M')) i1 = Period(ordinal=200701, freq=offsets.MonthEnd()) i2 = Period(ordinal=200701, freq='M') assert i1 == i2 assert i1.year == 18695 assert i2.year == 18695 i1 = Period(datetime(2007, 1, 1), freq='M') i2 = Period('200701', freq='M') assert i1 == i2 i1 = Period(date(2007, 1, 1), freq='M') i2 = Period(datetime(2007, 1, 1), freq='M') i3 = Period(np.datetime64('2007-01-01'), freq='M') i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M') i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M') assert i1 == i2 assert i1 == i3 assert i1 == i4 assert i1 == i5 i1 = Period('2007-01-01 09:00:00.001') expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L') assert i1 == expected expected = Period(np_datetime64_compat( '2007-01-01 09:00:00.001Z'), freq='L') assert i1 == expected i1 = Period('2007-01-01 09:00:00.00101') expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U') assert i1 == expected expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'), freq='U') assert i1 == expected def test_invalid_arguments(self): with pytest.raises(ValueError): Period(datetime.now()) with pytest.raises(ValueError): Period(datetime.now().date()) with pytest.raises(ValueError): Period(1.6, freq='D') with pytest.raises(ValueError): Period(ordinal=1.6, freq='D') with pytest.raises(ValueError): Period(ordinal=2, value=1, freq='D') with pytest.raises(ValueError): Period(month=1) with pytest.raises(ValueError): Period('-2000', 'A') with pytest.raises(DateParseError): Period('0', 'A') with pytest.raises(DateParseError): Period('1/1/-2000', 'A') def test_constructor_corner(self): expected = Period('2007-01', freq='2M') assert Period(year=2007, month=1, freq='2M') == expected 
assert Period(None) is NaT p = Period('2007-01-01', freq='D') result = Period(p, freq='A') exp = Period('2007', freq='A') assert result == exp def test_constructor_infer_freq(self): p = Period('2007-01-01') assert p.freq == 'D' p = Period('2007-01-01 07') assert p.freq == 'H' p = Period('2007-01-01 07:10') assert p.freq == 'T' p = Period('2007-01-01 07:10:15') assert p.freq == 'S' p = Period('2007-01-01 07:10:15.123') assert p.freq == 'L' p = Period('2007-01-01 07:10:15.123000') assert p.freq == 'L' p = Period('2007-01-01 07:10:15.123400') assert p.freq == 'U' def test_multiples(self): result1 = Period('1989', freq='2A') result2 = Period('1989', freq='A') assert result1.ordinal == result2.ordinal assert result1.freqstr == '2A-DEC' assert result2.freqstr == 'A-DEC' assert result1.freq == offsets.YearEnd(2) assert result2.freq == offsets.YearEnd() assert (result1 + 1).ordinal == result1.ordinal + 2 assert (1 + result1).ordinal == result1.ordinal + 2 assert (result1 - 1).ordinal == result2.ordinal - 2 assert (-1 + result1).ordinal == result2.ordinal - 2 @pytest.mark.parametrize('month', MONTHS) def test_period_cons_quarterly(self, month): # bugs in scikits.timeseries freq = 'Q-%s' % month exp = Period('1989Q3', freq=freq) assert '1989Q3' in str(exp) stamp = exp.to_timestamp('D', how='end') p = Period(stamp, freq=freq) assert p == exp stamp = exp.to_timestamp('3D', how='end') p = Period(stamp, freq=freq) assert p == exp @pytest.mark.parametrize('month', MONTHS) def test_period_cons_annual(self, month): # bugs in scikits.timeseries freq = 'A-%s' % month exp = Period('1989', freq=freq) stamp = exp.to_timestamp('D', how='end') + timedelta(days=30) p = Period(stamp, freq=freq) assert p == exp + 1 assert isinstance(p, Period) @pytest.mark.parametrize('day', DAYS) @pytest.mark.parametrize('num', range(10, 17)) def test_period_cons_weekly(self, num, day): daystr = '2011-02-%d' % num freq = 'W-%s' % day result = Period(daystr, freq=freq) expected = Period(daystr, freq='D').asfreq(freq) assert result == expected assert isinstance(result, Period) def test_period_from_ordinal(self): p = Period('2011-01', freq='M') res = Period._from_ordinal(p.ordinal, freq='M') assert p == res assert isinstance(res, Period) def test_period_cons_nat(self): p = Period('NaT', freq='M') assert p is NaT p = Period('nat', freq='W-SUN') assert p is NaT p = Period(iNaT, freq='D') assert p is NaT p = Period(iNaT, freq='3D') assert p is NaT p = Period(iNaT, freq='1D1H') assert p is NaT p = Period('NaT') assert p is NaT p = Period(iNaT) assert p is NaT def test_period_cons_mult(self): p1 = Period('2011-01', freq='3M') p2 = Period('2011-01', freq='M') assert p1.ordinal == p2.ordinal assert p1.freq == offsets.MonthEnd(3) assert p1.freqstr == '3M' assert p2.freq == offsets.MonthEnd() assert p2.freqstr == 'M' result = p1 + 1 assert result.ordinal == (p2 + 3).ordinal assert result.freq == p1.freq assert result.freqstr == '3M' result = p1 - 1 assert result.ordinal == (p2 - 3).ordinal assert result.freq == p1.freq assert result.freqstr == '3M' msg = ('Frequency must be positive, because it' ' represents span: -3M') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='-3M') msg = ('Frequency must be positive, because it' ' represents span: 0M') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='0M') def test_period_cons_combined(self): p = [(Period('2011-01', freq='1D1H'), Period('2011-01', freq='1H1D'), Period('2011-01', freq='H')), (Period(ordinal=1, freq='1D1H'), Period(ordinal=1, freq='1H1D'), 
Period(ordinal=1, freq='H'))] for p1, p2, p3 in p: assert p1.ordinal == p3.ordinal assert p2.ordinal == p3.ordinal assert p1.freq == offsets.Hour(25) assert p1.freqstr == '25H' assert p2.freq == offsets.Hour(25) assert p2.freqstr == '25H' assert p3.freq == offsets.Hour() assert p3.freqstr == 'H' result = p1 + 1 assert result.ordinal == (p3 + 25).ordinal assert result.freq == p1.freq assert result.freqstr == '25H' result = p2 + 1 assert result.ordinal == (p3 + 25).ordinal assert result.freq == p2.freq assert result.freqstr == '25H' result = p1 - 1 assert result.ordinal == (p3 - 25).ordinal assert result.freq == p1.freq assert result.freqstr == '25H' result = p2 - 1 assert result.ordinal == (p3 - 25).ordinal assert result.freq == p2.freq assert result.freqstr == '25H' msg = ('Frequency must be positive, because it' ' represents span: -25H') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='-1D1H') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='-1H1D') with pytest.raises(ValueError, match=msg): Period(ordinal=1, freq='-1D1H') with pytest.raises(ValueError, match=msg): Period(ordinal=1, freq='-1H1D') msg = ('Frequency must be positive, because it' ' represents span: 0D') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='0D0H') with pytest.raises(ValueError, match=msg): Period(ordinal=1, freq='0D0H') # You can only combine together day and intraday offsets msg = ('Invalid frequency: 1W1D') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='1W1D') msg = ('Invalid frequency: 1D1W') with pytest.raises(ValueError, match=msg): Period('2011-01', freq='1D1W') class TestPeriodMethods(object): def test_round_trip(self): p = Period('2000Q1') new_p = tm.round_trip_pickle(p) assert new_p == p def test_hash(self): assert (hash(Period('2011-01', freq='M')) == hash(Period('2011-01', freq='M'))) assert (hash(Period('2011-01-01', freq='D')) != hash(Period('2011-01', freq='M'))) assert (hash(Period('2011-01', freq='3M')) != hash(Period('2011-01', freq='2M'))) assert (hash(Period('2011-01', freq='M')) != hash(Period('2011-02', freq='M'))) # -------------------------------------------------------------- # to_timestamp @pytest.mark.parametrize('tzstr', ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']) def test_to_timestamp_tz_arg(self, tzstr): p = Period('1/1/2005', freq='M').to_timestamp(tz=tzstr) exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) assert p == exp assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz p = Period('1/1/2005', freq='3H').to_timestamp(tz=tzstr) exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) assert p == exp assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=tzstr) exp = Timestamp('31/12/2005', tz='UTC').tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) assert p == exp assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=tzstr) exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) assert p == exp assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz @pytest.mark.parametrize('tzstr', ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']) def test_to_timestamp_tz_arg_dateutil(self, tzstr): tz = maybe_get_tz(tzstr) p = Period('1/1/2005', freq='M').to_timestamp(tz=tz) exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) 
assert p == exp assert p.tz == dateutil_gettz(tzstr.split('/', 1)[1]) assert p.tz == exp.tz p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=tz) exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) assert p == exp assert p.tz == dateutil_gettz(tzstr.split('/', 1)[1]) assert p.tz == exp.tz def test_to_timestamp_tz_arg_dateutil_from_string(self): p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels') assert p.tz == dateutil_gettz('Europe/Brussels') def test_to_timestamp_mult(self): p = Period('2011-01', freq='M') assert p.to_timestamp(how='S') == Timestamp('2011-01-01') expected = Timestamp('2011-02-01') - Timedelta(1, 'ns') assert p.to_timestamp(how='E') == expected p = Period('2011-01', freq='3M') assert p.to_timestamp(how='S') == Timestamp('2011-01-01') expected = Timestamp('2011-04-01') - Timedelta(1, 'ns') assert p.to_timestamp(how='E') == expected def test_to_timestamp(self): p = Period('1982', freq='A') start_ts = p.to_timestamp(how='S') aliases = ['s', 'StarT', 'BEGIn'] for a in aliases: assert start_ts == p.to_timestamp('D', how=a) # freq with mult should not affect to the result assert start_ts == p.to_timestamp('3D', how=a) end_ts = p.to_timestamp(how='E') aliases = ['e', 'end', 'FINIsH'] for a in aliases: assert end_ts == p.to_timestamp('D', how=a) assert end_ts == p.to_timestamp('3D', how=a) from_lst = ['A', 'Q', 'M', 'W', 'B', 'D', 'H', 'Min', 'S'] def _ex(p): return Timestamp((p + p.freq).start_time.value - 1) for i, fcode in enumerate(from_lst): p = Period('1982', freq=fcode) result = p.to_timestamp().to_period(fcode) assert result == p assert p.start_time == p.to_timestamp(how='S') assert p.end_time == _ex(p) # Frequency other than daily p = Period('1985', freq='A') result = p.to_timestamp('H', how='end') expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns') assert result == expected result = p.to_timestamp('3H', how='end') assert result == expected result = p.to_timestamp('T', how='end') expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns') assert result == expected result = p.to_timestamp('2T', how='end') assert result == expected result = p.to_timestamp(how='end') expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns') assert result == expected expected = datetime(1985, 1, 1) result = p.to_timestamp('H', how='start') assert result == expected result = p.to_timestamp('T', how='start') assert result == expected result = p.to_timestamp('S', how='start') assert result == expected result = p.to_timestamp('3H', how='start') assert result == expected result = p.to_timestamp('5S', how='start') assert result == expected # -------------------------------------------------------------- # Rendering: __repr__, strftime, etc def test_repr(self): p = Period('Jan-2000') assert '2000-01' in repr(p) p = Period('2000-12-15') assert '2000-12-15' in repr(p) def test_repr_nat(self): p = Period('nat', freq='M') assert repr(NaT) in repr(p) def test_millisecond_repr(self): p = Period('2000-01-01 12:15:02.123') assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')" def test_microsecond_repr(self): p = Period('2000-01-01 12:15:02.123567') assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')" def test_strftime(self): # GH#3363 p = Period('2000-1-1 12:34:12', freq='S') res = p.strftime('%Y-%m-%d %H:%M:%S') assert res == '2000-01-01 12:34:12' assert isinstance(res, text_type) class TestPeriodProperties(object): "Test properties such as year, month, weekday, etc...." 
@pytest.mark.parametrize('freq', ['A', 'M', 'D', 'H']) def test_is_leap_year(self, freq): # GH 13727 p = Period('2000-01-01 00:00:00', freq=freq) assert p.is_leap_year assert isinstance(p.is_leap_year, bool) p = Period('1999-01-01 00:00:00', freq=freq) assert not p.is_leap_year p = Period('2004-01-01 00:00:00', freq=freq) assert p.is_leap_year p = Period('2100-01-01 00:00:00', freq=freq) assert not p.is_leap_year def test_quarterly_negative_ordinals(self): p = Period(ordinal=-1, freq='Q-DEC') assert p.year == 1969 assert p.quarter == 4 assert isinstance(p, Period) p = Period(ordinal=-2, freq='Q-DEC') assert p.year == 1969 assert p.quarter == 3 assert isinstance(p, Period) p = Period(ordinal=-2, freq='M') assert p.year == 1969 assert p.month == 11 assert isinstance(p, Period) def test_freq_str(self): i1 = Period('1982', freq='Min') assert i1.freq == offsets.Minute() assert i1.freqstr == 'T' def test_period_deprecated_freq(self): cases = {"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"], "B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"], "D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"], "H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"], "T": ["minute", "MINUTE", "MINUTELY", "minutely"], "S": ["sec", "SEC", "SECOND", "SECONDLY", "second"], "L": ["MILLISECOND", "MILLISECONDLY", "millisecond"], "U": ["MICROSECOND", "MICROSECONDLY", "microsecond"], "N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]} msg = INVALID_FREQ_ERR_MSG for exp, freqs in iteritems(cases): for freq in freqs: with pytest.raises(ValueError, match=msg): Period('2016-03-01 09:00', freq=freq) with pytest.raises(ValueError, match=msg): Period(ordinal=1, freq=freq) # check supported freq-aliases still works p1 = Period('2016-03-01 09:00', freq=exp) p2 = Period(ordinal=1, freq=exp) assert isinstance(p1, Period) assert isinstance(p2, Period) def test_start_time(self): freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S'] xp = datetime(2012, 1, 1) for f in freq_lst: p = Period('2012', freq=f) assert p.start_time == xp assert Period('2012', freq='B').start_time == datetime(2012, 1, 2) assert Period('2012', freq='W').start_time == datetime(2011, 12, 26) def test_end_time(self): p = Period('2012', freq='A') def _ex(*args): return Timestamp(Timestamp(datetime(*args)).value - 1) xp = _ex(2013, 1, 1) assert xp == p.end_time p = Period('2012', freq='Q') xp = _ex(2012, 4, 1) assert xp == p.end_time p = Period('2012', freq='M') xp = _ex(2012, 2, 1) assert xp == p.end_time p = Period('2012', freq='D') xp = _ex(2012, 1, 2) assert xp == p.end_time p = Period('2012', freq='H') xp = _ex(2012, 1, 1, 1) assert xp == p.end_time p = Period('2012', freq='B') xp = _ex(2012, 1, 3) assert xp == p.end_time p = Period('2012', freq='W') xp = _ex(2012, 1, 2) assert xp == p.end_time # Test for GH 11738 p = Period('2012', freq='15D') xp = _ex(2012, 1, 16) assert xp == p.end_time p = Period('2012', freq='1D1H') xp = _ex(2012, 1, 2, 1) assert xp == p.end_time p = Period('2012', freq='1H1D') xp = _ex(2012, 1, 2, 1) assert xp == p.end_time def test_anchor_week_end_time(self): def _ex(*args): return Timestamp(Timestamp(datetime(*args)).value - 1) p = Period('2013-1-1', 'W-SAT') xp = _ex(2013, 1, 6) assert p.end_time == xp def test_properties_annually(self): # Test properties on Periods with annually frequency. a_date = Period(freq='A', year=2007) assert a_date.year == 2007 def test_properties_quarterly(self): # Test properties on Periods with daily frequency. 
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1) qejan_date = Period(freq="Q-JAN", year=2007, quarter=1) qejun_date = Period(freq="Q-JUN", year=2007, quarter=1) # for x in range(3): for qd in (qedec_date, qejan_date, qejun_date): assert (qd + x).qyear == 2007 assert (qd + x).quarter == x + 1 def test_properties_monthly(self): # Test properties on Periods with daily frequency. m_date = Period(freq='M', year=2007, month=1) for x in range(11): m_ival_x = m_date + x assert m_ival_x.year == 2007 if 1 <= x + 1 <= 3: assert m_ival_x.quarter == 1 elif 4 <= x + 1 <= 6: assert m_ival_x.quarter == 2 elif 7 <= x + 1 <= 9: assert m_ival_x.quarter == 3 elif 10 <= x + 1 <= 12: assert m_ival_x.quarter == 4 assert m_ival_x.month == x + 1 def test_properties_weekly(self): # Test properties on Periods with daily frequency. w_date = Period(freq='W', year=2007, month=1, day=7) # assert w_date.year == 2007 assert w_date.quarter == 1 assert w_date.month == 1 assert w_date.week == 1 assert (w_date - 1).week == 52 assert w_date.days_in_month == 31 assert Period(freq='W', year=2012, month=2, day=1).days_in_month == 29 def test_properties_weekly_legacy(self): # Test properties on Periods with daily frequency. w_date = Period(freq='W', year=2007, month=1, day=7) assert w_date.year == 2007 assert w_date.quarter == 1 assert w_date.month == 1 assert w_date.week == 1 assert (w_date - 1).week == 52 assert w_date.days_in_month == 31 exp = Period(freq='W', year=2012, month=2, day=1) assert exp.days_in_month == 29 msg = INVALID_FREQ_ERR_MSG with pytest.raises(ValueError, match=msg): Period(freq='WK', year=2007, month=1, day=7) def test_properties_daily(self): # Test properties on Periods with daily frequency. b_date = Period(freq='B', year=2007, month=1, day=1) # assert b_date.year == 2007 assert b_date.quarter == 1 assert b_date.month == 1 assert b_date.day == 1 assert b_date.weekday == 0 assert b_date.dayofyear == 1 assert b_date.days_in_month == 31 assert Period(freq='B', year=2012, month=2, day=1).days_in_month == 29 d_date = Period(freq='D', year=2007, month=1, day=1) assert d_date.year == 2007 assert d_date.quarter == 1 assert d_date.month == 1 assert d_date.day == 1 assert d_date.weekday == 0 assert d_date.dayofyear == 1 assert d_date.days_in_month == 31 assert Period(freq='D', year=2012, month=2, day=1).days_in_month == 29 def test_properties_hourly(self): # Test properties on Periods with hourly frequency. h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0) h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0) for h_date in [h_date1, h_date2]: assert h_date.year == 2007 assert h_date.quarter == 1 assert h_date.month == 1 assert h_date.day == 1 assert h_date.weekday == 0 assert h_date.dayofyear == 1 assert h_date.hour == 0 assert h_date.days_in_month == 31 assert Period(freq='H', year=2012, month=2, day=1, hour=0).days_in_month == 29 def test_properties_minutely(self): # Test properties on Periods with minutely frequency. t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0, minute=0) # assert t_date.quarter == 1 assert t_date.month == 1 assert t_date.day == 1 assert t_date.weekday == 0 assert t_date.dayofyear == 1 assert t_date.hour == 0 assert t_date.minute == 0 assert t_date.days_in_month == 31 assert Period(freq='D', year=2012, month=2, day=1, hour=0, minute=0).days_in_month == 29 def test_properties_secondly(self): # Test properties on Periods with secondly frequency. 
s_date = Period(freq='Min', year=2007, month=1, day=1, hour=0, minute=0, second=0) # assert s_date.year == 2007 assert s_date.quarter == 1 assert s_date.month == 1 assert s_date.day == 1 assert s_date.weekday == 0 assert s_date.dayofyear == 1 assert s_date.hour == 0 assert s_date.minute == 0 assert s_date.second == 0 assert s_date.days_in_month == 31 assert Period(freq='Min', year=2012, month=2, day=1, hour=0, minute=0, second=0).days_in_month == 29 class TestPeriodField(object): def test_get_period_field_array_raises_on_out_of_range(self): msg = "Buffer dtype mismatch, expected 'int64_t' but got 'double'" with pytest.raises(ValueError, match=msg): libperiod.get_period_field_arr(-1, np.empty(1), 0) class TestComparisons(object): def setup_method(self, method): self.january1 = Period('2000-01', 'M') self.january2 = Period('2000-01', 'M') self.february = Period('2000-02', 'M') self.march = Period('2000-03', 'M') self.day = Period('2012-01-01', 'D') def test_equal(self): assert self.january1 == self.january2 def test_equal_Raises_Value(self): with pytest.raises(period.IncompatibleFrequency): self.january1 == self.day def test_notEqual(self): assert self.january1 != 1 assert self.january1 != self.february def test_greater(self): assert self.february > self.january1 def test_greater_Raises_Value(self): with pytest.raises(period.IncompatibleFrequency): self.january1 > self.day def test_greater_Raises_Type(self): with pytest.raises(TypeError): self.january1 > 1 def test_greaterEqual(self): assert self.january1 >= self.january2 def test_greaterEqual_Raises_Value(self): with pytest.raises(period.IncompatibleFrequency): self.january1 >= self.day with pytest.raises(TypeError): print(self.january1 >= 1) def test_smallerEqual(self): assert self.january1 <= self.january2 def test_smallerEqual_Raises_Value(self): with pytest.raises(period.IncompatibleFrequency): self.january1 <= self.day def test_smallerEqual_Raises_Type(self): with pytest.raises(TypeError): self.january1 <= 1 def test_smaller(self): assert self.january1 < self.february def test_smaller_Raises_Value(self): with pytest.raises(period.IncompatibleFrequency): self.january1 < self.day def test_smaller_Raises_Type(self): with pytest.raises(TypeError): self.january1 < 1 def test_sort(self): periods = [self.march, self.january1, self.february] correctPeriods = [self.january1, self.february, self.march] assert sorted(periods) == correctPeriods def test_period_nat_comp(self): p_nat = Period('NaT', freq='D') p = Period('2011-01-01', freq='D') nat = Timestamp('NaT') t = Timestamp('2011-01-01') # confirm Period('NaT') work identical with Timestamp('NaT') for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), (nat, t), (t, nat), (nat, nat)]: assert not left < right assert not left > right assert not left == right assert left != right assert not left <= right assert not left >= right class TestArithmetic(object): def test_sub_delta(self): left, right = Period('2011', freq='A'), Period('2007', freq='A') result = left - right assert result == 4 * right.freq with pytest.raises(period.IncompatibleFrequency): left - Period('2007-01', freq='M') def test_add_integer(self): per1 = Period(freq='D', year=2008, month=1, day=1) per2 = Period(freq='D', year=2008, month=1, day=2) assert per1 + 1 == per2 assert 1 + per1 == per2 def test_add_sub_nat(self): # GH#13071 p = Period('2011-01', freq='M') assert p + NaT is NaT assert NaT + p is NaT assert p - NaT is NaT assert NaT - p is NaT p = Period('NaT', freq='M') assert p + NaT is NaT assert NaT + p is NaT assert 
p - NaT is NaT assert NaT - p is NaT def test_add_invalid(self): # GH#4731 per1 = Period(freq='D', year=2008, month=1, day=1) per2 = Period(freq='D', year=2008, month=1, day=2) msg = r"unsupported operand type\(s\)" with pytest.raises(TypeError, match=msg): per1 + "str" with pytest.raises(TypeError, match=msg): "str" + per1 with pytest.raises(TypeError, match=msg): per1 + per2 boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])] ids = ['identity', 'Series', 'Index'] @pytest.mark.parametrize('lbox', boxes, ids=ids) @pytest.mark.parametrize('rbox', boxes, ids=ids) def test_add_timestamp_raises(self, rbox, lbox): # GH#17983 ts = Timestamp('2017') per = Period('2017', freq='M') # We may get a different message depending on which class raises # the error. msg = (r"cannot add|unsupported operand|" r"can only operate on a|incompatible type|" r"ufunc add cannot use operands") with pytest.raises(TypeError, match=msg): lbox(ts) + rbox(per) with pytest.raises(TypeError, match=msg): lbox(per) + rbox(ts) with pytest.raises(TypeError, match=msg): lbox(per) + rbox(per) def test_sub(self): per1 = Period('2011-01-01', freq='D') per2 = Period('2011-01-15', freq='D') off = per1.freq assert per1 - per2 == -14 * off assert per2 - per1 == 14 * off msg = r"Input has different freq=M from Period\(freq=D\)" with pytest.raises(period.IncompatibleFrequency, match=msg): per1 - Period('2011-02', freq='M') @pytest.mark.parametrize('n', [1, 2, 3, 4]) def test_sub_n_gt_1_ticks(self, tick_classes, n): # GH 23878 p1 = pd.Period('19910905', freq=tick_classes(n)) p2 = pd.Period('19920406', freq=tick_classes(n)) expected = (pd.Period(str(p2), freq=p2.freq.base) - pd.Period(str(p1), freq=p1.freq.base)) assert (p2 - p1) == expected @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('n', [1, 2, 3, 4]) @pytest.mark.parametrize('offset, kwd_name', [ (pd.offsets.YearEnd, 'month'), (pd.offsets.QuarterEnd, 'startingMonth'), (pd.offsets.MonthEnd, None), (pd.offsets.Week, 'weekday') ]) def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize): # GH 23878 kwds = {kwd_name: 3} if kwd_name is not None else {} p1_d = '19910905' p2_d = '19920406' p1 = pd.Period(p1_d, freq=offset(n, normalize, **kwds)) p2 = pd.Period(p2_d, freq=offset(n, normalize, **kwds)) expected = (pd.Period(p2_d, freq=p2.freq.base) - pd.Period(p1_d, freq=p1.freq.base)) assert (p2 - p1) == expected def test_add_offset(self): # freq is DateOffset for freq in ['A', '2A', '3A']: p = Period('2011', freq=freq) exp = Period('2013', freq=freq) assert p + offsets.YearEnd(2) == exp assert offsets.YearEnd(2) + p == exp for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: with pytest.raises(period.IncompatibleFrequency): p + o if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: with pytest.raises(period.IncompatibleFrequency): o + p for freq in ['M', '2M', '3M']: p = Period('2011-03', freq=freq) exp = Period('2011-05', freq=freq) assert p + offsets.MonthEnd(2) == exp assert offsets.MonthEnd(2) + p == exp exp = Period('2012-03', freq=freq) assert p + offsets.MonthEnd(12) == exp assert offsets.MonthEnd(12) + p == exp for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: with pytest.raises(period.IncompatibleFrequency): p + o if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: with pytest.raises(period.IncompatibleFrequency): o + p # freq is Tick for freq in 
['D', '2D', '3D']: p = Period('2011-04-01', freq=freq) exp = Period('2011-04-06', freq=freq) assert p + offsets.Day(5) == exp assert offsets.Day(5) + p == exp exp = Period('2011-04-02', freq=freq) assert p + offsets.Hour(24) == exp assert offsets.Hour(24) + p == exp exp = Period('2011-04-03', freq=freq) assert p + np.timedelta64(2, 'D') == exp with pytest.raises(TypeError): np.timedelta64(2, 'D') + p exp = Period('2011-04-02', freq=freq) assert p + np.timedelta64(3600 * 24, 's') == exp with pytest.raises(TypeError): np.timedelta64(3600 * 24, 's') + p exp = Period('2011-03-30', freq=freq) assert p + timedelta(-2) == exp assert timedelta(-2) + p == exp exp = Period('2011-04-03', freq=freq) assert p + timedelta(hours=48) == exp assert timedelta(hours=48) + p == exp for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23)]: with pytest.raises(period.IncompatibleFrequency): p + o if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: with pytest.raises(period.IncompatibleFrequency): o + p for freq in ['H', '2H', '3H']: p = Period('2011-04-01 09:00', freq=freq) exp = Period('2011-04-03 09:00', freq=freq) assert p + offsets.Day(2) == exp assert offsets.Day(2) + p == exp exp = Period('2011-04-01 12:00', freq=freq) assert p + offsets.Hour(3) == exp assert offsets.Hour(3) + p == exp exp = Period('2011-04-01 12:00', freq=freq) assert p + np.timedelta64(3, 'h') == exp with pytest.raises(TypeError): np.timedelta64(3, 'h') + p exp = Period('2011-04-01 10:00', freq=freq) assert p + np.timedelta64(3600, 's') == exp with pytest.raises(TypeError): np.timedelta64(3600, 's') + p exp = Period('2011-04-01 11:00', freq=freq) assert p + timedelta(minutes=120) == exp assert timedelta(minutes=120) + p == exp exp = Period('2011-04-05 12:00', freq=freq) assert p + timedelta(days=4, minutes=180) == exp assert timedelta(days=4, minutes=180) + p == exp for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: with pytest.raises(period.IncompatibleFrequency): p + o if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: with pytest.raises(period.IncompatibleFrequency): o + p def test_add_offset_nat(self): # freq is DateOffset for freq in ['A', '2A', '3A']: p = Period('NaT', freq=freq) for o in [offsets.YearEnd(2)]: assert p + o is NaT assert o + p is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: assert p + o is NaT if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: assert o + p is NaT for freq in ['M', '2M', '3M']: p = Period('NaT', freq=freq) for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: assert p + o is NaT if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: assert o + p is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: assert p + o is NaT if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: assert o + p is NaT # freq is Tick for freq in ['D', '2D', '3D']: p = Period('NaT', freq=freq) for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: assert p + o is NaT if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: assert o + p is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), 
np.timedelta64(4, 'h'), timedelta(hours=23)]: assert p + o is NaT if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: assert o + p is NaT for freq in ['H', '2H', '3H']: p = Period('NaT', freq=freq) for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), np.timedelta64(3600, 's'), timedelta(minutes=120), timedelta(days=4, minutes=180)]: assert p + o is NaT if not isinstance(o, np.timedelta64): assert o + p is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: assert p + o is NaT if isinstance(o, np.timedelta64): with pytest.raises(TypeError): o + p else: assert o + p is NaT def test_sub_offset(self): # freq is DateOffset for freq in ['A', '2A', '3A']: p = Period('2011', freq=freq) assert p - offsets.YearEnd(2) == Period('2009', freq=freq) for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: with pytest.raises(period.IncompatibleFrequency): p - o for freq in ['M', '2M', '3M']: p = Period('2011-03', freq=freq) assert p - offsets.MonthEnd(2) == Period('2011-01', freq=freq) assert p - offsets.MonthEnd(12) == Period('2010-03', freq=freq) for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: with pytest.raises(period.IncompatibleFrequency): p - o # freq is Tick for freq in ['D', '2D', '3D']: p = Period('2011-04-01', freq=freq) assert p - offsets.Day(5) == Period('2011-03-27', freq=freq) assert p - offsets.Hour(24) == Period('2011-03-31', freq=freq) assert p - np.timedelta64(2, 'D') == Period( '2011-03-30', freq=freq) assert p - np.timedelta64(3600 * 24, 's') == Period( '2011-03-31', freq=freq) assert p - timedelta(-2) == Period('2011-04-03', freq=freq) assert p - timedelta(hours=48) == Period('2011-03-30', freq=freq) for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23)]: with pytest.raises(period.IncompatibleFrequency): p - o for freq in ['H', '2H', '3H']: p = Period('2011-04-01 09:00', freq=freq) assert p - offsets.Day(2) == Period('2011-03-30 09:00', freq=freq) assert p - offsets.Hour(3) == Period('2011-04-01 06:00', freq=freq) assert p - np.timedelta64(3, 'h') == Period( '2011-04-01 06:00', freq=freq) assert p - np.timedelta64(3600, 's') == Period( '2011-04-01 08:00', freq=freq) assert p - timedelta(minutes=120) == Period( '2011-04-01 07:00', freq=freq) assert p - timedelta(days=4, minutes=180) == Period( '2011-03-28 06:00', freq=freq) for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: with pytest.raises(period.IncompatibleFrequency): p - o def test_sub_offset_nat(self): # freq is DateOffset for freq in ['A', '2A', '3A']: p = Period('NaT', freq=freq) for o in [offsets.YearEnd(2)]: assert p - o is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: assert p - o is NaT for freq in ['M', '2M', '3M']: p = Period('NaT', freq=freq) for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: assert p - o is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: assert p - o is NaT # freq is Tick for freq in ['D', '2D', '3D']: p = Period('NaT', freq=freq) for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: 
assert p - o is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23)]: assert p - o is NaT for freq in ['H', '2H', '3H']: p = Period('NaT', freq=freq) for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), np.timedelta64(3600, 's'), timedelta(minutes=120), timedelta(days=4, minutes=180)]: assert p - o is NaT for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: assert p - o is NaT @pytest.mark.parametrize('freq', ['M', '2M', '3M']) def test_nat_ops(self, freq): p = Period('NaT', freq=freq) assert p + 1 is NaT assert 1 + p is NaT assert p - 1 is NaT assert p - Period('2011-01', freq=freq) is NaT assert Period('2011-01', freq=freq) - p is NaT def test_period_ops_offset(self): p = Period('2011-04-01', freq='D') result = p + offsets.Day() exp = Period('2011-04-02', freq='D') assert result == exp result = p - offsets.Day(2) exp = Period('2011-03-30', freq='D') assert result == exp msg = r"Input cannot be converted to Period\(freq=D\)" with pytest.raises(period.IncompatibleFrequency, match=msg): p + offsets.Hour(2) with pytest.raises(period.IncompatibleFrequency, match=msg): p - offsets.Hour(2) def test_period_immutable(): # see gh-17116 per = Period('2014Q1') with pytest.raises(AttributeError): per.ordinal = 14 freq = per.freq with pytest.raises(AttributeError): per.freq = 2 * freq # TODO: This doesn't fail on all systems; track down which @pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems", strict=False) def test_small_year_parsing(): per1 = Period('0001-01-07', 'D') assert per1.year == 1 assert per1.day == 7
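# ---------------------------------------------------------------------------
# Standalone sketch (not part of the test suite) of the semantics asserted
# above: a Period absorbs offsets and timedeltas that resolve to a whole
# number of its own frequency, while a NaT period absorbs any offset and
# stays NaT instead of raising.  Assumes a pandas version matching this test
# module.
from datetime import timedelta

import numpy as np
from pandas import NaT, Period
from pandas.tseries import offsets

p = Period('2011-04-01', freq='D')
assert p + offsets.Day(5) == Period('2011-04-06', freq='D')
assert p - np.timedelta64(2, 'D') == Period('2011-03-30', freq='D')
assert p + timedelta(hours=48) == Period('2011-04-03', freq='D')

# NaT absorbs both compatible and incompatible offsets.
assert Period('NaT', freq='D') + offsets.Day(5) is NaT
assert Period('NaT', freq='D') + np.timedelta64(4, 'h') is NaT
# ---------------------------------------------------------------------------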
# Copyright (c) 2019, CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. try: import simplejson as json except ImportError: import json from django.contrib.auth.models import User from rest_framework import serializers from questionnaires_manager.models import QuestionsSet, Questionnaire, QuestionnaireStep,\ QuestionnaireRequest, QuestionnaireAnswers, QuestionnaireStepAnswers from slides_manager.serializers import SlidesSetSerializer class QuestionsSetSerializer(serializers.ModelSerializer): author = serializers.SlugRelatedField( slug_field='username', queryset=User.objects.all() ) class Meta: model = QuestionsSet fields = ('id', 'label', 'creation_date', 'questions_json', 'author') read_only_fields = ('id',) def validate_questions_json(self, value): try: json.loads(value) return value except ValueError: raise serializers.ValidationError('Not a valid JSON in \'questions_json\' field') class QuestionnaireSerializer(serializers.ModelSerializer): author = serializers.SlugRelatedField( slug_field='username', queryset=User.objects.all() ) class Meta: model = Questionnaire fields = ('id', 'label', 'creation_date', 'author', 'steps_count') read_only_fields = ('id', 'steps_count') class QuestionnaireStepSerializer(serializers.ModelSerializer): class Meta: model = QuestionnaireStep fields = ('id', 'questions', 'slides_set_a', 'slides_set_a_label', 'slides_set_b', 'slides_set_b_label', 'questionnaire', 'answers', 'step_index', 'creation_date') read_only_fields = ('id', 'answers') class QuestionnaireRequestSerializer(serializers.ModelSerializer): reviewer = serializers.SlugRelatedField( slug_field='username', queryset=User.objects.all() ) annotation_type = serializers.SerializerMethodField() started = serializers.SerializerMethodField() completed = serializers.SerializerMethodField() class Meta: model = QuestionnaireRequest fields = ('id', 'label', 'extended_label', 'annotation_type', 'questionnaire_panel_a', 'questionnaire_panel_b', 'reviewer', 'creation_date', 'start_date', 'completion_date', 'answers', 'started', 'completed') read_only_fields = ('id', 'answers', 'annotation_type') @staticmethod def get_annotation_type(obj): return 'QUESTIONNAIRE' @staticmethod def get_started(obj): return obj.is_started() @staticmethod def get_completed(obj): return obj.is_completed() class QuestionnaireRequestStatusSerializer(serializers.ModelSerializer): reviewer = serializers.SlugRelatedField( slug_field='username', queryset=User.objects.all() ) started = serializers.SerializerMethodField() completed = 
serializers.SerializerMethodField() can_be_closed = serializers.SerializerMethodField() class Meta: model = QuestionnaireRequest fields = ('id', 'label', 'extended_label', 'reviewer', 'creation_date', 'start_date', 'completion_date', 'started', 'completed', 'can_be_closed') @staticmethod def get_started(obj): return obj.is_started() @staticmethod def get_completed(obj): return obj.is_completed() @staticmethod def get_can_be_closed(obj): return obj.can_be_closed() class QuestionnaireAnswersSerializer(serializers.ModelSerializer): reviewer = serializers.SlugRelatedField( slug_field='username', queryset=User.objects.all() ) last_completed_step_index = serializers.SerializerMethodField() class Meta: model = QuestionnaireAnswers fields = ('id', 'questionnaire_request', 'questionnaire', 'reviewer', 'creation_date', 'completion_date', 'steps', 'steps_count', 'completed_steps_count', 'last_completed_step_index', 'remaining_steps') read_only_fields = ('id', 'creation_date', 'steps', 'steps_count', 'completed_steps_count', 'last_completed_step_index', 'remaining_steps') @staticmethod def get_last_completed_step_index(obj): return obj.get_last_completed_step_index() class QuestionnaireStepAnswersSerializer(serializers.ModelSerializer): class Meta: model = QuestionnaireStepAnswers fields = ('id', 'questionnaire_answers', 'questionnaire_step', 'answers_json', 'creation_date', 'step_index') read_only_fields = ('id', 'step_index') def validate_answers_json(self, value): try: json.loads(value) return value except ValueError: raise serializers.ValidationError('Not a valid JSON in \'answers_json\' field') class QuestionnaireDetailsSerializer(serializers.ModelSerializer): author = serializers.SlugRelatedField( slug_field='username', queryset=User.objects.all() ) steps = QuestionnaireStepSerializer(many=True, read_only=True) class Meta: model = Questionnaire fields = ('id', 'label', 'creation_date', 'author', 'steps') read_only_fields = ('id', 'steps') class QuestionnaireStepDetailsSerializer(QuestionnaireStepSerializer): questions = QuestionsSetSerializer(read_only=True) slides_set_a = SlidesSetSerializer(read_only=True, allow_null=True) slides_set_b = SlidesSetSerializer(read_only=True, allow_null=True) questionnaire = QuestionnaireSerializer(read_only=True) answers = QuestionnaireStepAnswersSerializer(many=True, read_only=True) class QuestionnaireRequestDetailsSerializer(QuestionnaireRequestSerializer): questionnaire_panel_a = QuestionnaireSerializer(read_only=True) questionnaire_panel_b = QuestionnaireSerializer(read_only=True) answers = serializers.SerializerMethodField() @staticmethod def get_answers(obj): answers = dict() try: answers['questionnaire_panel_a'] = QuestionnaireAnswersSerializer( QuestionnaireAnswers.objects.get(questionnaire_request=obj, questionnaire=obj.questionnaire_panel_a) ).data except QuestionnaireAnswers.DoesNotExist: answers['questionnaire_panel_a'] = None if obj.questionnaire_panel_b: try: answers['questionnaire_panel_b'] = QuestionnaireAnswersSerializer( QuestionnaireAnswers.objects.get(questionnaire_request=obj, questionnaire=obj.questionnaire_panel_b) ).data except QuestionnaireAnswers.DoesNotExist: answers['questionnaire_panel_b'] = None else: answers['questionnaire_panel_b'] = None return answers class QuestionnaireAnswersDetailsSerializer(QuestionnaireAnswersSerializer): steps = QuestionnaireStepAnswersSerializer(many=True, read_only=True)
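# ---------------------------------------------------------------------------
# Usage note (an illustrative sketch, not part of the CRS4 code above):
# validate_questions_json / validate_answers_json follow DRF's field-level
# validator convention: a validate_<field_name> method receives the raw value
# and must return it or raise serializers.ValidationError.  The same JSON
# check, restated outside Django so it runs standalone (ValidationError
# replaced by ValueError, field names made up):
import json


def check_json_field(value, field_name):
    """Return value unchanged if it parses as JSON, otherwise complain."""
    try:
        json.loads(value)
        return value
    except ValueError:
        raise ValueError("Not a valid JSON in '%s' field" % field_name)


assert check_json_field('{"q1": "yes"}', 'questions_json') == '{"q1": "yes"}'
try:
    check_json_field('{not json}', 'answers_json')
except ValueError as err:
    print(err)  # Not a valid JSON in 'answers_json' field
# ---------------------------------------------------------------------------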
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import with_statement import logging import os import pipes import random import shutil import subprocess import sys import tempfile import time import urllib2 from optparse import OptionParser from sys import stderr import boto from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType from boto import ec2 class UsageError(Exception): pass # A URL prefix from which to fetch AMI information AMI_PREFIX = "https://raw.github.com/mesos/spark-ec2/v2/ami-list" # Configure and parse our command-line arguments def parse_args(): parser = OptionParser(usage="spark-ec2 [options] <action> <cluster_name>" + "\n\n<action> can be: launch, destroy, login, stop, start, get-master", add_help_option=False) parser.add_option("-h", "--help", action="help", help="Show this help message and exit") parser.add_option("-s", "--slaves", type="int", default=1, help="Number of slaves to launch (default: 1)") parser.add_option("-w", "--wait", type="int", default=120, help="Seconds to wait for nodes to start (default: 120)") parser.add_option("-k", "--key-pair", help="Key pair to use on instances") parser.add_option("-i", "--identity-file", help="SSH private key file to use for logging into instances") parser.add_option("-t", "--instance-type", default="m1.large", help="Type of instance to launch (default: m1.large). 
" + "WARNING: must be 64-bit; small instances won't work") parser.add_option("-m", "--master-instance-type", default="", help="Master instance type (leave empty for same as instance-type)") parser.add_option("-r", "--region", default="us-east-1", help="EC2 region zone to launch instances in") parser.add_option("-z", "--zone", default="", help="Availability zone to launch instances in, or 'all' to spread " + "slaves across multiple (an additional $0.01/Gb for bandwidth" + "between zones applies)") parser.add_option("-a", "--ami", help="Amazon Machine Image ID to use") parser.add_option("-v", "--spark-version", default="1.0.0", help="Version of Spark to use: 'X.Y.Z' or a specific git hash") parser.add_option("--spark-git-repo", default="https://github.com/apache/spark", help="Github repo from which to checkout supplied commit hash") parser.add_option("--hadoop-major-version", default="1", help="Major version of Hadoop (default: 1)") parser.add_option("-D", metavar="[ADDRESS:]PORT", dest="proxy_port", help="Use SSH dynamic port forwarding to create a SOCKS proxy at " + "the given local address (for use with login)") parser.add_option("--resume", action="store_true", default=False, help="Resume installation on a previously launched cluster " + "(for debugging)") parser.add_option("--ebs-vol-size", metavar="SIZE", type="int", default=0, help="Attach a new EBS volume of size SIZE (in GB) to each node as " + "/vol. The volumes will be deleted when the instances terminate. " + "Only possible on EBS-backed AMIs.") parser.add_option("--swap", metavar="SWAP", type="int", default=1024, help="Swap space to set up per node, in MB (default: 1024)") parser.add_option("--spot-price", metavar="PRICE", type="float", help="If specified, launch slaves as spot instances with the given " + "maximum price (in dollars)") parser.add_option("--ganglia", action="store_true", default=True, help="Setup Ganglia monitoring on cluster (default: on). 
NOTE: " + "the Ganglia page will be publicly accessible") parser.add_option("--no-ganglia", action="store_false", dest="ganglia", help="Disable Ganglia monitoring for the cluster") parser.add_option("-u", "--user", default="root", help="The SSH user you want to connect as (default: root)") parser.add_option("--delete-groups", action="store_true", default=False, help="When destroying a cluster, delete the security groups that were created") parser.add_option("--use-existing-master", action="store_true", default=False, help="Launch fresh slaves, but use an existing stopped master if possible") parser.add_option("--worker-instances", type="int", default=1, help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: 1)") parser.add_option("--master-opts", type="string", default="", help="Extra options to give to master through SPARK_MASTER_OPTS variable (e.g -Dspark.worker.timeout=180)") (opts, args) = parser.parse_args() if len(args) != 2: parser.print_help() sys.exit(1) (action, cluster_name) = args # Boto config check # http://boto.cloudhackers.com/en/latest/boto_config_tut.html home_dir = os.getenv('HOME') if home_dir == None or not os.path.isfile(home_dir + '/.boto'): if not os.path.isfile('/etc/boto.cfg'): if os.getenv('AWS_ACCESS_KEY_ID') == None: print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " + "must be set") sys.exit(1) if os.getenv('AWS_SECRET_ACCESS_KEY') == None: print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " + "must be set") sys.exit(1) return (opts, action, cluster_name) # Get the EC2 security group of the given name, creating it if it doesn't exist def get_or_make_group(conn, name): groups = conn.get_all_security_groups() group = [g for g in groups if g.name == name] if len(group) > 0: return group[0] else: print "Creating security group " + name return conn.create_security_group(name, "Spark EC2 group") # Wait for a set of launched instances to exit the "pending" state # (i.e. either to start running or to fail and be terminated) def wait_for_instances(conn, instances): while True: for i in instances: i.update() if len([i for i in instances if i.state == 'pending']) > 0: time.sleep(5) else: return # Check whether a given EC2 instance object is in a state we consider active, # i.e. not terminating or terminated. We count both stopping and stopped as # active since we can restart stopped clusters. def is_active(instance): return (instance.state in ['pending', 'running', 'stopping', 'stopped']) # Return correct versions of Spark and Shark, given the supplied Spark version def get_spark_shark_version(opts): spark_shark_map = {"0.7.3": "0.7.1", "0.8.0": "0.8.0", "0.8.1": "0.8.1", "0.9.0": "0.9.0", "0.9.1": "0.9.1", "1.0.0": "1.0.0"} version = opts.spark_version.replace("v", "") if version not in spark_shark_map: print >> stderr, "Don't know about Spark version: %s" % version sys.exit(1) return (version, spark_shark_map[version]) # Attempt to resolve an appropriate AMI given the architecture and # region of the request. 
def get_spark_ami(opts): instance_types = { "m1.small": "pvm", "m1.medium": "pvm", "m1.large": "pvm", "m1.xlarge": "pvm", "t1.micro": "pvm", "c1.medium": "pvm", "c1.xlarge": "pvm", "m2.xlarge": "pvm", "m2.2xlarge": "pvm", "m2.4xlarge": "pvm", "cc1.4xlarge": "hvm", "cc2.8xlarge": "hvm", "cg1.4xlarge": "hvm", "hs1.8xlarge": "hvm", "hi1.4xlarge": "hvm", "m3.xlarge": "hvm", "m3.2xlarge": "hvm", "cr1.8xlarge": "hvm", "i2.xlarge": "hvm", "i2.2xlarge": "hvm", "i2.4xlarge": "hvm", "i2.8xlarge": "hvm", "c3.large": "pvm", "c3.xlarge": "pvm", "c3.2xlarge": "pvm", "c3.4xlarge": "pvm", "c3.8xlarge": "pvm" } if opts.instance_type in instance_types: instance_type = instance_types[opts.instance_type] else: instance_type = "pvm" print >> stderr,\ "Don't recognize %s, assuming type is pvm" % opts.instance_type ami_path = "%s/%s/%s" % (AMI_PREFIX, opts.region, instance_type) try: ami = urllib2.urlopen(ami_path).read().strip() print "Spark AMI: " + ami except: print >> stderr, "Could not resolve AMI at: " + ami_path sys.exit(1) return ami # Launch a cluster of the given name, by setting up its security groups, # and then starting new instances in them. # Returns a tuple of EC2 reservation objects for the master and slaves # Fails if there already instances running in the cluster's groups. def launch_cluster(conn, opts, cluster_name): if opts.identity_file is None: print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections." sys.exit(1) if opts.key_pair is None: print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances." sys.exit(1) print "Setting up security groups..." master_group = get_or_make_group(conn, cluster_name + "-master") slave_group = get_or_make_group(conn, cluster_name + "-slaves") if master_group.rules == []: # Group was just now created master_group.authorize(src_group=master_group) master_group.authorize(src_group=slave_group) master_group.authorize('tcp', 22, 22, '0.0.0.0/0') master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0') master_group.authorize('tcp', 19999, 19999, '0.0.0.0/0') master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0') master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0') master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0') master_group.authorize('tcp', 4040, 4045, '0.0.0.0/0') if opts.ganglia: master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0') if slave_group.rules == []: # Group was just now created slave_group.authorize(src_group=master_group) slave_group.authorize(src_group=slave_group) slave_group.authorize('tcp', 22, 22, '0.0.0.0/0') slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0') slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0') slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0') slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0') slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0') # Check if instances are already running in our groups existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name, die_on_error=False) if existing_slaves or (existing_masters and not opts.use_existing_master): print >> stderr, ("ERROR: There are already instances running in " + "group %s or %s" % (master_group.name, slave_group.name)) sys.exit(1) # Figure out Spark AMI if opts.ami is None: opts.ami = get_spark_ami(opts) print "Launching instances..." 
try: image = conn.get_all_images(image_ids=[opts.ami])[0] except: print >> stderr, "Could not find AMI " + opts.ami sys.exit(1) # Create block device mapping so that we can add an EBS volume if asked to block_map = BlockDeviceMapping() if opts.ebs_vol_size > 0: device = EBSBlockDeviceType() device.size = opts.ebs_vol_size device.delete_on_termination = True block_map["/dev/sdv"] = device # Launch slaves if opts.spot_price != None: # Launch spot instances with the requested price print ("Requesting %d slaves as spot instances with price $%.3f" % (opts.slaves, opts.spot_price)) zones = get_zones(conn, opts) num_zones = len(zones) i = 0 my_req_ids = [] for zone in zones: num_slaves_this_zone = get_partition(opts.slaves, num_zones, i) slave_reqs = conn.request_spot_instances( price = opts.spot_price, image_id = opts.ami, launch_group = "launch-group-%s" % cluster_name, placement = zone, count = num_slaves_this_zone, key_name = opts.key_pair, security_groups = [slave_group], instance_type = opts.instance_type, block_device_map = block_map) my_req_ids += [req.id for req in slave_reqs] i += 1 print "Waiting for spot instances to be granted..." try: while True: time.sleep(10) reqs = conn.get_all_spot_instance_requests() id_to_req = {} for r in reqs: id_to_req[r.id] = r active_instance_ids = [] for i in my_req_ids: if i in id_to_req and id_to_req[i].state == "active": active_instance_ids.append(id_to_req[i].instance_id) if len(active_instance_ids) == opts.slaves: print "All %d slaves granted" % opts.slaves reservations = conn.get_all_instances(active_instance_ids) slave_nodes = [] for r in reservations: slave_nodes += r.instances break else: print "%d of %d slaves granted, waiting longer" % ( len(active_instance_ids), opts.slaves) except: print "Canceling spot instance requests" conn.cancel_spot_instance_requests(my_req_ids) # Log a warning if any of these requests actually launched instances: (master_nodes, slave_nodes) = get_existing_cluster( conn, opts, cluster_name, die_on_error=False) running = len(master_nodes) + len(slave_nodes) if running: print >> stderr, ("WARNING: %d instances are still running" % running) sys.exit(0) else: # Launch non-spot instances zones = get_zones(conn, opts) num_zones = len(zones) i = 0 slave_nodes = [] for zone in zones: num_slaves_this_zone = get_partition(opts.slaves, num_zones, i) if num_slaves_this_zone > 0: slave_res = image.run(key_name = opts.key_pair, security_groups = [slave_group], instance_type = opts.instance_type, placement = zone, min_count = num_slaves_this_zone, max_count = num_slaves_this_zone, block_device_map = block_map) slave_nodes += slave_res.instances print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone, zone, slave_res.id) i += 1 # Launch or resume masters if existing_masters: print "Starting master..." for inst in existing_masters: if inst.state not in ["shutting-down", "terminated"]: inst.start() master_nodes = existing_masters else: master_type = opts.master_instance_type if master_type == "": master_type = opts.instance_type if opts.zone == 'all': opts.zone = random.choice(conn.get_all_zones()).name master_res = image.run(key_name = opts.key_pair, security_groups = [master_group], instance_type = master_type, placement = opts.zone, min_count = 1, max_count = 1, block_device_map = block_map) master_nodes = master_res.instances print "Launched master in %s, regid = %s" % (zone, master_res.id) # Return all the instances return (master_nodes, slave_nodes) # Get the EC2 instances in an existing cluster if available. 
# Returns a tuple of lists of EC2 instance objects for the masters and slaves def get_existing_cluster(conn, opts, cluster_name, die_on_error=True): print "Searching for existing cluster " + cluster_name + "..." reservations = conn.get_all_instances() master_nodes = [] slave_nodes = [] for res in reservations: active = [i for i in res.instances if is_active(i)] for inst in active: group_names = [g.name for g in inst.groups] if group_names == [cluster_name + "-master"]: master_nodes.append(inst) elif group_names == [cluster_name + "-slaves"]: slave_nodes.append(inst) if any((master_nodes, slave_nodes)): print ("Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes))) if master_nodes != [] or not die_on_error: return (master_nodes, slave_nodes) else: if master_nodes == [] and slave_nodes != []: print >> sys.stderr, "ERROR: Could not find master in group " + cluster_name + "-master" else: print >> sys.stderr, "ERROR: Could not find any existing cluster" sys.exit(1) # Deploy configuration files and run setup scripts on a newly launched # or started EC2 cluster. def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key): master = master_nodes[0].public_dns_name if deploy_ssh_key: print "Generating cluster's SSH key on master..." key_setup = """ [ -f ~/.ssh/id_rsa ] || (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa && cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys) """ ssh(master, opts, key_setup) dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh']) print "Transferring cluster's SSH key to slaves..." for slave in slave_nodes: print slave.public_dns_name ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar) modules = ['spark', 'shark', 'ephemeral-hdfs', 'persistent-hdfs', 'mapreduce', 'spark-standalone', 'tachyon'] if opts.hadoop_major_version == "1": modules = filter(lambda x: x != "mapreduce", modules) if opts.ganglia: modules.append('ganglia') # NOTE: We should clone the repository before running deploy_files to # prevent ec2-variables.sh from being overwritten ssh(master, opts, "rm -rf spark-ec2 && git clone https://github.com/mesos/spark-ec2.git -b v3") print "Deploying files to master..." deploy_files(conn, "deploy.generic", opts, master_nodes, slave_nodes, modules) print "Running setup on master..." setup_spark_cluster(master, opts) print "Done!" def setup_standalone_cluster(master, slave_nodes, opts): slave_ips = '\n'.join([i.public_dns_name for i in slave_nodes]) ssh(master, opts, "echo \"%s\" > spark/conf/slaves" % (slave_ips)) ssh(master, opts, "/root/spark/sbin/start-all.sh") def setup_spark_cluster(master, opts): ssh(master, opts, "chmod u+x spark-ec2/setup.sh") ssh(master, opts, "spark-ec2/setup.sh") print "Spark standalone cluster started at http://%s:8080" % master if opts.ganglia: print "Ganglia started at http://%s:5080/ganglia" % master # Wait for a whole cluster (masters, slaves and ZooKeeper) to start up def wait_for_cluster(conn, wait_secs, master_nodes, slave_nodes): print "Waiting for instances to start up..." time.sleep(5) wait_for_instances(conn, master_nodes) wait_for_instances(conn, slave_nodes) print "Waiting %d more seconds..." % wait_secs time.sleep(wait_secs) # Get number of local disks available for a given EC2 instance type. 
def get_num_disks(instance_type): # From http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?InstanceStorage.html disks_by_instance = { "m1.small": 1, "m1.medium": 1, "m1.large": 2, "m1.xlarge": 4, "t1.micro": 1, "c1.medium": 1, "c1.xlarge": 4, "m2.xlarge": 1, "m2.2xlarge": 1, "m2.4xlarge": 2, "cc1.4xlarge": 2, "cc2.8xlarge": 4, "cg1.4xlarge": 2, "hs1.8xlarge": 24, "cr1.8xlarge": 2, "hi1.4xlarge": 2, "m3.xlarge": 0, "m3.2xlarge": 0, "i2.xlarge": 1, "i2.2xlarge": 2, "i2.4xlarge": 4, "i2.8xlarge": 8, "c3.large": 2, "c3.xlarge": 2, "c3.2xlarge": 2, "c3.4xlarge": 2, "c3.8xlarge": 2 } if instance_type in disks_by_instance: return disks_by_instance[instance_type] else: print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1" % instance_type) return 1 # Deploy the configuration file templates in a given local directory to # a cluster, filling in any template parameters with information about the # cluster (e.g. lists of masters and slaves). Files are only deployed to # the first master instance in the cluster, and we expect the setup # script to be run on that instance to copy them to other nodes. def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules): active_master = master_nodes[0].public_dns_name num_disks = get_num_disks(opts.instance_type) hdfs_data_dirs = "/mnt/ephemeral-hdfs/data" mapred_local_dirs = "/mnt/hadoop/mrlocal" spark_local_dirs = "/mnt/spark" if num_disks > 1: for i in range(2, num_disks + 1): hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i spark_local_dirs += ",/mnt%d/spark" % i cluster_url = "%s:7077" % active_master if "." in opts.spark_version: # Pre-built spark & shark deploy (spark_v, shark_v) = get_spark_shark_version(opts) else: # Spark-only custom deploy spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version) shark_v = "" modules = filter(lambda x: x != "shark", modules) template_vars = { "master_list": '\n'.join([i.public_dns_name for i in master_nodes]), "active_master": active_master, "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]), "cluster_url": cluster_url, "hdfs_data_dirs": hdfs_data_dirs, "mapred_local_dirs": mapred_local_dirs, "spark_local_dirs": spark_local_dirs, "swap": str(opts.swap), "modules": '\n'.join(modules), "spark_version": spark_v, "shark_version": shark_v, "hadoop_major_version": opts.hadoop_major_version, "spark_worker_instances": "%d" % opts.worker_instances, "spark_master_opts": opts.master_opts } # Create a temp directory in which we will place all the files to be # deployed after we substitue template parameters in them tmp_dir = tempfile.mkdtemp() for path, dirs, files in os.walk(root_dir): if path.find(".svn") == -1: dest_dir = os.path.join('/', path[len(root_dir):]) local_dir = tmp_dir + dest_dir if not os.path.exists(local_dir): os.makedirs(local_dir) for filename in files: if filename[0] not in '#.~' and filename[-1] != '~': dest_file = os.path.join(dest_dir, filename) local_file = tmp_dir + dest_file with open(os.path.join(path, filename)) as src: with open(local_file, "w") as dest: text = src.read() for key in template_vars: text = text.replace("{{" + key + "}}", template_vars[key]) dest.write(text) dest.close() # rsync the whole directory over to the master machine command = [ 'rsync', '-rv', '-e', stringify_command(ssh_command(opts)), "%s/" % tmp_dir, "%s@%s:/" % (opts.user, active_master) ] subprocess.check_call(command) # Remove the temp directory we created above shutil.rmtree(tmp_dir) 
def stringify_command(parts): if isinstance(parts, str): return parts else: return ' '.join(map(pipes.quote, parts)) def ssh_args(opts): parts = ['-o', 'StrictHostKeyChecking=no'] if opts.identity_file is not None: parts += ['-i', opts.identity_file] return parts def ssh_command(opts): return ['ssh'] + ssh_args(opts) # Run a command on a host through ssh, retrying up to five times # and then throwing an exception if ssh continues to fail. def ssh(host, opts, command): tries = 0 while True: try: return subprocess.check_call( ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host), stringify_command(command)]) except subprocess.CalledProcessError as e: if (tries > 5): # If this was an ssh failure, provide the user with hints. if e.returncode == 255: raise UsageError("Failed to SSH to remote host {0}.\nPlease check that you have provided the correct --identity-file and --key-pair parameters and try again.".format(host)) else: raise e print >> stderr, "Error executing remote command, retrying after 30 seconds: {0}".format(e) time.sleep(30) tries = tries + 1 def ssh_read(host, opts, command): return subprocess.check_output( ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)]) def ssh_write(host, opts, command, input): tries = 0 while True: proc = subprocess.Popen( ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)], stdin=subprocess.PIPE) proc.stdin.write(input) proc.stdin.close() status = proc.wait() if status == 0: break elif (tries > 5): raise RuntimeError("ssh_write failed with error %s" % proc.returncode) else: print >> stderr, "Error {0} while executing remote command, retrying after 30 seconds".format(status) time.sleep(30) tries = tries + 1 # Gets a list of zones to launch instances in def get_zones(conn, opts): if opts.zone == 'all': zones = [z.name for z in conn.get_all_zones()] else: zones = [opts.zone] return zones # Gets the number of items in a partition def get_partition(total, num_partitions, current_partitions): num_slaves_this_zone = total / num_partitions if (total % num_partitions) - current_partitions > 0: num_slaves_this_zone += 1 return num_slaves_this_zone def real_main(): (opts, action, cluster_name) = parse_args() try: conn = ec2.connect_to_region(opts.region) except Exception as e: print >> stderr, (e) sys.exit(1) # Select an AZ at random if it was not specified. if opts.zone == "": opts.zone = random.choice(conn.get_all_zones()).name if action == "launch": if opts.slaves <= 0: print >> sys.stderr, "ERROR: You have to start at least 1 slave" sys.exit(1) if opts.resume: (master_nodes, slave_nodes) = get_existing_cluster( conn, opts, cluster_name) else: (master_nodes, slave_nodes) = launch_cluster( conn, opts, cluster_name) wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes) setup_cluster(conn, master_nodes, slave_nodes, opts, True) elif action == "destroy": response = raw_input("Are you sure you want to destroy the cluster " + cluster_name + "?\nALL DATA ON ALL NODES WILL BE LOST!!\n" + "Destroy cluster " + cluster_name + " (y/N): ") if response == "y": (master_nodes, slave_nodes) = get_existing_cluster( conn, opts, cluster_name, die_on_error=False) print "Terminating master..." for inst in master_nodes: inst.terminate() print "Terminating slaves..." for inst in slave_nodes: inst.terminate() # Delete security groups as well if opts.delete_groups: print "Deleting security groups (this will take some time)..." 
group_names = [cluster_name + "-master", cluster_name + "-slaves"] attempt = 1; while attempt <= 3: print "Attempt %d" % attempt groups = [g for g in conn.get_all_security_groups() if g.name in group_names] success = True # Delete individual rules in all groups before deleting groups to # remove dependencies between them for group in groups: print "Deleting rules in security group " + group.name for rule in group.rules: for grant in rule.grants: success &= group.revoke(ip_protocol=rule.ip_protocol, from_port=rule.from_port, to_port=rule.to_port, src_group=grant) # Sleep for AWS eventual-consistency to catch up, and for instances # to terminate time.sleep(30) # Yes, it does have to be this long :-( for group in groups: try: conn.delete_security_group(group.name) print "Deleted security group " + group.name except boto.exception.EC2ResponseError: success = False; print "Failed to delete security group " + group.name # Unfortunately, group.revoke() returns True even if a rule was not # deleted, so this needs to be rerun if something fails if success: break; attempt += 1 if not success: print "Failed to delete all security groups after 3 tries." print "Try re-running in a few minutes." elif action == "login": (master_nodes, slave_nodes) = get_existing_cluster( conn, opts, cluster_name) master = master_nodes[0].public_dns_name print "Logging into master " + master + "..." proxy_opt = [] if opts.proxy_port != None: proxy_opt = ['-D', opts.proxy_port] subprocess.check_call( ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)]) elif action == "get-master": (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name) print master_nodes[0].public_dns_name elif action == "stop": response = raw_input("Are you sure you want to stop the cluster " + cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " + "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" + "AMAZON EBS IF IT IS EBS-BACKED!!\n" + "All data on spot-instance slaves will be lost.\n" + "Stop cluster " + cluster_name + " (y/N): ") if response == "y": (master_nodes, slave_nodes) = get_existing_cluster( conn, opts, cluster_name, die_on_error=False) print "Stopping master..." for inst in master_nodes: if inst.state not in ["shutting-down", "terminated"]: inst.stop() print "Stopping slaves..." for inst in slave_nodes: if inst.state not in ["shutting-down", "terminated"]: if inst.spot_instance_request_id: inst.terminate() else: inst.stop() elif action == "start": (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name) print "Starting slaves..." for inst in slave_nodes: if inst.state not in ["shutting-down", "terminated"]: inst.start() print "Starting master..." for inst in master_nodes: if inst.state not in ["shutting-down", "terminated"]: inst.start() wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes) setup_cluster(conn, master_nodes, slave_nodes, opts, False) else: print >> stderr, "Invalid action: %s" % action sys.exit(1) def main(): try: real_main() except UsageError, e: print >> stderr, "\nError:\n", e sys.exit(1) if __name__ == "__main__": logging.basicConfig() main()
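# ---------------------------------------------------------------------------
# Worked example (a sketch, not part of the spark-ec2 script): get_partition()
# above spreads opts.slaves over the selected availability zones: every zone
# gets the integer quotient and the first (total % num_partitions) zones get
# one extra instance.  Restated with // so it behaves identically under
# Python 3 (the script itself targets Python 2, where / on ints already
# floor-divides):
def partition(total, num_partitions, current_partition):
    n = total // num_partitions
    if (total % num_partitions) - current_partition > 0:
        n += 1
    return n


counts = [partition(10, 3, i) for i in range(3)]
assert counts == [4, 3, 3]   # 10 slaves over 3 zones
assert sum(counts) == 10     # every requested slave is placed exactly once
# ---------------------------------------------------------------------------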
r""" Stokes equations for incompressible fluid flow. This example demonstrates fields defined on subdomains as well as use of periodic boundary conditions. Find :math:`\ul{u}`, :math:`p` such that: .. math:: \int_{Y_1 \cup Y_2} \nu\ \nabla \ul{v} : \nabla \ul{u} - \int_{Y_1 \cup Y_2} p\ \nabla \cdot \ul{v} = 0 \;, \quad \forall \ul{v} \;, \int_{Y_1 \cup Y_2} q\ \nabla \cdot \ul{u} = 0 \;, \quad \forall q \;. """ from __future__ import absolute_import from sfepy import data_dir from sfepy.discrete.fem.periodic import match_y_line filename_mesh = data_dir + '/meshes/2d/special/channels_symm944t.mesh' if filename_mesh.find( 'symm' ): region_1 = { 'name' : 'Y1', 'select' : """cells of group 3""", } region_2 = { 'name' : 'Y2', 'select' : """cells of group 4 +c cells of group 6 +c cells of group 8""", } region_4 = { 'name' : 'Y1Y2', 'select' : """r.Y1 +c r.Y2""", } region_5 = { 'name' : 'Walls', 'select' : """r.EBCGamma1 +v r.EBCGamma2""", 'kind' : 'facet', } region_310 = { 'name' : 'EBCGamma1', 'select' : """(cells of group 1 *v cells of group 3) +v (cells of group 2 *v cells of group 3) """, 'kind' : 'facet', } region_320 = { 'name' : 'EBCGamma2', 'select' : """(cells of group 5 *v cells of group 4) +v (cells of group 1 *v cells of group 4) +v (cells of group 7 *v cells of group 6) +v (cells of group 2 *v cells of group 6) +v (cells of group 9 *v cells of group 8) +v (cells of group 2 *v cells of group 8) """, 'kind' : 'facet', } w2 = 0.499 # Sides. region_20 = { 'name' : 'Left', 'select' : 'vertices in (x < %.3f)' % -w2, 'kind' : 'facet', } region_21 = { 'name' : 'Right', 'select' : 'vertices in (x > %.3f)' % w2, 'kind' : 'facet', } region_22 = { 'name' : 'Bottom', 'select' : 'vertices in (y < %.3f)' % -w2, 'kind' : 'facet', } region_23 = { 'name' : 'Top', 'select' : 'vertices in (y > %.3f)' % w2, 'kind' : 'facet', } field_1 = { 'name' : '2_velocity', 'dtype' : 'real', 'shape' : (2,), 'region' : 'Y1Y2', 'approx_order' : 2, } field_2 = { 'name' : 'pressure', 'dtype' : 'real', 'shape' : (1,), 'region' : 'Y1Y2', 'approx_order' : 1, } variable_1 = { 'name' : 'u', 'kind' : 'unknown field', 'field' : '2_velocity', 'order' : 0, } variable_2 = { 'name' : 'v', 'kind' : 'test field', 'field' : '2_velocity', 'dual' : 'u', } variable_3 = { 'name' : 'p', 'kind' : 'unknown field', 'field' : 'pressure', 'order' : 1, } variable_4 = { 'name' : 'q', 'kind' : 'test field', 'field' : 'pressure', 'dual' : 'p', } integral_1 = { 'name' : 'i', 'order' : 2, } equations = { 'balance' : """dw_div_grad.i.Y1Y2( fluid.viscosity, v, u ) - dw_stokes.i.Y1Y2( v, p ) = 0""", 'incompressibility' : """dw_stokes.i.Y1Y2( u, q ) = 0""", } material_1 = { 'name' : 'fluid', 'values' : { 'viscosity' : 1.0, 'density' : 1e0, }, } ebc_1 = { 'name' : 'walls', 'region' : 'Walls', 'dofs' : {'u.all' : 0.0}, } ebc_2 = { 'name' : 'top_velocity', 'region' : 'Top', 'dofs' : {'u.1' : -1.0, 'u.0' : 0.0}, } ebc_10 = { 'name' : 'bottom_pressure', 'region' : 'Bottom', 'dofs' : {'p.0' : 0.0}, } epbc_1 = { 'name' : 'u_rl', 'region' : ['Left', 'Right'], 'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'}, 'match' : 'match_y_line', } functions = { 'match_y_line' : (match_y_line,), } solver_0 = { 'name' : 'ls', 'kind' : 'ls.scipy_direct', } solver_1 = { 'name' : 'newton', 'kind' : 'nls.newton', 'i_max' : 2, 'eps_a' : 1e-8, 'eps_r' : 1e-2, 'macheps' : 1e-16, 'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red). 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 1.1, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6, } save_format = 'hdf5' # 'hdf5' or 'vtk'
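# ---------------------------------------------------------------------------
# Usage note (hedged sketch, not part of this problem description): the
# dictionaries above form a declarative SfePy problem file that the framework
# collects by name.  One way to drive such a file programmatically is shown
# below; the file name is hypothetical and solve_pde's exact signature and
# return value differ between SfePy releases, so treat this as an outline:
#
#     from sfepy.base.conf import ProblemConf, get_standard_keywords
#     from sfepy.applications import solve_pde
#
#     required, other = get_standard_keywords()
#     conf = ProblemConf.from_file('stokes_periodic_channels.py', required, other)
#     solve_pde(conf)
#
# The traditional alternative is SfePy's command-line runner
# (historically `python simple.py <this file>`).
# ---------------------------------------------------------------------------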
# python-3 # Author: Sreejith Menon ([email protected]) # Creation date: 5/23/16 ''' Script for getting all images without duplicates from the csv file. Date: May 23, 2016 The mapFlName referred to in the entire script is : ../data/imageGID_job_map_expt2_corrected.csv ''' import BuildConsolidatedFeaturesFile as Features, DataStructsHelperAPI as DS, mongod_helper as mh import importlib, re, json, pandas as pd, csv from collections import OrderedDict, Counter importlib.reload(Features) # This method is used to generate a list of all images that are used in the current experiment as specified by the map file. def genUniqueImageListFromMap(mapFlName): with open(mapFlName, "r") as csvFl: reader = csv.reader(csvFl) head = reader.__next__() data = [row for row in reader] images = [img for row in data for img in row[1:]] return list(set(images)) # This method is used to generate a dictionary in the form { Album : [GIDS] }. # This object will give us the capability to query which images exist in a particular album. def genAlbumGIDDictFromMap(mapFlName): reader = csv.reader(open(mapFlName, "r")) head = reader.__next__() data = {row[0]: row[1:] for row in reader} return data # This method is used to generate a dictionary in the form { GID: [Albums] }. # This object will give us the capability to query which album contain a particular GID. def genImgAlbumDictFromMap(mapFLName): uniqImgs = genUniqueImageListFromMap(mapFLName) albumImg = genAlbumGIDDictFromMap(mapFLName) imgAlbum = {} for img in uniqImgs: for album in albumImg.keys(): if img in albumImg[album]: imgAlbum[img] = imgAlbum.get(img, []) + [album] return imgAlbum # This method is used to generate a dictionary in the form { GID : No. of albums it appears }. def getImageFreqFromMap(inFL): imgAlbumDict = genImgAlbumDictFromMap(inFL) imgFreq = sorted([(img, len(imgAlbumDict[img])) for img in imgAlbumDict], key=lambda x: x[1], reverse=True) return imgFreq # This method is used to generate a dictionary in the form { AID : GID }. def genAidGidDictFromMap(mapFL): jsonObj = json.load(open(mapFL, "r")) aidGidDict = {} for gid in jsonObj: if jsonObj[gid][0] != None: for aid in jsonObj[gid][0]: aidGidDict[aid] = aidGidDict.get(aid, []) + [gid] return aidGidDict # This method is used to generate a dictionary in the form { GID : List of AIDs in that image }. def genGidAidDictFromMap(mapFL): jsonObj = json.load(open(mapFL, "r")) gidAidDict = {} for gid in jsonObj: if jsonObj[gid] != None: for aid in jsonObj[gid]: # -- put a [0] for GZC dataset gidAidDict[gid] = gidAidDict.get(gid, []) + [aid] else: gidAidDict[gid] = None return gidAidDict # This method is used to generate a list of tuples in the form ( AID , GID ). def genAidGidTupListFromMap(mapFL): jsonObj = json.load(open(mapFL, "r")) aidGidTupList = [] for gid in jsonObj: if jsonObj[gid][0] != None: for aid in jsonObj[gid][0]: aidGidTupList.append((aid, gid)) return aidGidTupList # This method is used to generate a list of dictionaries in the form [{'AID': xx,'NID' : xx ,.. }]. # This object will give us the capability to iterate through all annotations and their respective features. 
def genAidFeatureDictList(mapFL): jsonObj = json.load(open(mapFL, "r")) aidFeaturesList = [] for aid in jsonObj.keys(): featuresDict = {} features = jsonObj[aid] featuresDict['AID'] = aid featuresDict['NID'] = features[0] featuresDict['INDIVIDUAL_NAME'] = features[1] featuresDict['SPECIES'] = features[2] featuresDict['SEX'] = features[3] featuresDict['AGE'] = features[4] featuresDict['EXEMPLAR_FLAG'] = features[5] featuresDict['QUALITY'] = features[6] featuresDict['VIEW_POINT'] = features[7] featuresDict['CONTRIBUTOR'] = features[8] # newly added on 06/22 aidFeaturesList.append(featuresDict) return aidFeaturesList # This method is used to generate a dictionary in the form { AID : {'NID' : xxx , 'SPECIES' : xxx, .. }}. # This object will give us the capability to query one/multiple features given a annotation ID. def genAidFeatureDictDict(mapFL): jsonObj = json.load(open(mapFL, "r")) aidFeaturesDict = {} for aid in jsonObj.keys(): featuresDict = {} features = jsonObj[aid] # featuresDict['AID'] = aid featuresDict['NID'] = str(features[0]) featuresDict['INDIVIDUAL_NAME'] = features[1] featuresDict['SPECIES'] = features[2] featuresDict['SEX'] = features[3] featuresDict['AGE'] = features[4] featuresDict['EXEMPLAR_FLAG'] = str(features[5]) # type-casting to string necessary featuresDict['QUALITY'] = features[6] featuresDict['VIEW_POINT'] = features[7] featuresDict['CONTRIBUTOR'] = features[8] # newly added on 06/22 aidFeaturesDict[aid] = featuresDict return aidFeaturesDict def genGidAidFtrDf(gidAidMapFl, aidFtrMapFl, outFlNm="/tmp/genGidAidFtrDf.dump.csv"): aidFeaturesDf = pd.DataFrame(genAidFeatureDictList(aidFtrMapFl)) aidFeaturesDf['AID'] = aidFeaturesDf['AID'].astype('int32') aidGidDict = genAidGidTupListFromMap(gidAidMapFl) aidGidDf = pd.DataFrame(aidGidDict, columns=['AID', 'GID']) df = pd.merge(aidGidDf, aidFeaturesDf, left_on='AID', right_on='AID') df.to_csv(outFlNm, index=False) return df # mapFL - json format (should be in the format {gid: {aid : features}}) # This method is used to generate a dictionary in the form { GID : [list of features instances in that image]}. # This object will give us the capability to check what feature instances are present in a given image. def extractImageFeaturesFromMap(gidAidMapFl, aidFtrMapFl, feature, mode='GZC'): gidAidDict = genGidAidDictFromMap(gidAidMapFl) if mode == "GZC": # aidFeatureDict = genAidFeatureDictDict(aidFtrMapFl) # extra lines - remove with open(aidFtrMapFl, "r") as aidFtrFl: aidFeatureDict = json.load(aidFtrFl) else: with open(aidFtrMapFl, "r") as aidFtrFl: aidFeatureDict = json.load(aidFtrFl) gidFtr = {} if mode == "GGR": for gid in gidAidDict.keys(): if gidAidDict[gid][0] != None: for aid in gidAidDict[gid]: gidFtr[gid] = gidFtr.get(gid, []) + [aidFeatureDict[str(aid)][feature]] else: for gid in gidAidDict.keys(): if len(gidAidDict[gid][0]): for aid in gidAidDict[gid][0]: gidFtr[gid] = gidFtr.get(gid, []) + [aidFeatureDict[str(aid)][feature]] return gidFtr # Part 1: Building the results dictionary (all the fields of interest for all the available jobs) # Returns a master dictionary that has job: answers key-value pair. # Every album corresponds to a .result file which is extracted from the Amazon Mechanical Turk interface. # This method parses the results file and generates a python object consisting of each response key with the actual response from the users. 
# The dictionary is of the form: { photo_album_i : { Answer.GID : [ GID|'share' , 'GID'|'noShare'] }} # All the results file from jobRangeStart to jobRangeEnd will be parsed and included in the output object. # Setting workerData to True also includes the turker ID in the result dictionary. def createResultDict(jobRangeStart, jobRangeEnd, workerData=False): masterDict = OrderedDict() for i in range(jobRangeStart, jobRangeEnd + 1): inFLTitle = "photo_album_" + str(i) inFL = "../results/photo_album_" + str(i) + ".results" with open(inFL, "r") as inp: inFLList = [line.replace('"', '') for line in inp] header = inFLList[0].split("\t") resultList = [line.split("\t") for line in inFLList[1:]] resultDict = OrderedDict() for i in range(0, len(resultList)): for j in range(0, len(header)): resultDict[header[j]] = resultDict.get(header[j], []) + [resultList[i][j]] keysOfInterest = list(filter(lambda x: re.search("Answer", x), resultDict.keys())) if workerData: keysOfInterest += list(filter(lambda x: re.search("workerid", x), resultDict.keys())) newDict = OrderedDict() for key in keysOfInterest: newDict[key] = resultDict[key] masterDict[inFLTitle] = newDict return masterDict # Not added to github pages # This method will be used to extract responses to general questions in the mechanical turk job. # For instance, in experiment 1 and 2, there were questions asking about how often one shares on social media etc. def genCntrsGenQues(jobRangeStart, jobRangeEnd, keyList): results = createResultDict(jobRangeStart, jobRangeEnd) answers = [[ans for album in results.keys() for ans in results[album][key]] for key in keyList] cntrObj = {keyList[i]: Counter(answers[i]) for i in range(len(keyList))} return cntrObj # This method is used to generate the list of tags generated by Microsoft Image tagging API, thresholded by confindence. # With each tag, there is an associated confidence which quantifies the confidence of the tag prediciting algorithm. # For purpose of experiments, the threshold is defaulted to 0.5, any tags predicted with confidence greater than 0.5 is accepted and the rest is rejected. def genMSAIDataHighConfidenceTags(tagInpDataFl, threshold=0.5): with open(tagInpDataFl, "r") as tagInp: taggedData = json.load(tagInp) gidFtrs = {} for gid in taggedData: tgs = taggedData[gid]['tags'] if len(tgs) == 0: gidFtrs[gid] = [None] for dic in tgs: if dic['confidence'] >= threshold: # added for retaining only high confidence tags gidFtrs[gid] = gidFtrs.get(gid, []) + [dic['name']] return gidFtrs # This method returns a Python list which gives us the capability to iterate through all the images, the number of times an image was shared or not shared in a particular album. # This object will form the basis of all statistic computations in the project. The format of a tuple inside the list is of the form (GID, Album, Share count, Not Share count, Proportion). # The other return object is the list of all (GID, albums) for which there was no valid response. # (Form fields in certain albums in experiment 2 were not mandatory in the beginning, the bug was identified and corrected in a later stage.) 
def imgShareCountsPerAlbum(imgAlbumDict, results): imgShareNotShareList = [] noResponse = [] for album in results.keys(): ansDict = results[album] for key in ansDict: _, gid = key.split('.') if gid.isdigit(): # to avoid answers to q1,q2, comments and submit btn shrNotShr = ansDict[key] shareCount = 0 notShareCount = 0 ansOnceFlag = False for answer in shrNotShr: if len(answer.split("|")) == 2: ansOnceFlag = True imgRes, ans = answer.split("|") if ans == 'share': shareCount += 1 else: notShareCount += 1 else: imgRes = answer[0] noResponse.append((imgRes, album)) imgShareNotShareList.append( (gid, album, shareCount, notShareCount, shareCount * 100 / (shareCount + notShareCount))) return imgShareNotShareList, noResponse def createMstrFl(gidAidFtrFl, tagsFlNm, attribList, outFlNm="/tmp/createMstrFl.dump.csv"): df = pd.DataFrame.from_csv(gidAidFtrFl) df.reset_index(inplace=True) df = df[attribList] if 'CONTRIBUTOR' in attribList: df['CONTRIBUTOR'] = df[['CONTRIBUTOR']].applymap(lambda x: x.replace(',', '')) df = df.groupby('GID').agg(','.join).reset_index() df.GID = df.GID.astype(str) gidFtrsLst = DS.cnvrtDictToLstTup(genMSAIDataHighConfidenceTags(tagsFlNm)) df_tags = pd.DataFrame(gidFtrsLst, columns=['GID', 'tags']) df_tags['GID'] = df_tags['GID'].astype(str) df_comb = pd.merge(df, df_tags, left_on='GID', right_on='GID') df_comb.to_csv(outFlNm, index=False) return df_comb # audit script # This method is an audit method that ensures there are no leaks or incorrect data in the result and feature objects. # The 3 boolean variables indicate 3 different types of errors. def auditResMap(imgAlbumDict, resultList): err1 = False err2 = False err3 = False imgAlbmDctRest = {} for tup in resultList: imgAlbmDctRest[tup[0]] = imgAlbmDctRest.get(tup[0], []) + [tup[1]] for tup in resultList: shouldBeInAlbms = imgAlbumDict[tup[0]] if tup[1] not in shouldBeInAlbms: err1 = True for gid in imgAlbmDctRest.keys(): albmRes = imgAlbmDctRest[gid] albmOri = imgAlbumDict[gid] for albm in albmOri: if albm not in albmRes: err2 = True for albm in albmRes: if albm not in albmOri: err3 = True return err1, err2, err3 def __main__(): allUniqImgs = genUniqueImageListFromMap("../data/imageGID_job_map_expt2_corrected.csv") # outFL = "../data/all_imgs_expt2.csv" # writeFL = open(outFL,"w") # writer = csv.writer(writeFL) # writer.writerow(allUniqImgs) # writeFL.close() Features.buildFeatureFl(allUniqImgs, "../data/experiment2.csv", False) if __name__ == "__main__": __main__() # pass
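# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this script): the thresholding in
# genMSAIDataHighConfidenceTags() keeps only the Microsoft tagging API labels
# whose confidence clears the cut-off (0.5 by default).  The same filtering on
# a small in-memory structure, with made-up GIDs and tags instead of a real
# tag dump file:
def filter_tags_by_confidence(tagged_data, threshold=0.5):
    """Mirror of the per-GID filtering above, minus the JSON file I/O."""
    gid_ftrs = {}
    for gid, payload in tagged_data.items():
        tags = payload['tags']
        if not tags:
            gid_ftrs[gid] = [None]
        for tag in tags:
            if tag['confidence'] >= threshold:
                gid_ftrs[gid] = gid_ftrs.get(gid, []) + [tag['name']]
    return gid_ftrs


sample = {
    '101': {'tags': [{'name': 'zebra', 'confidence': 0.92},
                     {'name': 'grass', 'confidence': 0.31}]},
    '102': {'tags': []},
}
assert filter_tags_by_confidence(sample) == {'101': ['zebra'], '102': [None]}
# ---------------------------------------------------------------------------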
import random
import string
import os
import logging
import sys
import parsing
from parsing import flatten_dictionaries, lowercase_keys, safe_to_bool

# Python 3 compatibility
if sys.version_info[0] > 2:
    from builtins import range as xrange
    from past.builtins import basestring

""" Collection of generators to be used in templating for test data

Plans: extend these by allowing generators that take generators for input
Example: generators that case-swap
"""

INT32_MAX_VALUE = 2147483647  # Max of a 32 bit signed int

logger = logging.getLogger('pyresttest.generators')

# Character sets to use in text generation, python string plus extras
CHARACTER_SETS = {
    'ascii_letters': string.ascii_letters,
    'ascii_lowercase': string.ascii_lowercase,
    'ascii_uppercase': string.ascii_uppercase,
    'digits': string.digits,
    'hexdigits': string.hexdigits,
    'hex_lower': string.digits + 'abcdef',
    'hex_upper': string.digits + 'ABCDEF',
    'letters': string.ascii_letters,
    'lowercase': string.ascii_lowercase,
    'octdigits': string.octdigits,
    'punctuation': string.punctuation,
    'printable': string.printable,
    'uppercase': string.ascii_uppercase,
    'whitespace': string.whitespace,
    'url.slug': string.ascii_lowercase + string.digits + '-',
    'url.safe': string.ascii_letters + string.digits + '-~_.',
    'alphanumeric': string.ascii_letters + string.digits,
    'alphanumeric_lower': string.ascii_lowercase + string.digits,
    'alphanumeric_upper': string.ascii_uppercase + string.digits
}


def factory_generate_ids(starting_id=1, increment=1):
    """ Return function generator for ids starting at starting_id
        Note: needs to be called with () to make generator """
    def generate_started_ids():
        val = starting_id
        local_increment = increment
        while(True):
            yield val
            val += local_increment
    return generate_started_ids


def generator_basic_ids():
    """ Return ids generator starting at 1 """
    return factory_generate_ids(1)()


def generator_random_int32():
    """ Random integer generator for up to 32-bit signed ints """
    rand = random.Random()
    while (True):
        yield random.randint(0, INT32_MAX_VALUE)


def factory_generate_text(legal_characters=string.ascii_letters, min_length=8, max_length=8):
    """ Returns a generator function for text with given legal_characters string and length
        Default is ascii letters, length 8

        For hex digits, combine with string.hexdigits, etc
    """
    def generate_text():
        local_min_len = min_length
        local_max_len = max_length
        rand = random.Random()
        while(True):
            length = random.randint(local_min_len, local_max_len)
            array = [random.choice(legal_characters) for x in xrange(0, length)]
            yield ''.join(array)

    return generate_text


def factory_fixed_sequence(values):
    """ Return a generator that runs through a list of values in order, looping after end """
    def seq_generator():
        my_list = list(values)
        i = 0
        while(True):
            yield my_list[i]
            i += 1  # advance through the list, wrapping back to the start at the end
            if i == len(my_list):
                i = 0
    return seq_generator


def parse_fixed_sequence(config):
    """ Parse fixed sequence string """
    vals = config['values']
    if not vals:
        raise ValueError('Values for fixed sequence must exist')
    if not isinstance(vals, list):
        raise ValueError('Values must be a list of entries')
    return factory_fixed_sequence(vals)()


def factory_choice_generator(values):
    """ Return a generator that picks values from a list randomly """
    def choice_generator():
        my_list = list(values)
        rand = random.Random()
        while(True):
            yield random.choice(my_list)
    return choice_generator


def parse_choice_generator(config):
    """ Parse choice generator """
    vals = config['values']
    if not vals:
        raise ValueError('Values for choice sequence must exist')
    if not isinstance(vals, list):
        raise ValueError('Values must be a list of entries')
    return factory_choice_generator(vals)()


def factory_env_variable(env_variable):
    """ Return a generator function that reads from an environment variable """
    def return_variable():
        variable_name = env_variable
        while(True):
            yield os.environ.get(variable_name)
    return return_variable


def factory_env_string(env_string):
    """ Return a generator function that uses OS expand path to expand environment variables in string """
    def return_variable():
        my_input = env_string
        while(True):
            yield os.path.expandvars(my_input)
    return return_variable


""" Implements the parsing logic for YAML, and acts as single point for reading configuration """


def parse_random_text_generator(configuration):
    """ Parses configuration options for a random text generator """
    character_set = configuration.get(u'character_set')
    characters = None
    if character_set:
        character_set = character_set.lower()
        if character_set not in CHARACTER_SETS:
            raise ValueError(
                "Illegal character set name, is not defined: {0}".format(character_set))
        characters = CHARACTER_SETS[character_set]
    else:  # Custom characters listing, not a character set
        characters = str(configuration.get(u'characters'))

    min_length = 8
    max_length = 8

    if configuration.get(u'min_length'):
        min_length = int(configuration.get(u'min_length'))
    if configuration.get(u'max_length'):
        max_length = int(configuration.get(u'max_length'))
    if configuration.get(u'length'):
        length = int(configuration.get(u'length'))
        min_length = length
        max_length = length

    if characters:
        return factory_generate_text(
            legal_characters=characters, min_length=min_length, max_length=max_length)()
    else:
        return factory_generate_text(
            min_length=min_length, max_length=max_length)()


# List of valid generator types
GENERATOR_TYPES = set(['env_variable',
                       'env_string',
                       'number_sequence',
                       'random_int',
                       'random_text',
                       'fixed_sequence'
                       ])

GENERATOR_PARSING = {'fixed_sequence': parse_fixed_sequence}


def register_generator(typename, parse_function):
    """ Register a new generator for use in testing
        typename is the new generator type name (must not already exist)
        parse_function will parse a configuration object (dict)
    """
    if not isinstance(typename, basestring):
        raise TypeError(
            'Generator type name {0} is invalid, must be a string'.format(typename))
    if typename in GENERATOR_TYPES:
        raise ValueError(
            'Generator type named {0} already exists'.format(typename))
    GENERATOR_TYPES.add(typename)
    GENERATOR_PARSING[typename] = parse_function


# Try registering a new generator
register_generator('choice', parse_choice_generator)


def parse_generator(configuration):
    """ Parses a configuration built from yaml and returns a generator
        Configuration should be a map
    """
    configuration = lowercase_keys(flatten_dictionaries(configuration))
    gen_type = str(configuration.get(u'type')).lower()

    if gen_type not in GENERATOR_TYPES:
        raise ValueError(
            'Generator type given {0} is not valid '.format(gen_type))

    # Do the easy parsing, delegate more complex logic to parsing functions
    if gen_type == u'env_variable':
        return factory_env_variable(configuration[u'variable_name'])()
    elif gen_type == u'env_string':
        return factory_env_string(configuration[u'string'])()
    elif gen_type == u'number_sequence':
        start = configuration.get('start')
        increment = configuration.get('increment')
        if not start:
            start = 1
        else:
            start = int(start)
        if not increment:
            increment = 1
        else:
            increment = int(increment)
        return factory_generate_ids(start, increment)()
    elif gen_type == u'random_int':
        return generator_random_int32()
    elif gen_type == u'random_text':
        return parse_random_text_generator(configuration)
    elif gen_type in GENERATOR_TYPES:
        return GENERATOR_PARSING[gen_type](configuration)
    else:
        raise Exception("Unknown generator type: {0}".format(gen_type))
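A brief usage sketch of the generator API above. It assumes the module is importable as `pyresttest.generators` and that `parsing.flatten_dictionaries`/`lowercase_keys` pass a plain dict through unchanged apart from key casing; the `uuid` generator type is purely illustrative, registered via the `register_generator` extension hook shown above.

import uuid

from pyresttest import generators  # assumed import path


def factory_uuid_generator():
    """ Return a generator function yielding fresh UUID4 strings (illustrative) """
    def uuid_generator():
        while True:
            yield str(uuid.uuid4())
    return uuid_generator


def parse_uuid_generator(config):
    """ No options to parse; config accepted for interface compatibility """
    return factory_uuid_generator()()


generators.register_generator('uuid', parse_uuid_generator)

ids = generators.parse_generator({'type': 'number_sequence', 'start': 10, 'increment': 5})
text = generators.parse_generator({'type': 'random_text', 'character_set': 'ascii_letters', 'length': 12})
unique = generators.parse_generator({'type': 'uuid'})

assert next(ids) == 10 and next(ids) == 15
assert len(next(text)) == 12
assert len(next(unique)) == 36  # canonical uuid4 string length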
import errno import os import signal import socket from os.path import join, dirname, abspath import eventlet from mock import patch from textwrap import dedent import pytest from nameko.cli.main import setup_parser from nameko.cli.run import import_service, setup_backdoor, main, run from nameko.exceptions import CommandError from nameko.runners import ServiceRunner from nameko.standalone.rpc import ClusterRpcProxy from nameko.constants import ( AMQP_URI_CONFIG_KEY, WEB_SERVER_CONFIG_KEY, SERIALIZER_CONFIG_KEY) from test.sample import Service TEST_CONFIG_FILE = abspath(join(dirname(__file__), 'config.yaml')) def test_run(rabbit_config): parser = setup_parser() broker = rabbit_config['AMQP_URI'] args = parser.parse_args([ 'run', '--broker', broker, '--backdoor-port', 0, 'test.sample:Service', ]) gt = eventlet.spawn(main, args) eventlet.sleep(1) # make sure service launches ok with ClusterRpcProxy(rabbit_config) as proxy: proxy.service.ping() # stop service pid = os.getpid() os.kill(pid, signal.SIGTERM) gt.wait() def test_main_with_config(rabbit_config, tmpdir): config = tmpdir.join('config.yaml') config.write(""" WEB_SERVER_ADDRESS: '0.0.0.0:8001' AMQP_URI: '{}' serializer: 'json' """.format(rabbit_config[AMQP_URI_CONFIG_KEY])) parser = setup_parser() args = parser.parse_args([ 'run', '--config', config.strpath, 'test.sample', ]) with patch('nameko.cli.run.run') as run: main(args) assert run.call_count == 1 (_, config) = run.call_args[0] assert config == { WEB_SERVER_CONFIG_KEY: '0.0.0.0:8001', AMQP_URI_CONFIG_KEY: rabbit_config[AMQP_URI_CONFIG_KEY], SERIALIZER_CONFIG_KEY: 'json' } def test_main_with_logging_config(rabbit_config, tmpdir): config = """ AMQP_URI: {amqp_uri} LOGGING: version: 1 disable_existing_loggers: false formatters: simple: format: "%(name)s - %(levelname)s - %(message)s" handlers: capture: class: logging.FileHandler level: INFO formatter: simple filename: {capture_file} root: level: INFO handlers: [capture] """ capture_file = tmpdir.join('capture.log') config_file = tmpdir.join('config.yaml') config_file.write( dedent(config.format( capture_file=capture_file.strpath, amqp_uri=rabbit_config['AMQP_URI'] )) ) parser = setup_parser() args = parser.parse_args([ 'run', '--config', config_file.strpath, 'test.sample', ]) gt = eventlet.spawn(main, args) eventlet.sleep(1) with ClusterRpcProxy(rabbit_config) as proxy: proxy.service.ping() pid = os.getpid() os.kill(pid, signal.SIGTERM) gt.wait() assert "test.sample - INFO - ping!" in capture_file.read() def test_import_ok(): assert import_service('test.sample') == [Service] assert import_service('test.sample:Service') == [Service] def test_import_missing(): with pytest.raises(CommandError) as exc: import_service('non_existent') assert "No module named" in str(exc.value) assert "non_existent" in str(exc.value) def test_import_filename(): with pytest.raises(CommandError) as exc: import_service('test/sample.py') assert "did you mean 'test.sample'?" 
in str(exc) def test_import_broken(): with pytest.raises(ImportError): import_service('test.broken_sample') def test_import_missing_class(): with pytest.raises(CommandError) as exc: import_service('test.sample:NonExistent') assert "Failed to find service class" in str(exc) def test_import_not_a_class(): with pytest.raises(CommandError) as exc: import_service('test.sample:rpc') assert "Service must be a class" in str(exc) def test_import_no_service_classes(): with pytest.raises(CommandError): import_service('test') def recv_until_prompt(sock): data = b"" part = b"" while not data[-5:] == b'\n>>> ': part = sock.recv(4096) data += part return data def test_backdoor(): runner = object() green_socket, gt = setup_backdoor(runner, 0) eventlet.sleep(0) # give backdoor a chance to spawn socket_name = green_socket.fd.getsockname() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(socket_name) recv_until_prompt(sock) # banner sock.sendall(b"runner\n") runner_repr = recv_until_prompt(sock) assert repr(runner) in str(runner_repr) sock.sendall(b"quit()\n") error = recv_until_prompt(sock) assert 'RuntimeError: This would kill your service' in str(error) sock.close() gt.kill() def test_stopping(rabbit_config): with patch('nameko.cli.run.eventlet') as mock_eventlet: # this is the service "runlet" mock_eventlet.spawn().wait.side_effect = [ KeyboardInterrupt, None, # second wait, after stop() which returns normally ] gt = eventlet.spawn(run, [Service], rabbit_config) gt.wait() # should complete def test_stopping_twice(rabbit_config): with patch('nameko.cli.run.eventlet') as mock_eventlet: # this is the service "runlet" mock_eventlet.spawn().wait.side_effect = [ KeyboardInterrupt, None, # second wait, after stop() which returns normally ] with patch('nameko.cli.run.ServiceRunner') as runner_cls: runner = runner_cls() runner.stop.side_effect = KeyboardInterrupt runner.kill.return_value = None gt = eventlet.spawn(run, [Service], rabbit_config) gt.wait() def test_os_error_for_signal(rabbit_config): with patch('nameko.cli.run.eventlet') as mock_eventlet: # this is the service "runlet" mock_eventlet.spawn().wait.side_effect = [ OSError(errno.EINTR, ''), None, # second wait, after stop() which returns normally ] # don't actually start the service -- we're not firing a real signal # so the signal handler won't stop it again with patch.object(ServiceRunner, 'start'): gt = eventlet.spawn(run, [Service], rabbit_config) gt.wait() # should complete def test_other_errors_propagate(rabbit_config): with patch('nameko.cli.run.eventlet') as mock_eventlet: # this is the service "runlet" mock_eventlet.spawn().wait.side_effect = [ OSError(0, ''), None, # second wait, after stop() which returns normally ] # don't actually start the service -- there's no real OSError that # would otherwise kill the whole process with patch.object(ServiceRunner, 'start'): gt = eventlet.spawn(run, [Service], rabbit_config) with pytest.raises(OSError): gt.wait()
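The tests above repeatedly import `test.sample:Service` without showing it. A hypothetical sketch of what that module might contain (the real one ships with nameko's test suite and may differ in detail):

# test/sample.py -- hypothetical sketch, not the actual file
import logging

from nameko.rpc import rpc

logger = logging.getLogger(__name__)  # "test.sample", as asserted in the logging test


class Service(object):
    name = "service"  # matches the proxy.service.ping() calls above

    @rpc
    def ping(self):
        logger.info("ping!")  # test_main_with_logging_config looks for this record
        return "pong"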
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests some of the methods related to the ``ECSV`` reader/writer. Requires `pyyaml <http://pyyaml.org/>`_ to be installed. """ import os import copy import sys import pytest import numpy as np from ....table import Table, Column, QTable, NdarrayMixin from ....table.table_helpers import simple_table from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation from ....time import Time, TimeDelta from ....tests.helper import quantity_allclose from ....units.quantity import QuantityInfo from ....extern.six.moves import StringIO from ..ecsv import DELIMITERS from ... import ascii from .... import units as u try: import yaml # pylint: disable=W0611 HAS_YAML = True except ImportError: HAS_YAML = False DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'float32', 'float64', 'float128', 'str'] if os.name == 'nt' or sys.maxsize <= 2**32: DTYPES.remove('float128') T_DTYPES = Table() for dtype in DTYPES: if dtype == 'bool': data = np.array([False, True, False]) elif dtype == 'str': data = np.array(['ab 0', 'ab, 1', 'ab2']) else: data = np.arange(3, dtype=dtype) c = Column(data, unit='m / s', description='descr_' + dtype, meta={'meta ' + dtype: 1}) T_DTYPES[dtype] = c T_DTYPES.meta['comments'] = ['comment1', 'comment2'] # Corresponds to simple_table() SIMPLE_LINES = ['# %ECSV 0.9', '# ---', '# datatype:', '# - {name: a, datatype: int64}', '# - {name: b, datatype: float64}', '# - {name: c, datatype: string}', '# schema: astropy-2.0', 'a b c', '1 1.0 c', '2 2.0 d', '3 3.0 e'] @pytest.mark.skipif('not HAS_YAML') def test_write_simple(): """ Write a simple table with common types. This shows the compact version of serialization with one line per column. """ t = simple_table() out = StringIO() t.write(out, format='ascii.ecsv') assert out.getvalue().splitlines() == SIMPLE_LINES @pytest.mark.skipif('not HAS_YAML') def test_write_full(): """ Write a full-featured table with common types and explicitly checkout output """ t = T_DTYPES['bool', 'int64', 'float64', 'str'] lines = ['# %ECSV 0.9', '# ---', '# datatype:', '# - name: bool', '# unit: m / s', '# datatype: bool', '# description: descr_bool', '# meta: {meta bool: 1}', '# - name: int64', '# unit: m / s', '# datatype: int64', '# description: descr_int64', '# meta: {meta int64: 1}', '# - name: float64', '# unit: m / s', '# datatype: float64', '# description: descr_float64', '# meta: {meta float64: 1}', '# - name: str', '# unit: m / s', '# datatype: string', '# description: descr_str', '# meta: {meta str: 1}', '# meta: !!omap', '# - comments: [comment1, comment2]', '# schema: astropy-2.0', 'bool int64 float64 str', 'False 0 0.0 "ab 0"', 'True 1 1.0 "ab, 1"', 'False 2 2.0 ab2'] out = StringIO() t.write(out, format='ascii.ecsv') assert out.getvalue().splitlines() == lines @pytest.mark.skipif('not HAS_YAML') def test_write_read_roundtrip(): """ Write a full-featured table with all types and see that it round-trips on readback. Use both space and comma delimiters. 
""" t = T_DTYPES for delimiter in DELIMITERS: out = StringIO() t.write(out, format='ascii.ecsv', delimiter=delimiter) t2s = [Table.read(out.getvalue(), format='ascii.ecsv'), Table.read(out.getvalue(), format='ascii'), ascii.read(out.getvalue()), ascii.read(out.getvalue(), format='ecsv', guess=False), ascii.read(out.getvalue(), format='ecsv')] for t2 in t2s: assert t.meta == t2.meta for name in t.colnames: assert t[name].attrs_equal(t2[name]) assert np.all(t[name] == t2[name]) @pytest.mark.skipif('not HAS_YAML') def test_bad_delimiter(): """ Passing a delimiter other than space or comma gives an exception """ out = StringIO() with pytest.raises(ValueError) as err: T_DTYPES.write(out, format='ascii.ecsv', delimiter='|') assert 'only space and comma are allowed' in str(err.value) @pytest.mark.skipif('not HAS_YAML') def test_bad_header_start(): """ Bad header without initial # %ECSV x.x """ lines = copy.copy(SIMPLE_LINES) lines[0] = '# %ECV 0.9' with pytest.raises(ascii.InconsistentTableError): Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) @pytest.mark.skipif('not HAS_YAML') def test_bad_delimiter_input(): """ Illegal delimiter in input """ lines = copy.copy(SIMPLE_LINES) lines.insert(2, '# delimiter: |') with pytest.raises(ValueError) as err: Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) assert 'only space and comma are allowed' in str(err.value) @pytest.mark.skipif('not HAS_YAML') def test_multidim_input(): """ Multi-dimensional column in input """ t = Table([np.arange(4).reshape(2, 2)], names=['a']) out = StringIO() with pytest.raises(ValueError) as err: t.write(out, format='ascii.ecsv') assert 'ECSV format does not support multidimensional column' in str(err.value) @pytest.mark.skipif('not HAS_YAML') def test_round_trip_empty_table(): """Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)""" t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c']) out = StringIO() t.write(out, format='ascii.ecsv') t2 = Table.read(out.getvalue(), format='ascii.ecsv') assert t.dtype == t2.dtype assert len(t2) == 0 @pytest.mark.skipif('not HAS_YAML') def test_csv_ecsv_colnames_mismatch(): """ Test that mismatch in column names from normal CSV header vs. ECSV YAML header raises the expected exception. """ lines = copy.copy(SIMPLE_LINES) header_index = lines.index('a b c') lines[header_index] = 'a b d' with pytest.raises(ValueError) as err: ascii.read(lines, format='ecsv') assert "column names from ECSV header ['a', 'b', 'c']" in str(err) @pytest.mark.skipif('not HAS_YAML') def test_regression_5604(): """ See https://github.com/astropy/astropy/issues/5604 for more. 
""" t = Table() t.meta = {"foo": 5*u.km, "foo2": u.s} t["bar"] = [7]*u.km out = StringIO() t.write(out, format="ascii.ecsv") assert '!astropy.units.Unit' in out.getvalue() assert '!astropy.units.Quantity' in out.getvalue() def assert_objects_equal(obj1, obj2, attrs, compare_class=True): if compare_class: assert obj1.__class__ is obj2.__class__ info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description'] for attr in attrs + info_attrs: a1 = obj1 a2 = obj2 for subattr in attr.split('.'): try: a1 = getattr(a1, subattr) a2 = getattr(a2, subattr) except AttributeError: a1 = a1[subattr] a2 = a2[subattr] if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f': assert quantity_allclose(a1, a2, rtol=1e-10) else: assert np.all(a1 == a2) el = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5') scc = sc.copy() scc.representation = 'cartesian' tm = Time([51000.5, 51001.5], format='mjd', scale='tai', precision=5, location=el[0]) tm2 = Time(tm, format='iso') tm3 = Time(tm, location=el) tm3.info.serialize_method['ecsv'] = 'jd1_jd2' mixin_cols = { 'tm': tm, 'tm2': tm2, 'tm3': tm3, 'dt': TimeDelta([1, 2] * u.day), 'sc': sc, 'scc': scc, 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4', obstime=['J1990.5'] * 2), 'q': [1, 2] * u.m, 'lat': Latitude([1, 2] * u.deg), 'lon': Longitude([1, 2] * u.deg, wrap_angle=180.*u.deg), 'ang': Angle([1, 2] * u.deg), 'el': el, # 'nd': NdarrayMixin(el) # not supported yet } time_attrs = ['value', 'shape', 'format', 'scale', 'precision', 'in_subfmt', 'out_subfmt', 'location'] compare_attrs = { 'c1': ['data'], 'c2': ['data'], 'tm': time_attrs, 'tm2': time_attrs, 'tm3': time_attrs, 'dt': ['shape', 'value', 'format', 'scale'], 'sc': ['ra', 'dec', 'representation', 'frame.name'], 'scc': ['x', 'y', 'z', 'representation', 'frame.name'], 'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'], 'q': ['value', 'unit'], 'lon': ['value', 'unit', 'wrap_angle'], 'lat': ['value', 'unit'], 'ang': ['value', 'unit'], 'el': ['x', 'y', 'z', 'ellipsoid'], 'nd': ['x', 'y', 'z'], } @pytest.mark.skipif('not HAS_YAML') def test_ecsv_mixins_ascii_read_class(): """Ensure that ascii.read(ecsv_file) returns the correct class (QTable if any Quantity subclasses, Table otherwise). """ # Make a table with every mixin type except Quantities t = QTable({name: col for name, col in mixin_cols.items() if not isinstance(col.info, QuantityInfo)}) out = StringIO() t.write(out, format="ascii.ecsv") t2 = ascii.read(out.getvalue(), format='ecsv') assert type(t2) is Table # Add a single quantity column t['lon'] = mixin_cols['lon'] out = StringIO() t.write(out, format="ascii.ecsv") t2 = ascii.read(out.getvalue(), format='ecsv') assert type(t2) is QTable @pytest.mark.skipif('not HAS_YAML') def test_ecsv_mixins_qtable_to_table(): """Test writing as QTable and reading as Table. Ensure correct classes come out. 
""" names = sorted(mixin_cols) t = QTable([mixin_cols[name] for name in names], names=names) out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format='ascii.ecsv') assert t.colnames == t2.colnames for name, col in t.columns.items(): col2 = t2[name] attrs = compare_attrs[name] compare_class = True if isinstance(col.info, QuantityInfo): # Downgrade Quantity to Column + unit assert type(col2) is Column attrs = ['unit'] # Other attrs are lost compare_class = False assert_objects_equal(col, col2, attrs, compare_class) @pytest.mark.skipif('not HAS_YAML') @pytest.mark.parametrize('table_cls', (Table, QTable)) def test_ecsv_mixins_as_one(table_cls): """Test write/read all cols at once and validate intermediate column names""" names = sorted(mixin_cols) serialized_names = ['ang', 'dt', 'el.x', 'el.y', 'el.z', 'lat', 'lon', 'q', 'sc.ra', 'sc.dec', 'scc.x', 'scc.y', 'scc.z', 'scd.ra', 'scd.dec', 'scd.distance', 'scd.obstime', 'tm', # serialize_method is formatted_value 'tm2', # serialize_method is formatted_value 'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2 'tm3.location.x', 'tm3.location.y', 'tm3.location.z'] t = table_cls([mixin_cols[name] for name in names], names=names) out = StringIO() t.write(out, format="ascii.ecsv") t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') assert t.colnames == t2.colnames # Read as a ascii.basic table (skip all the ECSV junk) t3 = table_cls.read(out.getvalue(), format='ascii.basic') assert t3.colnames == serialized_names @pytest.mark.skipif('not HAS_YAML') @pytest.mark.parametrize('name_col', list(mixin_cols.items())) @pytest.mark.parametrize('table_cls', (Table, QTable)) def test_ecsv_mixins_per_column(table_cls, name_col): """Test write/read one col at a time and do detailed validation""" name, col = name_col c = [1.0, 2.0] t = table_cls([c, col, c], names=['c1', name, 'c2']) t[name].info.description = 'description' if not t.has_mixin_columns: pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)') if isinstance(t[name], NdarrayMixin): pytest.xfail('NdarrayMixin not supported') out = StringIO() t.write(out, format="ascii.ecsv") t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') assert t.colnames == t2.colnames for colname in t.colnames: assert_objects_equal(t[colname], t2[colname], compare_attrs[colname]) # Special case to make sure Column type doesn't leak into Time class data if name.startswith('tm'): assert t2[name]._time.jd1.__class__ is np.ndarray assert t2[name]._time.jd2.__class__ is np.ndarray
from google.appengine.ext import db import string import soc.models.linkable class Tag(db.Model): """Google AppEngine model for store of tags. """ tag = db.StringProperty(required=True) "The actual string value of the tag." added = db.DateTimeProperty(auto_now_add=True) "The date and time that the tag was first added to the datastore." tagged = db.ListProperty(db.Key) "A List of db.Key values for the datastore objects that have been tagged with this tag value." tagged_count = db.IntegerProperty(default=0) "The number of entities in tagged." auto_delete = db.BooleanProperty(required=True, default=False) "If true, a tag instance should be deleted when tagged_count reaches zero." scope = db.ReferenceProperty(reference_class=soc.models.linkable.Linkable, required=False, collection_name='task_type_tags') "Each tag is scoped under some linkable model." @classmethod def _key_name(cls, scope_path, tag_name): """Create the key_name from program key_name as scope_path and tag_name. """ return scope_path + '/' + tag_name def remove_tagged(self, key): def remove_tagged_txn(): if key in self.tagged: self.tagged.remove(key) self.tagged_count -= 1 if not self.tagged_count and self.auto_delete: self.delete() else: self.put() db.run_in_transaction(remove_tagged_txn) self.__class__.expire_cached_tags() def add_tagged(self, key): def add_tagged_txn(): if key not in self.tagged: self.tagged.append(key) self.tagged_count += 1 self.put() db.run_in_transaction(add_tagged_txn) self.__class__.expire_cached_tags() def clear_tagged(self): def clear_tagged_txn(): if self.auto_delete: self.delete() else: self.tagged = [] self.tagged_count = 0 self.put() db.run_in_transaction(clear_tagged_txn) self.__class__.expire_cached_tags() @classmethod def get_by_name(cls, tag_name): """Get the list of tag objects that has the given tag_name. """ tags = db.Query(cls).filter('tag =', tag_name).fetch(1000) return tags @classmethod def get_by_scope_and_name(cls, scope, tag_name): """Get a tag by scope and name. There may be only one such tag. """ return db.Query(cls).filter( 'scope =', scope).filter('tag =', tag_name).get() @classmethod def get_by_scope(cls, scope): """Get a list of tag objects that has a given scope. """ return db.Query(cls).filter('scope = ', scope).fetch(1000) @classmethod def get_tags_for_key(cls, key, limit=1000): """Get the tags for the datastore object represented by key. """ tags = db.Query(cls).filter('tagged =', key).fetch(limit) return tags @staticmethod def get_for_custom_query(model, filter=None, order=None, limit=1000): """Get a list of tag objects for a custom filter. """ query = db.Query(model) if not filter: filter = {} for key, value in filter.iteritems(): if isinstance(value, list) and len(value) == 1: value = value[0] if isinstance(value, list): op = '%s IN' % key query.filter(op, value) else: query.filter(key, value) if not order: order = [] for key in order: filter.order(key) return query.fetch(limit) @classmethod def get_tags_by_frequency(cls, limit=1000): """Return a list of Tags sorted by the number of objects to which they have been applied, most frequently-used first. If limit is given, return only return only that many tags; otherwise, return all. """ tag_list = db.Query(cls).filter('tagged_count >', 0).order( "-tagged_count").fetch(limit) return tag_list @classmethod def get_tags_by_name(cls, limit=1000, ascending=True): """Return a list of Tags sorted alphabetically by the name of the tag. If a limit is given, return only that many tags; otherwise, return all. 
If ascending is True, sort from a-z; otherwise, sort from z-a. """ from google.appengine.api import memcache cache_name = cls.__name__ + '_tags_by_name' if ascending: cache_name += '_asc' else: cache_name += '_desc' tags = memcache.get(cache_name) if tags is None or len(tags) < limit: order_by = "tag" if not ascending: order_by = "-tag" tags = db.Query(cls).order(order_by).fetch(limit) memcache.add(cache_name, tags, 3600) else: if len(tags) > limit: # Return only as many as requested. tags = tags[:limit] return tags @classmethod def get_or_create(cls, scope, tag_name): """Get the Tag object that has the tag value given by tag_value. """ tag_key_name = cls._key_name(scope.key().name(), tag_name) existing_tag = cls.get_by_key_name(tag_key_name) if existing_tag is None: # the tag does not yet exist, so create it. def create_tag_txn(): new_tag = cls(key_name=tag_key_name, tag=tag_name, scope=scope) new_tag.put() return new_tag existing_tag = db.run_in_transaction(create_tag_txn) return existing_tag @classmethod def copy_tag(cls, scope, tag_name, new_tag_name): """Copy a tag with a given scope and tag_name to another tag with new tag_name. """ tag = cls.get_by_scope_and_name(scope, tag_name) if tag: tag_key_name = cls._key_name(scope.key().name(), new_tag_name) existing_tag = cls.get_by_key_name(tag_key_name) if existing_tag is None: new_tag = cls(key_name=tag_key_name, tag=new_tag_name, scope=scope, added=tag.added, tagged=tag.tagged, tagged_count=tag.tagged_count) new_tag.put() tag.delete() return new_tag return existing_tag return None @classmethod def delete_tag(cls, scope, tag_name): """Delete a tag with a given scope and tag_name. """ tag = cls.get_by_scope_and_name(scope, tag_name) if tag: tag.delete() return True return False @classmethod def popular_tags(cls, limit=5): """Get the most popular tags from memcache, or if they are not defined there, it retrieves them from datastore and sets in memcache. """ from google.appengine.api import memcache tags = memcache.get(cls.__name__ + '_popular_tags') if tags is None: tags = cls.get_tags_by_frequency(limit) memcache.add(cls.__name__ + '_popular_tags', tags, 3600) return tags @classmethod def expire_cached_tags(cls): """Expire all tag lists which exist in memcache. """ from google.appengine.api import memcache memcache.delete(cls.__name__ + '_popular_tags') memcache.delete(cls.__name__ + '_tags_by_name_asc') memcache.delete(cls.__name__ + '_tags_by_name_desc') def __str__(self): """Returns the string representation of the entity's tag name. """ return self.tag def tag_property(tag_name): """Decorator that creates and returns a tag property to be used in Google AppEngine model. Args: tag_name: name of the tag to be created. """ def get_tags(self): """"Get a list of Tag objects for all Tags that apply to the specified entity. """ if self._tags[tag_name] is None or len(self._tags[tag_name]) == 0: self._tags[tag_name] = self._tag_model[ tag_name].get_tags_for_key(self.key()) return self._tags[tag_name] def set_tags(self, seed): """Set a list of Tag objects for all Tags that apply to the specified entity. """ import types if type(seed['tags']) is types.UnicodeType: # Convert unicode to a plain string seed['tags'] = str(seed['tags']) if type(seed['tags']) is types.StringType: # Tags is a string, split it on tag_seperator into a list seed['tags'] = string.split(seed['tags'], self.tag_separator) if type(seed['tags']) is types.ListType: get_tags(self) # Firstly, we will check to see if any tags have been removed. 
# Iterate over a copy of _tags, as we may need to modify _tags for each_tag in self._tags[tag_name][:]: if each_tag not in seed['tags']: # A tag that was previously assigned to this entity is # missing in the list that is being assigned, so we # disassocaite this entity and the tag. each_tag.remove_tagged(self.key()) self._tags[tag_name].remove(each_tag) # Secondly, we will check to see if any tags have been added. for each_tag in seed['tags']: each_tag = string.strip(each_tag) if len(each_tag) > 0 and each_tag not in self._tags[tag_name]: # A tag that was not previously assigned to this entity # is present in the list that is being assigned, so we # associate this entity with the tag. tag = self._tag_model[tag_name].get_or_create( seed['scope'], each_tag) tag.add_tagged(self.key()) self._tags[tag_name].append(tag) else: raise Exception, "tags must be either a unicode, a string or a list" return property(get_tags, set_tags) class Taggable(object): """A mixin class that is used for making GAE Model classes taggable. This is an extended version of Taggable-mixin which allows for multiple tag properties in the same AppEngine Model class. """ def __init__(self, **kwargs): """The constructor class for Taggable, that creates a dictionary of tags. The difference from the original taggable in terms of interface is that, tag class is not used as the default tag model, since we don't have a default tag property created in this class now. Args: kwargs: keywords containing the name of the tags and arguments containing tag model to be used. """ self._tags = {} self._tag_model = {} for tag_name in kwargs: self._tags[tag_name] = None self._tag_model[tag_name] = kwargs[tag_name] self.tag_separator = ", " def tags_string(self, tag_name, ret_list=False): """Create a formatted string version of this entity's tags. Args: tag_name: the name of the tag which must be formatted ret_list: if False sends a string, otherwise sends a Python list """ tag_list = [each_tag.tag for each_tag in tag_name] if ret_list: return tag_list else: return self.tag_separator.join(tag_list) def tags_class(self, tag_name): """Return a class instance object for a given tag name. """ return self._tag_model[tag_name]
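A hypothetical sketch of how a datastore model would wire up `Taggable` and `tag_property`; the `Task` model, the `TaskTypeTag` subclass, and the pre-existing `program_entity` used as scope are all illustrative, not part of this module:

from google.appengine.ext import db


class TaskTypeTag(Tag):
    """Tag subclass used to classify tasks by type (illustrative)."""
    pass


class Task(Taggable, db.Model):
    """Minimal taggable entity exposing a single 'task_type' tag property."""

    title = db.StringProperty()

    task_type = tag_property('task_type')

    def __init__(self, parent=None, key_name=None, **entity_values):
        db.Model.__init__(self, parent=parent, key_name=key_name, **entity_values)
        Taggable.__init__(self, task_type=TaskTypeTag)


task = Task(key_name='example_task', title='Polish the frontend')
task.put()

# Assignment goes through set_tags(): the seed is a dict holding the raw tag
# string (or list) plus the Linkable entity used as the tags' scope.
# program_entity stands in for some already-stored Linkable.
task.task_type = {'tags': u'design, frontend', 'scope': program_entity}

# Reading the property returns the Tag entities currently applied to the task.
print [each.tag for each in task.task_type]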
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import eventlet from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_utils import importutils from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.common import constants from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import context from neutron.i18n import _LE, _LI, _LW from neutron import manager LOG = logging.getLogger(__name__) class DhcpAgent(manager.Manager): """DHCP agent service manager. Note that the public methods of this class are exposed as the server side of an rpc interface. The neutron server uses neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the client side to execute the methods here. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ target = oslo_messaging.Target(version='1.0') def __init__(self, host=None, conf=None): super(DhcpAgent, self).__init__(host=host) self.needs_resync_reasons = collections.defaultdict(list) self.conf = conf or cfg.CONF self.cache = NetworkCache() self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) ctx = context.get_admin_context_without_session() self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx, self.conf.use_namespaces, self.conf.host) # create dhcp dir to store dhcp info dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path) utils.ensure_dir(dhcp_dir) self.dhcp_version = self.dhcp_driver_cls.check_version() self._populate_networks_cache() self._process_monitor = external_process.ProcessMonitor( config=self.conf, resource_type='dhcp') def init_host(self): self.sync_state() def _populate_networks_cache(self): """Populate the networks cache when the DHCP-agent starts.""" try: existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( self.conf ) for net_id in existing_networks: net = dhcp.NetModel(self.conf.use_namespaces, {"id": net_id, "subnets": [], "ports": []}) self.cache.put(net) except NotImplementedError: # just go ahead with an empty networks cache LOG.debug("The '%s' DHCP-driver does not support retrieving of a " "list of existing networks", self.conf.dhcp_driver) def after_start(self): self.run() LOG.info(_LI("DHCP agent started")) def run(self): """Activate the DHCP agent.""" self.sync_state() self.periodic_resync() def call_driver(self, action, network, **action_kwargs): """Invoke an action on a DHCP driver instance.""" LOG.debug('Calling driver for network: %(net)s action: %(action)s', {'net': network.id, 'action': action}) try: # the Driver expects something that is duck typed similar to # the base models. 
driver = self.dhcp_driver_cls(self.conf, network, self._process_monitor, self.dhcp_version, self.plugin_rpc) getattr(driver, action)(**action_kwargs) return True except exceptions.Conflict: # No need to resync here, the agent will receive the event related # to a status update for the network LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there ' 'is a conflict with its current state; please ' 'check that the network and/or its subnet(s) ' 'still exist.'), {'net_id': network.id, 'action': action}) except Exception as e: if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure': # Don't resync if port could not be created because of an IP # allocation failure. When the subnet is updated with a new # allocation pool or a port is deleted to free up an IP, this # will automatically be retried on the notification self.schedule_resync(e, network.id) if (isinstance(e, oslo_messaging.RemoteError) and e.exc_type == 'NetworkNotFound' or isinstance(e, exceptions.NetworkNotFound)): LOG.warning(_LW("Network %s has been deleted."), network.id) else: LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'), {'net_id': network.id, 'action': action}) def schedule_resync(self, reason, network_id=None): """Schedule a resync for a given network and reason. If no network is specified, resync all networks. """ self.needs_resync_reasons[network_id].append(reason) @utils.synchronized('dhcp-agent') def sync_state(self, networks=None): """Sync the local DHCP state with Neutron. If no networks are passed, or 'None' is one of the networks, sync all of the networks. """ only_nets = set([] if (not networks or None in networks) else networks) LOG.info(_LI('Synchronizing state')) pool = eventlet.GreenPool(self.conf.num_sync_threads) known_network_ids = set(self.cache.get_network_ids()) try: active_networks = self.plugin_rpc.get_active_networks_info() active_network_ids = set(network.id for network in active_networks) for deleted_id in known_network_ids - active_network_ids: try: self.disable_dhcp_helper(deleted_id) except Exception as e: self.schedule_resync(e, deleted_id) LOG.exception(_LE('Unable to sync network state on ' 'deleted network %s'), deleted_id) for network in active_networks: if (not only_nets or # specifically resync all network.id not in known_network_ids or # missing net network.id in only_nets): # specific network to sync pool.spawn(self.safe_configure_dhcp_for_network, network) pool.waitall() LOG.info(_LI('Synchronizing state complete')) except Exception as e: if only_nets: for network_id in only_nets: self.schedule_resync(e, network_id) else: self.schedule_resync(e) LOG.exception(_LE('Unable to sync network state.')) @utils.exception_logger() def _periodic_resync_helper(self): """Resync the dhcp state at the configured interval.""" while True: eventlet.sleep(self.conf.resync_interval) if self.needs_resync_reasons: # be careful to avoid a race with additions to list # from other threads reasons = self.needs_resync_reasons self.needs_resync_reasons = collections.defaultdict(list) for net, r in reasons.items(): if not net: net = "*" LOG.debug("resync (%(network)s): %(reason)s", {"reason": r, "network": net}) self.sync_state(reasons.keys()) def periodic_resync(self): """Spawn a thread to periodically resync the dhcp state.""" eventlet.spawn(self._periodic_resync_helper) def safe_get_network_info(self, network_id): try: network = self.plugin_rpc.get_network_info(network_id) if not network: LOG.warn(_LW('Network %s has been deleted.'), network_id) return network except Exception as e: 
self.schedule_resync(e, network_id) LOG.exception(_LE('Network %s info call failed.'), network_id) def enable_dhcp_helper(self, network_id): """Enable DHCP for a network that meets enabling criteria.""" network = self.safe_get_network_info(network_id) if network: self.configure_dhcp_for_network(network) @utils.exception_logger() def safe_configure_dhcp_for_network(self, network): try: self.configure_dhcp_for_network(network) except (exceptions.NetworkNotFound, RuntimeError): LOG.warn(_LW('Network %s may have been deleted and its resources ' 'may have already been disposed.'), network.id) def configure_dhcp_for_network(self, network): if not network.admin_state_up: return enable_metadata = self.dhcp_driver_cls.should_enable_metadata( self.conf, network) dhcp_network_enabled = False for subnet in network.subnets: if subnet.enable_dhcp: if self.call_driver('enable', network): dhcp_network_enabled = True self.cache.put(network) break if enable_metadata and dhcp_network_enabled: for subnet in network.subnets: if subnet.ip_version == 4 and subnet.enable_dhcp: self.enable_isolated_metadata_proxy(network) break def disable_dhcp_helper(self, network_id): """Disable DHCP for a network known to the agent.""" network = self.cache.get_network_by_id(network_id) if network: if (self.conf.use_namespaces and self.conf.enable_isolated_metadata): # NOTE(jschwarz): In the case where a network is deleted, all # the subnets and ports are deleted before this function is # called, so checking if 'should_enable_metadata' is True # for any subnet is false logic here. self.disable_isolated_metadata_proxy(network) if self.call_driver('disable', network): self.cache.remove(network) def refresh_dhcp_helper(self, network_id): """Refresh or disable DHCP for a network depending on the current state of the network. """ old_network = self.cache.get_network_by_id(network_id) if not old_network: # DHCP current not running for network. return self.enable_dhcp_helper(network_id) network = self.safe_get_network_info(network_id) if not network: return old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp) new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp) if new_cidrs and old_cidrs == new_cidrs: self.call_driver('reload_allocations', network) self.cache.put(network) elif new_cidrs: if self.call_driver('restart', network): self.cache.put(network) else: self.disable_dhcp_helper(network.id) @utils.synchronized('dhcp-agent') def network_create_end(self, context, payload): """Handle the network.create.end notification event.""" network_id = payload['network']['id'] self.enable_dhcp_helper(network_id) @utils.synchronized('dhcp-agent') def network_update_end(self, context, payload): """Handle the network.update.end notification event.""" network_id = payload['network']['id'] if payload['network']['admin_state_up']: self.enable_dhcp_helper(network_id) else: self.disable_dhcp_helper(network_id) @utils.synchronized('dhcp-agent') def network_delete_end(self, context, payload): """Handle the network.delete.end notification event.""" self.disable_dhcp_helper(payload['network_id']) @utils.synchronized('dhcp-agent') def subnet_update_end(self, context, payload): """Handle the subnet.update.end notification event.""" network_id = payload['subnet']['network_id'] self.refresh_dhcp_helper(network_id) # Use the update handler for the subnet create event. 
subnet_create_end = subnet_update_end @utils.synchronized('dhcp-agent') def subnet_delete_end(self, context, payload): """Handle the subnet.delete.end notification event.""" subnet_id = payload['subnet_id'] network = self.cache.get_network_by_subnet_id(subnet_id) if network: self.refresh_dhcp_helper(network.id) @utils.synchronized('dhcp-agent') def port_update_end(self, context, payload): """Handle the port.update.end notification event.""" updated_port = dhcp.DictModel(payload['port']) network = self.cache.get_network_by_id(updated_port.network_id) if network: LOG.info(_LI("Trigger reload_allocations for port %s"), updated_port) driver_action = 'reload_allocations' if self._is_port_on_this_agent(updated_port): orig = self.cache.get_port_by_id(updated_port['id']) # assume IP change if not in cache old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []} new_ips = {i['ip_address'] for i in updated_port['fixed_ips']} if old_ips != new_ips: driver_action = 'restart' self.cache.put_port(updated_port) self.call_driver(driver_action, network) def _is_port_on_this_agent(self, port): thishost = utils.get_dhcp_agent_device_id( port['network_id'], self.conf.host) return port['device_id'] == thishost # Use the update handler for the port create event. port_create_end = port_update_end @utils.synchronized('dhcp-agent') def port_delete_end(self, context, payload): """Handle the port.delete.end notification event.""" port = self.cache.get_port_by_id(payload['port_id']) if port: network = self.cache.get_network_by_id(port.network_id) self.cache.remove_port(port) self.call_driver('reload_allocations', network) def enable_isolated_metadata_proxy(self, network): # The proxy might work for either a single network # or all the networks connected via a router # to the one passed as a parameter kwargs = {'network_id': network.id} # When the metadata network is enabled, the proxy might # be started for the router attached to the network if self.conf.enable_metadata_network: router_ports = [port for port in network.ports if (port.device_owner in constants.ROUTER_INTERFACE_OWNERS)] if router_ports: # Multiple router ports should not be allowed if len(router_ports) > 1: LOG.warning(_LW("%(port_num)d router ports found on the " "metadata access network. Only the port " "%(port_id)s, for router %(router_id)s " "will be considered"), {'port_num': len(router_ports), 'port_id': router_ports[0].id, 'router_id': router_ports[0].device_id}) kwargs = {'router_id': router_ports[0].device_id} metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy( self._process_monitor, network.namespace, dhcp.METADATA_PORT, self.conf, **kwargs) def disable_isolated_metadata_proxy(self, network): metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy( self._process_monitor, network.id, self.conf) class DhcpPluginApi(object): """Agent side of the dhcp rpc API. This class implements the client side of an rpc interface. The server side of this interface can be found in neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, and update_dhcp_port methods. 
""" def __init__(self, topic, context, use_namespaces, host): self.context = context self.host = host self.use_namespaces = use_namespaces target = oslo_messaging.Target( topic=topic, namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, version='1.0') self.client = n_rpc.get_client(target) def get_active_networks_info(self): """Make a remote process call to retrieve all network info.""" cctxt = self.client.prepare(version='1.1') networks = cctxt.call(self.context, 'get_active_networks_info', host=self.host) return [dhcp.NetModel(self.use_namespaces, n) for n in networks] def get_network_info(self, network_id): """Make a remote process call to retrieve network info.""" cctxt = self.client.prepare() network = cctxt.call(self.context, 'get_network_info', network_id=network_id, host=self.host) if network: return dhcp.NetModel(self.use_namespaces, network) def create_dhcp_port(self, port): """Make a remote process call to create the dhcp port.""" cctxt = self.client.prepare(version='1.1') port = cctxt.call(self.context, 'create_dhcp_port', port=port, host=self.host) if port: return dhcp.DictModel(port) def update_dhcp_port(self, port_id, port): """Make a remote process call to update the dhcp port.""" cctxt = self.client.prepare(version='1.1') port = cctxt.call(self.context, 'update_dhcp_port', port_id=port_id, port=port, host=self.host) if port: return dhcp.DictModel(port) def release_dhcp_port(self, network_id, device_id): """Make a remote process call to release the dhcp port.""" cctxt = self.client.prepare() return cctxt.call(self.context, 'release_dhcp_port', network_id=network_id, device_id=device_id, host=self.host) class NetworkCache(object): """Agent cache of the current network state.""" def __init__(self): self.cache = {} self.subnet_lookup = {} self.port_lookup = {} def get_network_ids(self): return self.cache.keys() def get_network_by_id(self, network_id): return self.cache.get(network_id) def get_network_by_subnet_id(self, subnet_id): return self.cache.get(self.subnet_lookup.get(subnet_id)) def get_network_by_port_id(self, port_id): return self.cache.get(self.port_lookup.get(port_id)) def put(self, network): if network.id in self.cache: self.remove(self.cache[network.id]) self.cache[network.id] = network for subnet in network.subnets: self.subnet_lookup[subnet.id] = network.id for port in network.ports: self.port_lookup[port.id] = network.id def remove(self, network): del self.cache[network.id] for subnet in network.subnets: del self.subnet_lookup[subnet.id] for port in network.ports: del self.port_lookup[port.id] def put_port(self, port): network = self.get_network_by_id(port.network_id) for index in range(len(network.ports)): if network.ports[index].id == port.id: network.ports[index] = port break else: network.ports.append(port) self.port_lookup[port.id] = network.id def remove_port(self, port): network = self.get_network_by_port_id(port.id) for index in range(len(network.ports)): if network.ports[index] == port: del network.ports[index] del self.port_lookup[port.id] break def get_port_by_id(self, port_id): network = self.get_network_by_port_id(port_id) if network: for port in network.ports: if port.id == port_id: return port def get_state(self): net_ids = self.get_network_ids() num_nets = len(net_ids) num_subnets = 0 num_ports = 0 for net_id in net_ids: network = self.get_network_by_id(net_id) num_subnets += len(network.subnets) num_ports += len(network.ports) return {'networks': num_nets, 'subnets': num_subnets, 'ports': num_ports} class DhcpAgentWithStateReport(DhcpAgent): def 
__init__(self, host=None, conf=None): super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.agent_state = { 'binary': 'neutron-dhcp-agent', 'host': host, 'availability_zone': self.conf.AGENT.availability_zone, 'topic': topics.DHCP_AGENT, 'configurations': { 'dhcp_driver': self.conf.dhcp_driver, 'use_namespaces': self.conf.use_namespaces, 'dhcp_lease_duration': self.conf.dhcp_lease_duration, 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_DHCP} report_interval = self.conf.AGENT.report_interval self.use_call = True if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.agent_state.get('configurations').update( self.cache.get_state()) ctx = context.get_admin_context_without_session() self.state_rpc.report_state(ctx, self.agent_state, self.use_call) self.use_call = False except AttributeError: # This means the server does not support report_state LOG.warn(_LW("Neutron server does not support state report." " State report for this agent will be disabled.")) self.heartbeat.stop() self.run() return except Exception: LOG.exception(_LE("Failed reporting state!")) return if self.agent_state.pop('start_flag', None): self.run() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.schedule_resync(_("Agent updated: %(payload)s") % {"payload": payload}) LOG.info(_LI("agent_updated by server side %s!"), payload) def after_start(self): LOG.info(_LI("DHCP agent started"))
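A self-contained sketch of how `NetworkCache` tracks networks, subnets and ports. The `FakeNetwork`, `Port` and `Subnet` stand-ins below are illustrative substitutes for the `dhcp.NetModel`/`DictModel` objects the agent normally caches; only the attributes the cache actually touches are provided.

import collections

Port = collections.namedtuple('Port', ['id', 'network_id', 'fixed_ips'])
Subnet = collections.namedtuple('Subnet', ['id'])


class FakeNetwork(object):
    """Bare-bones stand-in exposing the attributes NetworkCache reads."""
    def __init__(self, id, subnets, ports):
        self.id = id
        self.subnets = subnets
        self.ports = list(ports)


cache = NetworkCache()
net = FakeNetwork('net-1',
                  subnets=[Subnet('subnet-1')],
                  ports=[Port('port-1', 'net-1', [])])
cache.put(net)

assert cache.get_network_by_subnet_id('subnet-1') is net
assert cache.get_port_by_id('port-1').id == 'port-1'

# put_port() replaces a cached port with the same id, or appends a new one.
cache.put_port(Port('port-2', 'net-1', []))
assert cache.get_state() == {'networks': 1, 'subnets': 1, 'ports': 2}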
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import sys import string import json blink_protocol_path = sys.argv[1] browser_protocol_path = sys.argv[2] output_cc_path = sys.argv[3] output_h_path = sys.argv[4] header = """\ // Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // THIS FILE IS AUTOGENERATED. DO NOT EDIT. // Generated by // content/public/browser/devtools_protocol_handler_generator.py from // third_party/WebKit/Source/devtools/protocol.json and // content/browser/devtools/browser_protocol.json """ template_h = string.Template(header + """\ #ifndef CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_ #define CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_ #include "content/browser/devtools/protocol/devtools_protocol_client.h" namespace content { class DevToolsProtocolDispatcher; namespace devtools { extern const char kProtocolVersion[]; bool IsSupportedProtocolVersion(const std::string& version); template<typename T> base::Value* CreateValue(const T& param) { return new base::FundamentalValue(param); } template<class T> base::Value* CreateValue(scoped_ptr<T>& param) { return param.release(); } template<class T> base::Value* CreateValue(scoped_refptr<T> param) { return param->ToValue().release(); } template<typename T> base::Value* CreateValue(const std::vector<T> param) { base::ListValue* result = new base::ListValue(); for (auto& item : param) { result->Append(CreateValue(item)); } return result; } template<> base::Value* CreateValue(const std::string& param); ${types}\ } // namespace devtools class DevToolsProtocolDispatcher { public: using Notifier = DevToolsProtocolClient::RawMessageCallback; using CommandHandler = base::Callback<bool(int, scoped_ptr<base::DictionaryValue>)>; explicit DevToolsProtocolDispatcher(const Notifier& notifier); ~DevToolsProtocolDispatcher(); CommandHandler FindCommandHandler(const std::string& method); ${setters}\ private: using Response = DevToolsProtocolClient::Response; using CommandHandlers = std::map<std::string, CommandHandler>; ${methods}\ Notifier notifier_; DevToolsProtocolClient client_; CommandHandlers command_handlers_; ${fields}\ }; } // namespace content #endif // CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_ """) tmpl_typedef = string.Template("""\ namespace ${domain} { typedef ${param_type} ${declared_name}; } // namespace ${domain} """) tmpl_struct = string.Template("""\ namespace ${domain} { template<int MASK> struct ${declared_name}Builder : base::RefCounted<${declared_name}Builder<MASK>> { public: enum { kAllSet = 0, ${fields_enum}\ }; ${methods}\ static scoped_refptr<${declared_name}Builder<kNoneSet>> Create() { return new ${declared_name}Builder<kNoneSet>(); } scoped_ptr<base::DictionaryValue> ToValue() { static_assert(MASK == kAllSet, "required properties missing"); return make_scoped_ptr(dict_->DeepCopy()); } private: friend struct ${declared_name}Builder<0>; ${declared_name}Builder() : dict_(new base::DictionaryValue()) { } template<class T> T* ThisAs() { static_assert(sizeof(*this) == sizeof(T), "cannot cast"); return reinterpret_cast<T*>(this); } scoped_ptr<base::DictionaryValue> dict_; }; typedef ${declared_name}Builder<0> ${declared_name}; } // namespace ${domain} """) tmpl_builder_setter_req = string.Template("""\ 
scoped_refptr<${declared_name}Builder<MASK & ~k${Param}>> set_${param}(${pass_type} ${param}) { static_assert(MASK & k${Param}, "already set"); dict_->Set("${proto_param}", CreateValue(${param})); return ThisAs<${declared_name}Builder<MASK & ~k${Param}>>(); } """) tmpl_builder_setter_opt = string.Template("""\ scoped_refptr<${declared_name}Builder<MASK>> set_${param}(${pass_type} ${param}) { dict_->Set("${proto_param}", CreateValue(${param})); return this; } """) tmpl_builder_enum = string.Template("""\ k${Param} = 1 << ${ordinal}, """) tmpl_builder_none_set = string.Template("""\ kNoneSet = ${all_fields} """) tmpl_named_enum = string.Template("""\ namespace ${domain} { ${values}\ } // namespace ${domain} """) tmpl_inline_enum = string.Template("""\ namespace ${domain} { namespace ${subdomain} { ${values}\ } // namespace ${subdomain} } // namespace ${domain} """) tmpl_enum_value = string.Template("""\ extern const char k${Enum}${Value}[]; """) tmpl_enum_value_def = string.Template("""\ const char k${Enum}${Value}[] = "${value}"; """) tmpl_handler = string.Template("""\ namespace ${domain} { class ${Domain}Handler; } // namespace domain """) tmpl_client = string.Template("""\ namespace ${domain} { class Client : public DevToolsProtocolClient { public: explicit Client(const RawMessageCallback& raw_message_callback); ~Client() override; ${methods}\ }; } // namespace ${domain} """) tmpl_event = string.Template("""\ void ${Command}( scoped_refptr<${Command}Params> params); """) tmpl_response = string.Template("""\ void Send${Command}Response( DevToolsCommandId command_id, scoped_refptr<${Command}Response> params); """) tmpl_setter = string.Template("""\ void Set${Domain}Handler( devtools::${domain}::${Domain}Handler* ${domain}_handler); """) tmpl_callback = string.Template("""\ bool On${Domain}${Command}( DevToolsCommandId command_id, scoped_ptr<base::DictionaryValue> params); """) tmpl_field = string.Template("""\ devtools::${domain}::${Domain}Handler* ${domain}_handler_; """) template_cc = string.Template(header + """\ #include "content/browser/devtools/protocol/devtools_protocol_handler.h" #include "base/bind.h" #include "base/strings/string_number_conversions.h" ${includes}\ namespace content { DevToolsProtocolDispatcher::DevToolsProtocolDispatcher( const Notifier& notifier) : notifier_(notifier), client_(notifier), ${fields_init} { } DevToolsProtocolDispatcher::~DevToolsProtocolDispatcher() { } DevToolsProtocolDispatcher::CommandHandler DevToolsProtocolDispatcher::FindCommandHandler(const std::string& method) { CommandHandlers::iterator it = command_handlers_.find(method); return it == command_handlers_.end() ? 
      CommandHandler() : it->second;
}

${methods}\

namespace devtools {

const char kProtocolVersion[] = "${major}.${minor}";

bool IsSupportedProtocolVersion(const std::string& version) {
  std::vector<std::string> tokens;
  Tokenize(version, ".", &tokens);
  int major, minor;
  return tokens.size() == 2 &&
         base::StringToInt(tokens[0], &major) &&
         major == ${major} &&
         base::StringToInt(tokens[1], &minor) &&
         minor <= ${minor};
}

template<>
base::Value* CreateValue(const std::string& param) {
  return new base::StringValue(param);
}

${types}\

}  // namespace devtools
}  // namespace content
""")

tmpl_include = string.Template("""\
#include "content/browser/devtools/protocol/${domain}_handler.h"
""")

tmpl_field_init = string.Template("${domain}_handler_(nullptr)")

tmpl_setter_impl = string.Template("""\
void DevToolsProtocolDispatcher::Set${Domain}Handler(
    devtools::${domain}::${Domain}Handler* ${domain}_handler) {
  DCHECK(!${domain}_handler_);
  ${domain}_handler_ = ${domain}_handler;
${initializations}\
}
""")

tmpl_register = string.Template("""\
  command_handlers_["${Domain}.${command}"] =
      base::Bind(
          &DevToolsProtocolDispatcher::On${TargetDomain}${Command},
          base::Unretained(this));
""")

tmpl_init_client = string.Template("""\
  ${domain}_handler_->SetClient(make_scoped_ptr(
      new devtools::${domain}::Client(notifier_)));
""")

tmpl_callback_impl = string.Template("""\
bool DevToolsProtocolDispatcher::On${Domain}${Command}(
    DevToolsCommandId command_id,
    scoped_ptr<base::DictionaryValue> params) {
${prep}\
  Response response = ${domain}_handler_->${Command}(${args});
  scoped_ptr<base::DictionaryValue> protocol_response;
  if (client_.SendError(command_id, response))
    return true;
  if (response.IsFallThrough())
    return false;
  scoped_ptr<base::DictionaryValue> result(new base::DictionaryValue());
${wrap}\
  client_.SendSuccess(command_id, result.Pass());
  return true;
}
""")

tmpl_wrap = string.Template("""\
  result->Set("${proto_param}", devtools::CreateValue(out_${param}));
""")

tmpl_callback_async_impl = string.Template("""\
bool DevToolsProtocolDispatcher::On${Domain}${Command}(
    DevToolsCommandId command_id,
    scoped_ptr<base::DictionaryValue> params) {
${prep}\
  Response response = ${domain}_handler_->${Command}(${args});
  if (client_.SendError(command_id, response))
    return true;
  return !response.IsFallThrough();
}
""")

tmpl_prep_req = string.Template("""\
  ${raw_type} in_${param}${init};
  if (!params || !params->Get${Type}("${proto_param}", &in_${param})) {
    client_.SendError(command_id,
                      Response::InvalidParams("${proto_param}"));
    return true;
  }
""")

tmpl_prep_req_list = string.Template("""\
  base::ListValue* list_${param} = nullptr;
  if (!params || !params->GetList("${proto_param}", &list_${param})) {
    client_.SendError(command_id,
                      Response::InvalidParams("${proto_param}"));
    return true;
  }
  std::vector<${item_type}> in_${param};
  for (base::ListValue::const_iterator it = list_${param}->begin();
       it != list_${param}->end(); ++it) {
    ${item_raw_type} item;
    if (!(*it)->GetAs${ItemType}(&item)) {
      client_.SendError(command_id,
                        Response::InvalidParams("${proto_param}"));
      return true;
    }
    in_${param}.push_back(${item_pass});
  }
""")

tmpl_prep_opt = string.Template("""\
  ${raw_type} in_${param}${init};
  bool ${param}_found = params && params->Get${Type}(
      "${proto_param}", &in_${param});
""")

tmpl_prep_output = string.Template("""\
  ${param_type} out_${param}${init};
""")

tmpl_arg_name = string.Template("in_${param}")

tmpl_arg_req = string.Template("${param_pass}")

tmpl_arg_opt = string.Template(
    "${param}_found ? ${param_pass} : nullptr")

tmpl_object_pass = string.Template(
    "make_scoped_ptr<base::DictionaryValue>(${name}->DeepCopy())")

tmpl_client_impl = string.Template("""\
namespace ${domain} {

Client::Client(const RawMessageCallback& raw_message_callback)
    : DevToolsProtocolClient(raw_message_callback) {
}

Client::~Client() {
}

${methods}\

}  // namespace ${domain}
""")

tmpl_event_impl = string.Template("""\
void Client::${Command}(
    scoped_refptr<${Command}Params> params) {
  SendNotification("${Domain}.${command}", params->ToValue().Pass());
}
""")

tmpl_response_impl = string.Template("""\
void Client::Send${Command}Response(
    DevToolsCommandId command_id,
    scoped_refptr<${Command}Response> params) {
  SendSuccess(command_id, params->ToValue().Pass());
}
""")

tmpl_typename = string.Template("devtools::${domain}::${declared_name}")

def Capitalize(s):
  return s[:1].upper() + s[1:]

# Convert a camelCase protocol name into a lower_case_with_underscores one.
def Uncamelcase(s):
  result = ""
  for i, c in enumerate(s):
    if c.isupper():
      if (i > 0) and ((i < len(s)-1) and s[i+1].islower() or s[i-1].islower()):
        result += "_"
      result += c.lower()
    else:
      result += c
  return result

types = {}
blink_protocol = json.loads(open(blink_protocol_path, "r").read())
browser_protocol = json.loads(open(browser_protocol_path, "r").read())

type_decls = []
type_impls = []
handler_methods = []
handler_method_impls = []
domain_maps = []
redirects = {}

all_domains = blink_protocol["domains"] + browser_protocol["domains"]

for json_domain in all_domains:
  if "types" in json_domain:
    for json_type in json_domain["types"]:
      types["%s.%s" % (json_domain["domain"], json_type["id"])] = json_type

def DeclareStruct(json_properties, mapping):
  methods = []
  fields_enum = []
  enum_items = []
  req_fields_num = 0
  for json_prop in json_properties:
    prop_map = mapping.copy()
    prop_map["proto_param"] = json_prop["name"]
    prop_map["param"] = Uncamelcase(json_prop["name"])
    prop_map["Param"] = Capitalize(json_prop["name"])
    prop_map["subdomain"] = Uncamelcase(prop_map["declared_name"])
    del prop_map["declared_name"]
    ResolveType(json_prop, prop_map)
    prop_map["declared_name"] = mapping["declared_name"]
    if json_prop.get("optional"):
      methods.append(tmpl_builder_setter_opt.substitute(prop_map))
    else:
      methods.append(tmpl_builder_setter_req.substitute(prop_map))
      enum_items.append("k%s" % prop_map["Param"])
      fields_enum.append(tmpl_builder_enum.substitute(prop_map,
          ordinal = req_fields_num))
      req_fields_num += 1
  all_fields = "kAllSet"
  if len(enum_items) > 0:
    all_fields = " | ".join(enum_items)
  fields_enum.append(tmpl_builder_none_set.substitute(mapping,
      all_fields = all_fields))
  type_decls.append(tmpl_struct.substitute(mapping,
      methods = "\n".join(methods),
      fields_enum = "".join(fields_enum)))

def DeclareEnum(json, mapping):
  values = []
  value_defs = []
  tmpl_enum = tmpl_inline_enum
  if "declared_name" in mapping:
    mapping["Enum"] = mapping["declared_name"]
    tmpl_enum = tmpl_named_enum
  else:
    mapping["Enum"] = Capitalize(mapping["proto_param"])
  for enum_value in json["enum"]:
    values.append(tmpl_enum_value.substitute(mapping,
        Value = Capitalize(enum_value)))
    value_defs.append(tmpl_enum_value_def.substitute(mapping,
        value = enum_value, Value = Capitalize(enum_value)))
  type_decls.append(tmpl_enum.substitute(mapping, values = "".join(values)))
  type_impls.append(tmpl_enum.substitute(mapping,
      values = "".join(value_defs)))

def ResolveRef(json, mapping):
  dot_pos = json["$ref"].find(".")
  if dot_pos == -1:
    domain_name = mapping["Domain"]
    type_name = json["$ref"]
  else:
    domain_name = json["$ref"][:dot_pos]
    type_name = json["$ref"][dot_pos + 1:]
  json_type = types["%s.%s" % (domain_name, type_name)]
  mapping["declared_name"] = Capitalize(type_name)
  mapping["Domain"] = domain_name
  mapping["domain"] = Uncamelcase(domain_name)
  mapping["param_type"] = tmpl_typename.substitute(mapping)
  ResolveType(json_type, mapping)
  if not "___type_declared" in json_type:
    json_type["___type_declared"] = True
    if (json_type.get("type") == "object") and ("properties" in json_type):
      DeclareStruct(json_type["properties"], mapping)
    else:
      if ("enum" in json_type):
        DeclareEnum(json_type, mapping)
      type_decls.append(tmpl_typedef.substitute(mapping))

def ResolveArray(json, mapping):
  items_map = mapping.copy()
  ResolveType(json["items"], items_map)
  if items_map["Type"] == "List":
    # TODO(dgozman) Implement this.
    raise Exception("Nested arrays are not implemented")
  mapping["param_type"] = "std::vector<%s>" % items_map["param_type"]
  mapping["Type"] = "List"
  mapping["pass_type"] = "const %s&" % mapping["param_type"]
  mapping["storage_type"] = "std::vector<%s>" % items_map["storage_type"]
  mapping["raw_type"] = mapping["storage_type"]
  mapping["prep_req"] = tmpl_prep_req_list.substitute(mapping,
      item_type = items_map["storage_type"],
      item_init = items_map["init"],
      item_raw_type = items_map["raw_type"],
      item_pass = items_map["pass_template"].substitute(name="item", opt=""),
      ItemType = items_map["Type"])
  mapping["arg_out"] = "&out_%s" % mapping["param"]

def ResolveObject(json, mapping):
  mapping["Type"] = "Dictionary"
  mapping["storage_type"] = "scoped_ptr<base::DictionaryValue>"
  mapping["raw_type"] = "base::DictionaryValue*"
  mapping["pass_template"] = tmpl_object_pass
  if "properties" in json:
    if not "declared_name" in mapping:
      mapping["declared_name"] = ("%s%s" %
          (mapping["Command"], Capitalize(mapping["proto_param"])))
      mapping["param_type"] = ("scoped_refptr<%s>" %
          tmpl_typename.substitute(mapping))
      DeclareStruct(json["properties"], mapping)
    else:
      mapping["param_type"] = ("scoped_refptr<%s>" %
          tmpl_typename.substitute(mapping))
    mapping["pass_type"] = mapping["param_type"]
    mapping["arg_out"] = "&out_%s" % mapping["param"]
  else:
    mapping["param_type"] = "base::DictionaryValue"
    mapping["pass_type"] = "scoped_ptr<base::DictionaryValue>"
    mapping["arg_out"] = "out_%s.get()" % mapping["param"]
  mapping["prep_req"] = tmpl_prep_req.substitute(mapping)

def ResolvePrimitive(json, mapping):
  jsonrpc_type = json["type"]
  if jsonrpc_type == "boolean":
    mapping["param_type"] = "bool"
    mapping["Type"] = "Boolean"
    mapping["init"] = " = false"
  elif jsonrpc_type == "integer":
    mapping["param_type"] = "int"
    mapping["Type"] = "Integer"
    mapping["init"] = " = 0"
  elif jsonrpc_type == "number":
    mapping["param_type"] = "double"
    mapping["Type"] = "Double"
    mapping["init"] = " = 0.0"
  elif jsonrpc_type == "string":
    mapping["param_type"] = "std::string"
    mapping["pass_type"] = "const std::string&"
    mapping["Type"] = "String"
    if "enum" in json and not "declared_name" in mapping:
      if not "subdomain" in mapping:
        mapping["subdomain"] = Uncamelcase(mapping["command"])
      DeclareEnum(json, mapping)
  else:
    raise Exception("Unknown type: %s" % jsonrpc_type)
  mapping["storage_type"] = mapping["param_type"]
  mapping["raw_type"] = mapping["param_type"]
  mapping["prep_req"] = tmpl_prep_req.substitute(mapping)
  if jsonrpc_type != "string":
    mapping["pass_type"] = mapping["param_type"]
  mapping["arg_out"] = "&out_%s" % mapping["param"]

def ResolveType(json, mapping):
  mapping["init"] = ""
  mapping["pass_template"] = string.Template("${opt}${name}")
  if "$ref" in json:
    ResolveRef(json, mapping)
  elif "type" in json:
    jsonrpc_type = json["type"]
    if jsonrpc_type == "array":
      ResolveArray(json, mapping)
    elif jsonrpc_type == "object":
      ResolveObject(json, mapping)
    else:
      ResolvePrimitive(json, mapping)
  else:
    raise Exception("Unknown type at %s.%s %s" %
        (mapping["Domain"], mapping["command"], mapping["proto_param"]))

setters = []
fields = []
includes = []
fields_init = []

# Emit dispatcher plumbing for every command and event that declares a
# "browser" handler in the protocol description.
for json_domain in all_domains:
  domain_map = {}
  domain_map["Domain"] = json_domain["domain"]
  domain_map["domain"] = Uncamelcase(json_domain["domain"])

  initializations = []
  client_methods = []
  client_method_impls = []
  domain_empty = True
  domain_needs_client = False

  if "commands" in json_domain:
    for json_command in json_domain["commands"]:
      if (not ("handlers" in json_command) or
          not ("browser" in json_command["handlers"])):
        continue
      domain_empty = False

      command_map = domain_map.copy()
      command_map["command"] = json_command["name"]
      command_map["Command"] = Capitalize(json_command["name"])

      if "redirect" in json_command:
        redirect_domain = json_command["redirect"]
        if not (redirect_domain in redirects):
          redirects[redirect_domain] = []
        command_map["TargetDomain"] = redirect_domain
        redirects[redirect_domain].append(
            tmpl_register.substitute(command_map))
        continue

      command_map["TargetDomain"] = command_map["Domain"]

      prep = []
      args = []

      if "parameters" in json_command:
        for json_param in json_command["parameters"]:
          param_map = command_map.copy()
          param_map["proto_param"] = json_param["name"]
          param_map["param"] = Uncamelcase(json_param["name"])
          ResolveType(json_param, param_map)
          if json_param.get("optional"):
            if param_map["Type"] in ["List"]:
              # TODO(vkuzkokov) Implement transformation of base::ListValue
              # to std::vector and base::DictionaryValue to struct.
              raise Exception(
                  "Optional array parameters are not implemented")
            prep.append(tmpl_prep_opt.substitute(param_map))
            param_pass = param_map["pass_template"].substitute(
                name=tmpl_arg_name.substitute(param_map), opt="&")
            args.append(
                tmpl_arg_opt.substitute(param_map, param_pass=param_pass))
          else:
            prep.append(param_map["prep_req"])
            param_pass = param_map["pass_template"].substitute(
                name=tmpl_arg_name.substitute(param_map), opt="")
            args.append(
                tmpl_arg_req.substitute(param_map, param_pass=param_pass))

      if json_command.get("async"):
        domain_needs_client = True
        json_returns = []
        if "returns" in json_command:
          json_returns = json_command["returns"]
        command_map["declared_name"] = "%sResponse" % command_map["Command"]
        DeclareStruct(json_returns, command_map)
        # TODO(vkuzkokov) Pass async callback instance similar to how
        # InspectorBackendDispatcher does it. This, however, can work
        # only if Blink and Chrome are in the same repo.
        args.insert(0, "command_id")
        handler_method_impls.append(
            tmpl_callback_async_impl.substitute(command_map,
                prep = "".join(prep),
                args = "\n      " + ",\n      ".join(args)))
        client_methods.append(tmpl_response.substitute(command_map))
        client_method_impls.append(tmpl_response_impl.substitute(command_map))
      else:
        wrap = []
        if "returns" in json_command:
          for json_param in json_command["returns"]:
            param_map = command_map.copy()
            param_map["proto_param"] = json_param["name"]
            param_map["param"] = Uncamelcase(json_param["name"])
            if json_param.get("optional"):
              # TODO(vkuzkokov) Implement Optional<T> for value types.
              raise Exception("Optional return values are not implemented")
            ResolveType(json_param, param_map)
            prep.append(tmpl_prep_output.substitute(param_map))
            args.append(param_map["arg_out"])
            wrap.append(tmpl_wrap.substitute(param_map))
        args_str = ""
        if len(args) > 0:
          args_str = "\n      " + ",\n      ".join(args)
        handler_method_impls.append(tmpl_callback_impl.substitute(command_map,
            prep = "".join(prep), args = args_str, wrap = "".join(wrap)))
      initializations.append(tmpl_register.substitute(command_map))
      handler_methods.append(tmpl_callback.substitute(command_map))

  if "events" in json_domain:
    for json_event in json_domain["events"]:
      if (not ("handlers" in json_event) or
          not ("browser" in json_event["handlers"])):
        continue
      domain_empty = False
      domain_needs_client = True

      event_map = domain_map.copy()
      event_map["command"] = json_event["name"]
      event_map["Command"] = Capitalize(json_event["name"])

      json_parameters = []
      if "parameters" in json_event:
        json_parameters = json_event["parameters"]
      event_map["declared_name"] = "%sParams" % event_map["Command"]
      DeclareStruct(json_parameters, event_map)

      client_methods.append(tmpl_event.substitute(event_map))
      client_method_impls.append(tmpl_event_impl.substitute(event_map))

  if domain_empty:
    continue

  type_decls.append(tmpl_handler.substitute(domain_map))
  setters.append(tmpl_setter.substitute(domain_map))
  fields.append(tmpl_field.substitute(domain_map))
  includes.append(tmpl_include.substitute(domain_map))
  fields_init.append(tmpl_field_init.substitute(domain_map))
  if domain_needs_client:
    type_decls.append(tmpl_client.substitute(domain_map,
        methods = "".join(client_methods)))
    initializations.append(tmpl_init_client.substitute(domain_map))
    type_impls.append(tmpl_client_impl.substitute(domain_map,
        methods = "\n".join(client_method_impls)))
  domain_map["initializations"] = "".join(initializations)
  domain_maps.append(domain_map)

for domain_map in domain_maps:
  domain = domain_map["Domain"]
  if domain in redirects:
    domain_map["initializations"] += "".join(redirects[domain])
  handler_method_impls.append(tmpl_setter_impl.substitute(domain_map))

# Write out the generated header and implementation files.
output_h_file = open(output_h_path, "w")
output_cc_file = open(output_cc_path, "w")

output_h_file.write(template_h.substitute({},
    types = "\n".join(type_decls),
    setters = "".join(setters),
    methods = "".join(handler_methods),
    fields = "".join(fields)))
output_h_file.close()

output_cc_file.write(template_cc.substitute({},
    major = blink_protocol["version"]["major"],
    minor = blink_protocol["version"]["minor"],
    includes = "".join(sorted(includes)),
    fields_init = ",\n      ".join(fields_init),
    methods = "\n".join(handler_method_impls),
    types = "\n".join(type_impls)))
output_cc_file.close()