repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class, 991 values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (class, 15 values)
---|---|---|---|---|---|
icsi-berkeley/framework_code | src/main/nluas/app/core_solver.py | 2 | 7749 | """
Simple solver "core". Contains capabilities for unpacking
a JSON n-tuple, as well as routing this n-tuple based
on the predicate_type (command, query, assertion, etc.).
Other general capabilities can be added. The design
is general enough that the same "unpacking" and "routing"
method can be used, as long as a new method is written for a given
predicate_type.
"Route_action" can be called by command/query/assertion methods,
to route each parameter to the task-specific method. E.g., "solve_move",
or "solve_push_move", etc.
Author: seantrott <[email protected]>
------
See LICENSE.txt for licensing information.
------
"""
from nluas.ntuple_decoder import *
from nluas.core_agent import *
import sys, traceback
import argparse  # used by check_complexity and setup_solver_parser
from collections import OrderedDict  # used for parameter_templates
import pprint
import os
path = os.path.dirname(os.path.realpath(__file__))
def check_complexity(n):
s = int(n)
if s not in [1, 2, 3]:
raise argparse.ArgumentTypeError("{} is an invalid entry for the complexity level. Should be 1, 2, or 3.".format(n))
return s
class CoreProblemSolver(CoreAgent):
def __init__(self, args):
self.__path__ = os.getcwd() + "/src/main/nluas/"
self.ntuple = None
self.decoder = NtupleDecoder()
CoreAgent.__init__(self, args)
self.world = []
self.solver_parser = self.setup_solver_parser()
args = self.solver_parser.parse_args(self.unknown)
self.complexity = args.complexity
self.ui_address = "{}_{}".format(self.federation, "AgentUI")
self.transport.subscribe(self.ui_address, self.callback)
self._incapable = "I cannot do that yet."
self.history = list()
self.p_features = None
self.eventFeatures=None
self.parameter_templates = OrderedDict()
def setup_solver_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--complexity", default=1, type=check_complexity, help="indicate level of complexity: 1, 2, or 3.")
return parser
def callback(self, ntuple):
if self.is_quit(ntuple):
return self.close()
self.solve(ntuple)
def initialize_templates(self):
""" Initializes templates from path, set above. """
self.parameter_templates = self.read_templates(self.__path__+"parameter_templates.json")
def request_clarification(self, ntuple, message="This ntuple requires clarification."):
request = {'ntuple': ntuple, 'message': message, 'type': 'clarification', 'tag': self.address}
self.transport.send(self.ui_address, request)
def identification_failure(self, message):
request = {'type': 'id_failure', 'message': message, 'tag': self.address}
self.transport.send(self.ui_address, request)
def respond_to_query(self, message):
request = {'type': 'response', 'message': message, 'tag': self.address}
self.transport.send(self.ui_address, request)
def return_error_descriptor(self, message):
request = {'type': 'error_descriptor', 'message': message, 'tag': self.address}
self.transport.send(self.ui_address, request)
def solve(self, ntuple):
if self.check_for_clarification(ntuple):
self.request_clarification(ntuple=ntuple)
else:
self.ntuple = ntuple
predicate_type = ntuple['predicate_type']
try:
dispatch = getattr(self, "solve_%s" %predicate_type)
dispatch(ntuple)
self.broadcast()
self.p_features = None # Testing, took it out from route_action
except AttributeError as e:
traceback.print_exc()
message = "I cannot solve a(n) {}.".format(predicate_type)
self.identification_failure(message)
def broadcast(self):
""" Here, does nothing. Later, an AgentSolver will broadcast information back to BossSolver. """
pass
def update_world(self, discovered=[]):
for item in discovered:
self.world.append(item)
def solve_command(self, ntuple):
self.route_event(ntuple['eventDescriptor'], "command")
if self.verbose:
self.decoder.pprint_ntuple(ntuple)
def solve_query(self, ntuple):
self.route_event(ntuple['eventDescriptor'], "query")
if self.verbose:
self.decoder.pprint_ntuple(ntuple)
def solve_assertion(self, ntuple):
self.route_event(ntuple['eventDescriptor'], "assertion")
if self.verbose:
self.decoder.pprint_ntuple(ntuple)
def solve_conditional_command(self, ntuple):
""" Takes in conditionalED. (API changed 5/26/16, ST) """
print("Function is deprecated!")
print(ntuple.keys())
def solve_conditional_assertion(self, ntuple):
""" Takes in conditionalED. (API changed 5/26/16, ST) """
print("Function is deprecated!")
print(ntuple.keys())
def solve_conditional_query(self, ntuple):
""" Takes in conditionalED. (API changed 5/26/16, ST) """
print("Function is deprecated!")
print(ntuple.keys())
def route_event(self, eventDescription, predicate):
if "complexKind" in eventDescription and eventDescription['complexKind'] == "conditional":
dispatch = getattr(self, "solve_conditional_{}".format(predicate))
return dispatch(eventDescription)
features = eventDescription['e_features']
if features:
# Set eventFeatures
self.eventFeatures = features['eventFeatures']
parameters = eventDescription['eventProcess']
return_value = self.route_action(parameters, predicate)
self.eventFeatures = None
if return_value:
if predicate == "query":
self.respond_to_query(return_value)
elif predicate == "command":
self.return_error_descriptor(return_value)
return return_value
def route_action(self, parameters, predicate):
if "complexKind" in parameters and parameters['complexKind'] == "serial":
return self.solve_serial(parameters, predicate)
elif "complexKind" in parameters and parameters['complexKind'] == "causal":
return self.solve_causal(parameters, predicate)
else:
template = parameters['template']
action = parameters['actionary']
try:
if parameters['p_features']:
self.p_features = parameters['p_features']['processFeatures']
dispatch = getattr(self, "{}_{}".format(predicate, action))
return_value = self.route_dispatch(dispatch, parameters)
self.history.insert(0, (parameters, True))
self.p_features = None
return return_value
except AttributeError as e:
message = "I cannot solve the '{}_{}' action".format(predicate,action)
self.history.insert(0, (parameters, False))
self.identification_failure(message)
def route_dispatch(self, dispatch_function, parameters):
""" Simply runs dispatch_function on PARAMETERS. """
return dispatch_function(parameters)
def check_for_clarification(self, ntuple):
""" Will need to be replaced by a process that checks whether ntuple needs clarification.
Requires some sort of context/world model. """
#return random.choice([True, False])
return False
def solve_serial(self, parameters, predicate):
self.route_action(parameters['process1'], predicate)
self.route_action(parameters['process2'], predicate)
if __name__ == '__main__':
ps = CoreProblemSolver(sys.argv[1:])
| apache-2.0 |
kirisetsz/kisstudou | kisstudou.py | 1 | 7276 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import re
import httplib
import urllib
from pyquery import PyQuery as pq
parser = argparse.ArgumentParser(
description='Download video resource from tudou.com',
epilog="Parse the url to video address using flvcd.com")
parser.add_argument('-q', '--quality',
default=4, type=int, dest='quality',
help="""Quality of source to download,
values in 0(256P),1(360P),2(480P),3(720P),4(REAL).
REAL by default.
Note:
If the specified resolution is not available, the nearest lower one will be downloaded""")
parser.add_argument('-o', '--output-pattern',
default='%{n}%-{x}', dest='pattern',
help="""Define the output filename format(%%n by default):
%%{n} - Video name section.
%%{x} - Clip index of the video.
e.g. %%{n}%%-{x} will produce filename-0001.vod or filename.vod
""")
parser.add_argument('-w', '--wait',
default=2, type=int, dest='wait',
help="Set the time to wait between start next task(in second, default 2).")
parser.add_argument('-D', '--debug',
default=False, dest='debug', action='store_true',
help="Run command in debug mode")
parser.add_argument('-d', '--new-directory',
default=False, dest='mkdir', action='store_true',
help="Create new directory for the download")
parser.add_argument('-c', '--clean',
default=False, dest='clean', action='store_true',
help="Clean old file before start(for sites unavaliable for partial)")
parser.add_argument('-m', '--merge-split',
default=False, dest='merge', action='store_true',
help="Auto merge videos together(Not Implemented)")
parser.add_argument('-s', '--spider',
default=False, dest='detect', action='store_true',
help="Only detect for video information but not download.")
parser.add_argument('-U', '--user-agent',
default=r"Mozilla/5.0 (X11; Linux x86_64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
dest='ua',
help="Specific the User-Agent.")
parser.add_argument('-O', '--wget-options',
default="",
dest='wgetopt',
help="Specific the wget Parameter.")
parser.add_argument('url', help='The URL of the video')
#arguments here
global args
args = parser.parse_args()
resolution = [
('normal' , 'Normal'),
('high' , '360P'),
('super' , '480P'),
('super2' , '720P'),
('real' , 'REAL(DEFAULT)')
]
print "Video address to parse:"
print "\t%s" % (args.url)
print "Quality:", resolution[args.quality][1]
print "Pattern:", args.pattern, "+ *ext*"
print "User-Agent:"
print "\t%s" % (args.ua)
if args.debug:
print "Debug:", args.debug
print "New Dir.:", args.mkdir
def parse(url, ua, fmt):
http = httplib.HTTP("www.flvcd.com")
http.putrequest("GET", "/parse.php?format=%s&kw=%s" % (fmt,
urllib.quote(url)))
http.putheader("User-Agent", ua)
http.putheader("Host", "www.flvcd.com")
http.putheader("Accept", "*/*")
http.endheaders()
errcode, errmsg, headers = http.getreply()
print "Status:", errcode, errmsg
if errcode!=200:
print "Error encountered while parsing url"
return -1
res = http.getfile()
print 'Parsing video address...'
html = ''
data = res.read(512)
while data != '':
html += data
data = res.read(512)
html = html.decode('gbk')
return html
html = parse(args.url, args.ua, resolution[args.quality][0])
if html == -1:
exit(1)
q = pq(html)
# Address Parsing Procedure
form = q('form[name="mform"]')
file_a = form.parent('td').parent('tr').prev().children().children('a')
filelist = []
for i in file_a:
a = pq(i)
filelist.append(a.attr('href'))
filename = form('input[name="name"]').val()
formats = form.parent().children('a')
if not filename:
print """
Video is not available for download.
Check http://www.flvcd.com/url.php for available sites.
Or the video is protected from playing by guests.
"""
exit(0)
print "Video Title:"
print "\t%s" % (filename)
print
if args.debug:
print "Download Address:"
for i in filelist:
print i
print
if len(formats) > 0:
print "Optional format:"
for i in formats:
f = pq(i)
href = f.attr('href')
text = f.text()
for i in xrange(len(resolution)):
k, v = resolution[i]
if href.find(k) != -1:
print "\t%d - %s[%s]" % (i, v, text)
break
print
if args.detect:
exit(0)
filepath = filename.replace("/", "_").encode('utf8')
print "Found %d video clip(s) to download" % len(filelist)
print
import os, time
if args.mkdir:
print 'Creating new dir:', filepath
os.system('mkdir "%s" 2>/dev/null 1>/dev/null' % filepath)
os.chdir(filepath)
print 'Current directory:'
print "\t", os.getcwd()
os.system('''echo "#!/bin/bash
%s -q%s -O=\\"%s\\" \\"%s\\" \$@" > "%s.to" && chmod +x "%s.to"
''' % \
(__file__,args.quality,args.wgetopt,args.url,
filepath,filepath))
print
def getFileExt(u):
if u.find('f4v')!=-1:
return '.f4v'
if u.find('mp4')!=-1:
return '.mp4'
if u.find('flv')!=-1:
return '.flv'
if u.find('hlv')!=-1:
return '.flv'
return ".video"
fSuccess = True
def sformat(string, symbol, value):
tokens = string.split('%')
filtered = []
for s in tokens:
if s.find('{' + symbol + '}') < 0:
filtered.append(s)
else:
if value:
filtered.append(s.replace('{' + symbol + '}', value))
return '%'.join(filtered)
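# Worked example (hypothetical name "myvideo", not from the original script):
# with the default pattern '%{n}%-{x}' from the -o option,
#   sformat('%{n}%-{x}', 'n', 'myvideo')    -> '%myvideo%-{x}'
#   sformat('%myvideo%-{x}', 'x', '0001')   -> '%myvideo%-0001'
#   sformat('%myvideo%-{x}', 'x', None)     -> '%myvideo'   (token dropped)
# The later replace('%', "") in the loop below strips the remaining '%' signs,
# yielding 'myvideo-0001<ext>' for multi-clip videos and 'myvideo<ext>' otherwise.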
for i in xrange(len(filelist)):
url = filelist[i]
local = args.pattern
local = sformat(local, 'n', filepath)
if len(filelist) > 1:
local = sformat(local, 'x', '%04d' % (i + 1))
else:
local = sformat(local, 'x', None)
local = local.replace('%',"").replace('/',"_") + getFileExt(url)
print "Download", local, "..."
if os.path.exists(local):
print "Target already exists, skip to next file!"
continue
rmcmd = "rm -f %s 1>/dev/null 2>/dev/null" % (local+" ."+local)
if args.clean:
print "Before we start, clean the unfinished file"
os.system(rmcmd)
syscmd = 'wget -c ' + args.wgetopt + ' "' + url + '" -U "' + args.ua + '" -O ".' + local + '"'
if args.debug:
print syscmd
continue
rtn = os.system(syscmd)
mvcmd = 'mv "%s" "%s" 1>/dev/null 2>/dev/null' % ('.' + local, local)
if rtn == 0:
os.system(mvcmd)
elif rtn == 2048:
# Server issued an error response.
print "Server Error detected, remove part file and retry."
os.system(rmcmd)
rtn = os.system(syscmd)
if rtn == 0:
os.system(mvcmd)
else:
fSuccess = False
if rtn == 2048:
print "Server error again, address may be expired."
if args.clean:
os.system(rmcmd)
continue
else:
fSuccess = False
time.sleep(args.wait + 0.1)
if fSuccess:
os.system('rm "%s.to"' % (filepath))
print "All tasks completed."
exit(0)
| apache-2.0 |
jef-n/QGIS | tests/src/python/test_qgspallabeling_tests.py | 30 | 14051 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPalLabeling: base suite of render check tests
Class is meant to be inherited by classes that test different labeling outputs
See <qgis-src-dir>/tests/testdata/labeling/README.rst for description.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Larry Shaffer'
__date__ = '07/16/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import Qt, QPointF, QSizeF
from qgis.PyQt.QtGui import QFont
from qgis.core import QgsLabelingEngineSettings, QgsPalLayerSettings, QgsUnitTypes, QgsTextBackgroundSettings, \
QgsProject, QgsExpressionContextUtils, QgsExpressionContext
from qgis.core import QgsCoordinateReferenceSystem
from utilities import svgSymbolsPath
# noinspection PyPep8Naming
class TestPointBase(object):
def __init__(self):
"""Dummy assignments, intended to be overridden in subclasses"""
self.lyr = QgsPalLayerSettings()
""":type: QgsPalLayerSettings"""
# noinspection PyArgumentList
self._TestFont = QFont() # will become a standard test font
self._Canvas = None
""":type: QgsMapCanvas"""
# custom mismatches per group/test (should not mask any needed anomaly)
# e.g. self._Mismatches['TestClassName'] = 300
# check base output class's checkTest() or subclasses for any defaults
self._Mismatches = dict()
# custom color tolerances per group/test: 1 - 20 (0 default, 20 max)
# (should not mask any needed anomaly)
# e.g. self._ColorTols['TestClassName'] = 10
# check base output class's checkTest() or subclasses for any defaults
self._ColorTols = dict()
# noinspection PyMethodMayBeStatic
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
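# Illustrative sketch (hypothetical, not part of this module): a concrete output
# suite mixes this base into an output-specific TestCase and implements
# checkTest(). "TestOutputBase" below is a placeholder name; the real bases live
# in the sibling output test modules.
#
#   class TestCanvasPoint(TestOutputBase, TestPointBase):
#       def checkTest(self, **kwargs):
#           # render self.lyr with self._TestMapSettings and compare against the
#           # control image for the calling test, using self._Mismatches and
#           # self._ColorTols as per-test tolerances.
#           ...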
def test_default_label(self):
# Default label placement, with text size in points
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_text_size_map_unit(self):
# Label text size in map units
format = self.lyr.format()
format.setSizeUnit(QgsUnitTypes.RenderMapUnits)
format.setSize(460)
font = QFont(self._TestFont)
format.setFont(font)
self.lyr.setFormat(format)
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_text_color(self):
self._Mismatches['TestCanvasPoint'] = 774
self._ColorTols['TestComposerPdfPoint'] = 2
# Label color change
format = self.lyr.format()
format.setColor(Qt.blue)
self.lyr.setFormat(format)
self.checkTest()
def test_background_rect(self):
self._Mismatches['TestComposerImageVsCanvasPoint'] = 800
self._Mismatches['TestComposerImagePoint'] = 800
format = self.lyr.format()
format.background().setEnabled(True)
self.lyr.setFormat(format)
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 1
self.checkTest()
def test_background_rect_w_offset(self):
# Label rectangular background
self._Mismatches['TestComposerImageVsCanvasPoint'] = 800
self._Mismatches['TestComposerImagePoint'] = 800
# verify fix for issues
# https://github.com/qgis/QGIS/issues/17705
# http://gis.stackexchange.com/questions/86900
format = self.lyr.format()
format.setSizeUnit(QgsUnitTypes.RenderMapUnits)
format.setSize(460)
font = QFont(self._TestFont)
format.setFont(font)
format.background().setEnabled(True)
format.background().setOffsetUnit(QgsUnitTypes.RenderMapUnits)
format.background().setOffset(QPointF(-2900.0, -450.0))
self.lyr.setFormat(format)
self._Mismatches['TestCanvasPoint'] = 774
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_background_svg(self):
# Label SVG background
format = self.lyr.format()
format.setSizeUnit(QgsUnitTypes.RenderMapUnits)
format.setSize(460)
font = QFont(self._TestFont)
format.setFont(font)
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSize(QSizeF(100.0, 0.0))
self.lyr.setFormat(format)
self._Mismatches['TestComposerPdfVsComposerPoint'] = 580
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_background_svg_w_offset(self):
# Label SVG background
format = self.lyr.format()
format.setSizeUnit(QgsUnitTypes.RenderMapUnits)
format.setSize(460)
font = QFont(self._TestFont)
format.setFont(font)
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSize(QSizeF(100.0, 0.0))
format.background().setOffsetUnit(QgsUnitTypes.RenderMapUnits)
format.background().setOffset(QPointF(-2850.0, 500.0))
self.lyr.setFormat(format)
self._Mismatches['TestComposerPdfVsComposerPoint'] = 760
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_partials_labels_enabled(self):
# Set Big font size
format = self.lyr.format()
font = QFont(self._TestFont)
format.setFont(font)
format.setSize(84)
self.lyr.setFormat(format)
# Enable partials labels
engine_settings = QgsLabelingEngineSettings()
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, True)
self._TestMapSettings.setLabelingEngineSettings(engine_settings)
self._Mismatches['TestCanvasPoint'] = 779
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_partials_labels_disabled(self):
# Set Big font size
format = self.lyr.format()
font = QFont(self._TestFont)
format.setFont(font)
format.setSize(84)
self.lyr.setFormat(format)
# Disable partials labels
engine_settings = QgsLabelingEngineSettings()
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, False)
self._TestMapSettings.setLabelingEngineSettings(engine_settings)
self.checkTest()
def test_buffer(self):
# Label with buffer
format = self.lyr.format()
format.buffer().setEnabled(True)
format.buffer().setSize(2)
self.lyr.setFormat(format)
self.checkTest()
def test_shadow(self):
# Label with shadow
format = self.lyr.format()
format.shadow().setEnabled(True)
format.shadow().setOffsetDistance(2)
format.shadow().setOpacity(1)
self.lyr.setFormat(format)
self.checkTest()
def test_letter_spacing(self):
# Modified letter spacing
format = self.lyr.format()
font = QFont(self._TestFont)
font.setLetterSpacing(QFont.AbsoluteSpacing, 3.5)
format.setFont(font)
format.setSize(30)
self.lyr.setFormat(format)
self.checkTest()
def test_word_spacing(self):
# Modified word spacing
format = self.lyr.format()
font = QFont(self._TestFont)
font.setWordSpacing(20.5)
format.setFont(font)
format.setSize(30)
self.lyr.setFormat(format)
self.checkTest()
# noinspection PyPep8Naming
class TestLineBase(object):
def __init__(self):
"""Dummy assignments, intended to be overridden in subclasses"""
self.lyr = QgsPalLayerSettings()
""":type: QgsPalLayerSettings"""
# noinspection PyArgumentList
self._TestFont = QFont() # will become a standard test font
self._Pal = None
""":type: QgsPalLabeling"""
self._Canvas = None
""":type: QgsMapCanvas"""
# custom mismatches per group/test (should not mask any needed anomaly)
# e.g. self._Mismatches['TestClassName'] = 300
# check base output class's checkTest() or subclasses for any defaults
self._Mismatches = dict()
# custom color tolerances per group/test: 1 - 20 (0 default, 20 max)
# (should not mask any needed anomaly)
# e.g. self._ColorTols['TestClassName'] = 10
# check base output class's checkTest() or subclasses for any defaults
self._ColorTols = dict()
# noinspection PyMethodMayBeStatic
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
def test_line_placement_above_line_orientation(self):
# Line placement, above, follow line orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine
self.checkTest()
def test_line_placement_online(self):
# Line placement, on line
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.OnLine
self.checkTest()
def test_line_placement_below_line_orientation(self):
# Line placement, below, follow line orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.BelowLine
self.checkTest()
def test_line_placement_above_map_orientation(self):
# Line placement, above, follow map orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_line_placement_below_map_orientation(self):
# Line placement, below, follow map orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_curved_placement_online(self):
# Curved placement, on line
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.OnLine
self.checkTest()
def test_curved_placement_above(self):
# Curved placement, above line, follow map orientation
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_curved_placement_below(self):
# Curved placement, below line, follow map orientation
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_curved_placement_online_html(self):
# Curved placement, on line
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.OnLine
format = self.lyr.format()
format.setAllowHtmlFormatting(True)
self.lyr.setFormat(format)
self.lyr.fieldName = "'<span style=\"color: red\">aaa</span><s>aa</s><span style=\"text-decoration: overline\">a</span>'"
self.lyr.isExpression = True
self.checkTest()
def test_length_expression(self):
# compare length using the ellipsoid in kms and the planimetric distance in meters
self.lyr.fieldName = "round($length,5) || ' - ' || round(length($geometry),2)"
self.lyr.isExpression = True
QgsProject.instance().setCrs(QgsCoordinateReferenceSystem("EPSG:32613"))
QgsProject.instance().setEllipsoid("WGS84")
QgsProject.instance().setDistanceUnits(QgsUnitTypes.DistanceKilometers)
ctxt = QgsExpressionContext()
ctxt.appendScope(QgsExpressionContextUtils.projectScope(QgsProject.instance()))
ctxt.appendScope(QgsExpressionContextUtils.layerScope(self.layer))
self._TestMapSettings.setExpressionContext(ctxt)
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
# noinspection PyPep8Naming
def suiteTests():
"""
Use to define which tests are run when PAL_SUITE is set.
Use sp_vs_suite for comparison of server and layout outputs to canvas
"""
sp_suite = [
# 'test_default_label',
# 'test_text_size_map_unit',
# 'test_text_color',
# 'test_background_rect',
# 'test_background_rect_w_offset',
# 'test_background_svg',
# 'test_background_svg_w_offset',
# 'test_partials_labels_enabled',
# 'test_partials_labels_disabled',
]
sp_vs_suite = [
# 'test_something_specific',
]
# extended separately for finer control of PAL_SUITE (comment-out undesired)
sp_vs_suite.extend(sp_suite)
return {
'sp_suite': sp_suite,
'sp_vs_suite': sp_vs_suite
}
if __name__ == '__main__':
pass
| gpl-2.0 |
gw0/myhdl | myhdl/_resize.py | 1 | 4719 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2013 Christopher L. Felton
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the fixbv resize function """
import math
from _fixbv import fixbv
from _fixbv import FixedPointFormat
# round :
ROUND_MODES = ( # towards :
'ceil', # +infinity: always round up
'fix', # 0 : always down
'floor', # -infinity: truncate, always round down
'nearest', # nearest : tie towards largest absolute value
'round', # nearest : ties to +infinity
'convergent', # nearest : tie to closest even (round_even)
'round_even', # nearest : tie to closest even (convergent)
)
OVERFLOW_MODES = (
'saturate',
'ring',
'wrap',
)
def is_round_mode(mode):
if mode.lower() in ROUND_MODES:
found = True
else:
# @todo: is there a close match?
found = False
return found
def is_overflow_mode(mode):
if mode.lower() in OVERFLOW_MODES:
found = True
else:
# @todo: is there a close match?
found = False
return found
def _overflow(val, fmt, overflow_mode):
"""handle overflow"""
assert is_overflow_mode(overflow_mode)
wl,iwl,fwl = fmt
mm = 2**(wl-1)
mmin,mmax = -mm,mm
#print(" [rsz][ovl]: %f %d %d, %s" % (val, mmin, mmax, fmt))
if overflow_mode == 'saturate':
if val >= mmax:
retval = mmax-1
elif val <= mmin:
retval = mmin
else:
retval = val
elif overflow_mode == 'ring' or overflow_mode == 'wrap':
retval = (val - mmin) % (mmax - mmin) + mmin
else:
raise ValueError
return retval
def _round(val, fmt, round_mode):
"""Round the initial value if needed"""
# Scale the value to the integer range (the underlying representation)
assert is_round_mode(round_mode)
assert isinstance(fmt, tuple)
wl,iwl,fwl = fmt
_val = val
val = val * 2.0**fwl
#print(" [rsz][rnd]: %f %f, %s" % (val, _val, fmt))
if round_mode == 'ceil':
retval = math.ceil(val)
elif round_mode == 'fix':
if val > 0:
retval = math.floor(val)
else:
retval = math.ceil(val)
elif round_mode == 'floor':
retval = math.floor(val)
elif round_mode == 'nearest':
fval,ival = math.modf(val)
if fval == .5:
retval = int(val+1) if val > 0 else int(val-1)
else:
retval = round(val)
elif round_mode == 'round':
retval = round(val)
elif round_mode == 'round_even' or round_mode == 'convergent':
fval,ival = math.modf(val)
abs_ival = int(abs(ival))
sign = -1 if ival < 0 else 1
if (abs(fval) - 0.5) == 0.0:
if abs_ival%2 == 0:
retval = abs_ival * sign
else:
retval = (abs_ival + 1) * sign
else:
retval = round(val)
else:
raise TypeError("invalid round mode!" % self.round_mode)
return int(retval)
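# Worked example (values chosen for illustration, not from the original module):
# with fmt = (wl, iwl, fwl) = (8, 3, 4), val = 0.15625 scales to 0.15625 * 2**4
# = 2.5 before rounding, so the returned integer representation is
#   _round(0.15625, (8, 3, 4), 'ceil')        -> 3
#   _round(0.15625, (8, 3, 4), 'fix')         -> 2   (towards zero)
#   _round(0.15625, (8, 3, 4), 'convergent')  -> 2   (tie goes to the even value)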
def resize(val, fmt, round_mode='convergent', overflow_mode='saturate'):
"""
"""
if isinstance(fmt, fixbv):
fmt = fmt.format
elif isinstance(fmt, FixedPointFormat):
fmt = tuple(fmt[:])
elif isinstance(fmt, tuple):
fmt = fmt
else:
pass
if isinstance(val, fixbv):
fval = float(val)
elif isinstance(val, float):
fval = val
else:
fval = float(val)
wl,iwl,fwl = fmt
mm = 2**iwl
res = 2**-fwl
rfx = fixbv(0, min=-mm, max=mm, res=res)
assert (wl,iwl,fwl,) == rfx.format, "%d,%d,%d != %s" % (wl,iwl,fwl, repr(rfx))
ival = _round(fval, fmt, round_mode=round_mode)
ival = _overflow(ival, fmt, overflow_mode=overflow_mode)
rfx._val = ival
rfx._handleBounds()
return rfx
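# Illustrative usage sketch (hypothetical values, assuming fixbv exposes the
# scaled integer through _val as assigned above):
#   y = resize(0.15625, (8, 3, 4))  # convergent rounding: stored integer 2, i.e. 2 * 2**-4
#   y = resize(100.0, (8, 3, 4))    # saturates at the largest representable value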
| lgpl-2.1 |
TribeMedia/sky_engine | tools/sort_sources.py | 68 | 5062 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given a GYP/GN filename, sort C-ish source files in that file.
Shows a diff and prompts for confirmation before doing the deed.
Works great with tools/git/for-all-touched-files.py.
Limitations:
1) Comments used as section headers
If a comment (1+ lines starting with #) appears in a source list without a
preceding blank line, the tool assumes that the comment is about the next
line. For example, given the following source list,
sources = [
"b.cc",
# Comment.
"a.cc",
"c.cc",
]
the tool will produce the following output:
sources = [
# Comment.
"a.cc",
"b.cc",
"c.cc",
]
This is not correct if the comment is for starting a new section like:
sources = [
"b.cc",
# These are for Linux.
"a.cc",
"c.cc",
]
The tool cannot disambiguate the two types of comments. The problem can be
worked around by inserting a blank line before the comment because the tool
interprets a blank line as the end of a source list.
2) Sources commented out
Sometimes sources are commented out with their positions kept in the
alphabetical order, but what if the list is not sorted correctly? For
example, given the following source list,
sources = [
"a.cc",
# "b.cc",
"d.cc",
"c.cc",
]
the tool will produce the following output:
sources = [
"a.cc",
"c.cc",
# "b.cc",
"d.cc",
]
This is because the tool assumes that the comment (# "b.cc",) is about the
next line ("d.cc",). This kind of errors should be fixed manually, or the
commented-out code should be deleted.
3) " and ' are used both used in the same source list (GYP only problem)
If both " and ' are used in the same source list, sources quoted with " will
appear first in the output. The problem is rare enough so the tool does not
attempt to normalize them. Hence this kind of errors should be fixed
manually.
4) Spaces and tabs used in the same source list
Similarly, if spaces and tabs are both used in the same source list, sources
indented with tabs will appear first in the output. This kind of errors
should be fixed manually.
"""
import difflib
import optparse
import re
import sys
from yes_no import YesNo
SUFFIXES = ['c', 'cc', 'cpp', 'h', 'mm', 'rc', 'rc.version', 'ico', 'def',
'release']
SOURCE_PATTERN = re.compile(r'^\s+[\'"].*\.(%s)[\'"],$' %
'|'.join([re.escape(x) for x in SUFFIXES]))
COMMENT_PATTERN = re.compile(r'^\s+#')
def SortSources(original_lines):
"""Sort source file names in |original_lines|.
Args:
original_lines: Lines of the original content as a list of strings.
Returns:
Lines of the sorted content as a list of strings.
The algorithm is fairly naive. The code tries to find a list of C-ish
source file names by a simple regex, then sort them. The code does not try
to understand the syntax of the build files. See the file comment above for
details.
"""
output_lines = []
comments = []
sources = []
for line in original_lines:
if re.search(COMMENT_PATTERN, line):
comments.append(line)
elif re.search(SOURCE_PATTERN, line):
# Associate the line with the preceding comments.
sources.append([line, comments])
comments = []
else:
# |sources| should be flushed first, to handle comments at the end of a
# source list correctly.
if sources:
for source_line, source_comments in sorted(sources):
output_lines.extend(source_comments)
output_lines.append(source_line)
sources = []
if comments:
output_lines.extend(comments)
comments = []
output_lines.append(line)
return output_lines
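# Illustrative example (hypothetical input, not from the original file):
#   SortSources(['sources = [\n', '    "b.cc",\n', '    "a.cc",\n', ']\n'])
# returns the same lines with '"a.cc",' placed before '"b.cc",'.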
def ProcessFile(filename, should_confirm):
"""Process the input file and rewrite if needed.
Args:
filename: Path to the input file.
should_confirm: If true, diff and confirmation prompt are shown.
"""
original_lines = []
with open(filename, 'r') as input_file:
for line in input_file:
original_lines.append(line)
new_lines = SortSources(original_lines)
if original_lines == new_lines:
print '%s: no change' % filename
return
if should_confirm:
diff = difflib.unified_diff(original_lines, new_lines)
sys.stdout.writelines(diff)
if not YesNo('Use new file (y/N)'):
return
with open(filename, 'w') as output_file:
output_file.writelines(new_lines)
def main():
parser = optparse.OptionParser(usage='%prog filename1 filename2 ...')
parser.add_option('-f', '--force', action='store_false', default=True,
dest='should_confirm',
help='Turn off confirmation prompt.')
opts, filenames = parser.parse_args()
if len(filenames) < 1:
parser.print_help()
return 1
for filename in filenames:
ProcessFile(filename, opts.should_confirm)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
XiaJieCom/change | Demo/days07/day07.py | 1 | 4725 |
'''
class Aniaml:
count = 10
def __init__(self,name):
self.name = name
self.num = None
hobbie = 'meat'
@classmethod # class method: cannot access instance variables
def talk(self):
print('%s is talking ...'%self.hobbie )
@staticmethod # static method: cannot access class or instance variables
def walk():
print('is walking ...')
@property # property: turns the method into an attribute
def habbit(self):
print('%s habit is sss'%self.name)
@property
def total_players(self):
return self.num
@total_players.setter
def total_players(self,num):
self.num = num
print('total players:',self.num)
@total_players.deleter
def total_players(self):
print('total player got deleted.')
del self.num
Aniaml.hobbie
Aniaml.talk()
d = Aniaml('hahah')
print(d.total_players)
d.total_players = 3
del d.total_players
print(d.total_players)
'''
''''
class A:
n = 'A'
def f2(self):
print('f2 from A')
class B(A):
n = 'B'
def __init__(self):
pass
def f1(self):
print('f1 from B')
def f2(self):
print('f2 from B')
def __del__(self):
print('del ....')
def __call__(self, *args, **kwargs):
print('__cal__')
class C(A):
n = 'C'
def f2(self):
print('f2 from C')
class D(B,C):
pass
d = D()
d.f1()
d.f2()
print(B.__doc__)
print(B.__dict__)
print(B.__class__)
print(B.__module__)
B.__del__
obj = B()
obj()
'''
'''
import sys
class WebServer(object):
def __init__(self,host,port):
self.host = host
self.port = port
def start(self):
print('Server is stating ...')
def stop(self):
print('Server is stopping ...')
def restart(self):
self.stop()
self.start()
print('Server is restarting ...')
def test_run(self,name):
print('Test_running ...',name,self.host)
if __name__ == '__main__':
server = WebServer('localhost',80)
if hasattr(server,sys.argv[1]):
func = getattr(server,sys.argv[1])
func()
setattr(server,'run',test_run)
server.run(server,'haha')
'''
'''
import socket
ip_port = ('127.0.0.1',9999)
sk = socket.socket()
sk.bind(ip_port)
sk.listen(5)
while True:
print('Server is waiting ... ')
conn,addr = sk.accept()
client_data = conn.recv(1024)
print(str(client_data,'utf-8'))
conn.sendall(bytes('这是 server !','utf-8'))
conn.close()
'''
'''
import socket
#ip_port = ('0.0.0.0',9999)
ip_port = ('127.0.0.1',9090)
sk = socket.socket()
sk.bind(ip_port)
sk.listen(5)
'''
'''
while True:
print('Server is waiting ... ')
conn,addr = sk.accept()
client_data = conn.recv(1024)
print(str(client_data,'utf-8'))
conn.sendall(bytes('这是 server !','utf-8'))
while True:
client_data = conn.recv(1024)
server_raw = input('>>').strip()
conn.sendall(bytes(server_raw,'utf-8'))
print(str(client_data,'utf-8'))
'''
'''
menu_dic = {'1':'start',
'2':'stop',
'3':'restart'
}
raw = input('请输入您的选择: ').strip()
if raw in menu_dic:
print(menu_dic[raw])
'''
'''
import sys
class WebServer(object):
def __init__(self,host,port):
self.host = host
self.port = port
def start(self):
print('Server is stating ...')
def stop(self):
print('Server is stopping ...')
def restart(self):
self.stop()
self.start()
print('Server is restarting ...')
def test_run(self,name):
print('Test_running ...',name,self.host)
if __name__ == '__main__':
server = WebServer('localhost',80)
if hasattr(server,sys.argv[1]):
func = getattr(server,sys.argv[1])
func()
setattr(server,'run',test_run)
server.run(server,'haha')
'''
class Aniaml:
count = 10
def __init__(self,name):
self.name = name
self.num = None
hobbie = 'meat'
@classmethod # class method: cannot access instance variables
def talk(self):
print('%s is talking ...'%self.hobbie )
@staticmethod # static method: cannot access class or instance variables
def walk():
print('is walking ...')
@property # property: turns the method into an attribute
def habbit(self):
print('%s habit is sss'%self.name)
@property
def total_players(self):
return self.num
@total_players.setter
def total_players(self,num):
self.num = num
print('total players:',self.num)
@total_players.deleter
def total_players(self):
print('total player got deleted.')
del self.num
Aniaml.hobbie
Aniaml.talk()
d = Aniaml('hahah')
print(d.total_players)
d.total_players = 3
del d.total_players
print(d.total_players)
| lgpl-2.1 |
sabi0/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/tests/user_messages.py | 241 | 2619 | from django import http
from django.contrib.auth.models import User
from django.contrib.messages.storage.user_messages import UserMessagesStorage,\
LegacyFallbackStorage
from django.contrib.messages.tests.base import skipUnlessAuthIsInstalled
from django.contrib.messages.tests.cookie import set_cookie_data
from django.contrib.messages.tests.fallback import FallbackTest
from django.test import TestCase
class UserMessagesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='tester')
def test_add(self):
storage = UserMessagesStorage(http.HttpRequest())
self.assertRaises(NotImplementedError, storage.add, 'Test message 1')
def test_get_anonymous(self):
# Ensure that the storage still works if no user is attached to the
# request.
storage = UserMessagesStorage(http.HttpRequest())
self.assertEqual(len(storage), 0)
def test_get(self):
storage = UserMessagesStorage(http.HttpRequest())
storage.request.user = self.user
self.user.message_set.create(message='test message')
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'test message')
UserMessagesTest = skipUnlessAuthIsInstalled(UserMessagesTest)
class LegacyFallbackTest(FallbackTest, TestCase):
storage_class = LegacyFallbackStorage
def setUp(self):
super(LegacyFallbackTest, self).setUp()
self.user = User.objects.create(username='tester')
def get_request(self, *args, **kwargs):
request = super(LegacyFallbackTest, self).get_request(*args, **kwargs)
request.user = self.user
return request
def test_get_legacy_only(self):
request = self.get_request()
storage = self.storage_class(request)
self.user.message_set.create(message='user message')
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'user message')
def test_get_legacy(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
self.user.message_set.create(message='user message')
set_cookie_data(cookie_storage, ['cookie'])
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 2)
self.assertEqual(list(storage)[0].message, 'user message')
self.assertEqual(list(storage)[1], 'cookie')
LegacyFallbackTest = skipUnlessAuthIsInstalled(LegacyFallbackTest)
| apache-2.0 |
mikhaelfelian/PROJECT-RESTO-TIGERA | assets/tinymce/filemanager/connectors/py/connector.py | 9 | 22350 | #!/usr/bin/env python
"""
FCKeditor - The text editor for internet
Copyright (C) 2003-2005 Frederico Caldeira Knabben
Licensed under the terms of the GNU Lesser General Public License:
http://www.opensource.org/licenses/lgpl-license.php
For further information visit:
http://www.fckeditor.net/
"Support Open Source software. What about a donation today?"
File Name: connector.py
Connector for Python.
Tested With:
Standard:
Python 2.3.3
Zope:
Zope Version: (Zope 2.8.1-final, python 2.3.5, linux2)
Python Version: 2.3.5 (#4, Mar 10 2005, 01:40:25)
[GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)]
System Platform: linux2
File Authors:
Andrew Liu ([email protected])
"""
"""
Author Notes (04 December 2005):
This module has gone through quite a few phases of change. Obviously,
I am only supporting that part of the code that I use. Initially
I had the upload directory as a part of zope (ie. uploading files
directly into Zope), before realising that there were too many
complex intricacies within Zope to deal with. Zope is one ugly piece
of code. So I decided to complement Zope by an Apache server (which
I had running anyway, and doing nothing). So I mapped all uploads
from an arbitrary server directory to an arbitrary web directory.
All the FCKeditor uploading occurred this way, and I didn't have to
stuff around with fiddling with Zope objects and the like (which are
terribly complex and something you don't want to do - trust me).
Maybe a Zope expert can touch up the Zope components. In the end,
I had FCKeditor loaded in Zope (probably a bad idea as well), and
I replaced the connector.py with an alias to a server module.
Right now, all Zope components will simple remain as is because
I've had enough of Zope.
See notes right at the end of this file for how I aliased out of Zope.
Anyway, most of you probably wont use Zope, so things are pretty
simple in that regard.
Typically, SERVER_DIR is the root of WEB_DIR (not necessarily).
Most definitely, SERVER_USERFILES_DIR points to WEB_USERFILES_DIR.
"""
import cgi
import re
import os
import string
"""
escape
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as &lt;, &gt; and &amp; respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
def escape(text, replace=string.replace):
text = replace(text, '&', '&amp;') # must be done 1st
text = replace(text, '<', '&lt;')
text = replace(text, '>', '&gt;')
text = replace(text, '"', '&quot;')
return text
"""
getFCKeditorConnector
Creates a new instance of an FCKeditorConnector, and runs it
"""
def getFCKeditorConnector(context=None):
# Called from Zope. Passes the context through
connector = FCKeditorConnector(context=context)
return connector.run()
"""
FCKeditorRequest
A wrapper around the request object
Can handle normal CGI request, or a Zope request
Extend as required
"""
class FCKeditorRequest(object):
def __init__(self, context=None):
if (context is not None):
r = context.REQUEST
else:
r = cgi.FieldStorage()
self.context = context
self.request = r
def isZope(self):
if (self.context is not None):
return True
return False
def has_key(self, key):
return self.request.has_key(key)
def get(self, key, default=None):
value = None
if (self.isZope()):
value = self.request.get(key, default)
else:
if key in self.request.keys():
value = self.request[key].value
else:
value = default
return value
"""
FCKeditorConnector
The connector class
"""
class FCKeditorConnector(object):
# Configuration for FCKEditor
# can point to another server here, if linked correctly
#WEB_HOST = "http://127.0.0.1/"
WEB_HOST = ""
SERVER_DIR = "/var/www/html/"
WEB_USERFILES_FOLDER = WEB_HOST + "upload/"
SERVER_USERFILES_FOLDER = SERVER_DIR + "upload/"
# Allow access (Zope)
__allow_access_to_unprotected_subobjects__ = 1
# Class Attributes
parentFolderRe = re.compile("[\/][^\/]+[\/]?$")
"""
Constructor
"""
def __init__(self, context=None):
# The given root path will NOT be shown to the user
# Only the userFilesPath will be shown
# Instance Attributes
self.context = context
self.request = FCKeditorRequest(context=context)
self.rootPath = self.SERVER_DIR
self.userFilesFolder = self.SERVER_USERFILES_FOLDER
self.webUserFilesFolder = self.WEB_USERFILES_FOLDER
# Enables / Disables the connector
self.enabled = False # Set to True to enable this connector
# These are instance variables
self.zopeRootContext = None
self.zopeUploadContext = None
# Copied from php module =)
self.allowedExtensions = {
"File": None,
"Image": None,
"Flash": None,
"Media": None
}
self.deniedExtensions = {
"File": [ "php", "php3", "php5", "phtml", "asp", "aspx", "ascx", "jsp", "cfm", "cfc", "pl", "bat", "exe", "dll", "reg", "cgi" ],
"Image": [ "php", "php3", "php5", "phtml", "asp", "aspx", "ascx", "jsp", "cfm", "cfc", "pl", "bat", "exe", "dll", "reg", "cgi" ],
"Flash": [ "php", "php3", "php5", "phtml", "asp", "aspx", "ascx", "jsp", "cfm", "cfc", "pl", "bat", "exe", "dll", "reg", "cgi" ],
"Media": [ "php", "php3", "php5", "phtml", "asp", "aspx", "ascx", "jsp", "cfm", "cfc", "pl", "bat", "exe", "dll", "reg", "cgi" ]
}
"""
Zope specific functions
"""
def isZope(self):
# The context object is the zope object
if (self.context is not None):
return True
return False
def getZopeRootContext(self):
if self.zopeRootContext is None:
self.zopeRootContext = self.context.getPhysicalRoot()
return self.zopeRootContext
def getZopeUploadContext(self):
if self.zopeUploadContext is None:
folderNames = self.userFilesFolder.split("/")
c = self.getZopeRootContext()
for folderName in folderNames:
if (folderName <> ""):
c = c[folderName]
self.zopeUploadContext = c
return self.zopeUploadContext
"""
Generic manipulation functions
"""
def getUserFilesFolder(self):
return self.userFilesFolder
def getWebUserFilesFolder(self):
return self.webUserFilesFolder
def getAllowedExtensions(self, resourceType):
return self.allowedExtensions[resourceType]
def getDeniedExtensions(self, resourceType):
return self.deniedExtensions[resourceType]
def removeFromStart(self, string, char):
return string.lstrip(char)
def removeFromEnd(self, string, char):
return string.rstrip(char)
def convertToXmlAttribute(self, value):
if (value is None):
value = ""
return escape(value)
def convertToPath(self, path):
if (path[-1] <> "/"):
return path + "/"
else:
return path
def getUrlFromPath(self, resourceType, path):
if (resourceType is None) or (resourceType == ''):
url = "%s%s" % (
self.removeFromEnd(self.getUserFilesFolder(), '/'),
path
)
else:
url = "%s%s%s" % (
self.getUserFilesFolder(),
resourceType,
path
)
return url
def getWebUrlFromPath(self, resourceType, path):
if (resourceType is None) or (resourceType == ''):
url = "%s%s" % (
self.removeFromEnd(self.getWebUserFilesFolder(), '/'),
path
)
else:
url = "%s%s%s" % (
self.getWebUserFilesFolder(),
resourceType,
path
)
return url
def removeExtension(self, fileName):
index = fileName.rindex(".")
newFileName = fileName[0:index]
return newFileName
def getExtension(self, fileName):
index = fileName.rindex(".") + 1
fileExtension = fileName[index:]
return fileExtension
def getParentFolder(self, folderPath):
parentFolderPath = self.parentFolderRe.sub('', folderPath)
return parentFolderPath
"""
serverMapFolder
Purpose: works out the folder map on the server
"""
def serverMapFolder(self, resourceType, folderPath):
# Get the resource type directory
resourceTypeFolder = "%s%s/" % (
self.getUserFilesFolder(),
resourceType
)
# Ensure that the directory exists
self.createServerFolder(resourceTypeFolder)
# Return the resource type directory combined with the
# required path
return "%s%s" % (
resourceTypeFolder,
self.removeFromStart(folderPath, '/')
)
"""
createServerFolder
Purpose: physically creates a folder on the server
"""
def createServerFolder(self, folderPath):
# Check if the parent exists
parentFolderPath = self.getParentFolder(folderPath)
if not(os.path.exists(parentFolderPath)):
errorMsg = self.createServerFolder(parentFolderPath)
if errorMsg is not None:
return errorMsg
# Check if this exists
if not(os.path.exists(folderPath)):
os.mkdir(folderPath)
os.chmod(folderPath, 0755)
errorMsg = None
else:
if os.path.isdir(folderPath):
errorMsg = None
else:
raise "createServerFolder: Non-folder of same name already exists"
return errorMsg
"""
getRootPath
Purpose: returns the root path on the server
"""
def getRootPath(self):
return self.rootPath
"""
setXmlHeaders
Purpose: to prepare the headers for the xml to return
"""
def setXmlHeaders(self):
#now = self.context.BS_get_now()
#yesterday = now - 1
self.setHeader("Content-Type", "text/xml")
#self.setHeader("Expires", yesterday)
#self.setHeader("Last-Modified", now)
#self.setHeader("Cache-Control", "no-store, no-cache, must-revalidate")
self.printHeaders()
return
def setHeader(self, key, value):
if (self.isZope()):
self.context.REQUEST.RESPONSE.setHeader(key, value)
else:
print "%s: %s" % (key, value)
return
def printHeaders(self):
# For non-Zope requests, we need to print an empty line
# to denote the end of headers
if (not(self.isZope())):
print ""
"""
createXmlFooter
Purpose: returns the xml header
"""
def createXmlHeader(self, command, resourceType, currentFolder):
self.setXmlHeaders()
s = ""
# Create the XML document header
s += """<?xml version="1.0" encoding="utf-8" ?>"""
# Create the main connector node
s += """<Connector command="%s" resourceType="%s">""" % (
command,
resourceType
)
# Add the current folder node
s += """<CurrentFolder path="%s" url="%s" />""" % (
self.convertToXmlAttribute(currentFolder),
self.convertToXmlAttribute(
self.getWebUrlFromPath(
resourceType,
currentFolder
)
),
)
return s
"""
createXmlFooter
Purpose: returns the xml footer
"""
def createXmlFooter(self):
s = """</Connector>"""
return s
"""
sendError
Purpose: in the event of an error, return an xml based error
"""
def sendError(self, number, text):
self.setXmlHeaders()
s = ""
# Create the XML document header
s += """<?xml version="1.0" encoding="utf-8" ?>"""
s += """<Connector>"""
s += """<Error number="%s" text="%s" />""" % (number, text)
s += """</Connector>"""
return s
"""
getFolders
Purpose: command to receive a list of folders
"""
def getFolders(self, resourceType, currentFolder):
if (self.isZope()):
return self.getZopeFolders(resourceType, currentFolder)
else:
return self.getNonZopeFolders(resourceType, currentFolder)
def getZopeFolders(self, resourceType, currentFolder):
# Open the folders node
s = ""
s += """<Folders>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["Folder"]):
s += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(name)
)
# Close the folders node
s += """</Folders>"""
return s
def getNonZopeFolders(self, resourceType, currentFolder):
# Map the virtual path to our local server
serverPath = self.serverMapFolder(resourceType, currentFolder)
# Open the folders node
s = ""
s += """<Folders>"""
for someObject in os.listdir(serverPath):
someObjectPath = os.path.join(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(someObject)
)
# Close the folders node
s += """</Folders>"""
return s
"""
getFoldersAndFiles
Purpose: command to receive a list of folders and files
"""
def getFoldersAndFiles(self, resourceType, currentFolder):
if (self.isZope()):
return self.getZopeFoldersAndFiles(resourceType, currentFolder)
else:
return self.getNonZopeFoldersAndFiles(resourceType, currentFolder)
def getNonZopeFoldersAndFiles(self, resourceType, currentFolder):
# Map the virtual path to our local server
serverPath = self.serverMapFolder(resourceType, currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = os.path.join(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
size = os.path.getsize(someObjectPath)
files += """<File name="%s" size="%s" />""" % (
self.convertToXmlAttribute(someObject),
os.path.getsize(someObjectPath)
)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
# Return it
s = folders + files
return s
def getZopeFoldersAndFiles(self, resourceType, currentFolder):
folders = self.getZopeFolders(resourceType, currentFolder)
files = self.getZopeFiles(resourceType, currentFolder)
s = folders + files
return s
def getZopeFiles(self, resourceType, currentFolder):
# Open the files node
s = ""
s += """<Files>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["File","Image"]):
s += """<File name="%s" size="%s" />""" % (
self.convertToXmlAttribute(name),
((o.get_size() / 1024) + 1)
)
# Close the files node
s += """</Files>"""
return s
def findZopeFolder(self, resourceType, folderName):
# returns the context of the resource / folder
zopeFolder = self.getZopeUploadContext()
folderName = self.removeFromStart(folderName, "/")
folderName = self.removeFromEnd(folderName, "/")
if (resourceType <> ""):
try:
zopeFolder = zopeFolder[resourceType]
except:
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
zopeFolder = zopeFolder[resourceType]
if (folderName <> ""):
folderNames = folderName.split("/")
for folderName in folderNames:
zopeFolder = zopeFolder[folderName]
return zopeFolder
"""
createFolder
Purpose: command to create a new folder
"""
def createFolder(self, resourceType, currentFolder):
if (self.isZope()):
return self.createZopeFolder(resourceType, currentFolder)
else:
return self.createNonZopeFolder(resourceType, currentFolder)
def createZopeFolder(self, resourceType, currentFolder):
# Find out where we are
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
else:
errorNo = 102
error = """<Error number="%s" originalDescription="%s" />""" % (
errorNo,
self.convertToXmlAttribute(errorMsg)
)
return error
def createNonZopeFolder(self, resourceType, currentFolder):
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
currentFolderPath = self.serverMapFolder(
resourceType,
currentFolder
)
try:
newFolderPath = currentFolderPath + newFolder
errorMsg = self.createServerFolder(newFolderPath)
if (errorMsg is not None):
errorNo = 110
except:
errorNo = 103
else:
errorNo = 102
error = """<Error number="%s" originalDescription="%s" />""" % (
errorNo,
self.convertToXmlAttribute(errorMsg)
)
return error
"""
getFileName
Purpose: helper function to extrapolate the filename
"""
def getFileName(self, filename):
for splitChar in ["/", "\\"]:
array = filename.split(splitChar)
if (len(array) > 1):
filename = array[-1]
return filename
"""
fileUpload
Purpose: command to upload files to server
"""
def fileUpload(self, resourceType, currentFolder):
if (self.isZope()):
return self.zopeFileUpload(resourceType, currentFolder)
else:
return self.nonZopeFileUpload(resourceType, currentFolder)
def zopeFileUpload(self, resourceType, currentFolder, count=None):
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
file = self.request.get("NewFile", None)
fileName = self.getFileName(file.filename)
fileNameOnly = self.removeExtension(fileName)
fileExtension = self.getExtension(fileName).lower()
if (count):
nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
else:
nid = fileName
title = nid
try:
zopeFolder.manage_addProduct['OFSP'].manage_addFile(
id=nid,
title=title,
file=file.read()
)
except:
if (count):
count += 1
else:
count = 1
self.zopeFileUpload(resourceType, currentFolder, count)
return
def nonZopeFileUpload(self, resourceType, currentFolder):
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileNameOnly = self.removeExtension(newFileName)
newFileExtension = self.getExtension(newFileName).lower()
allowedExtensions = self.getAllowedExtensions(resourceType)
deniedExtensions = self.getDeniedExtensions(resourceType)
if (allowedExtensions is not None):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions is not None):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
if (self.isZope()):
# Upload into zope
self.zopeFileUpload(resourceType, currentFolder)
else:
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = self.serverMapFolder(
resourceType,
currentFolder
)
i = 0
while (True):
newFilePath = "%s%s" % (
currentFolderPath,
newFileName
)
if os.path.exists(newFilePath):
i += 1
newFilePath = "%s%s(%s).%s" % (
currentFolderPath,
newFileNameOnly,
i,
newFileExtension
)
errorNo = 201
break
else:
                            fileHandle = open(newFilePath,'wb')
linecount = 0
while (1):
#line = newFile.file.readline()
line = newFile.readline()
if not line: break
fileHandle.write("%s" % line)
linecount += 1
os.chmod(newFilePath, 0777)
break
else:
newFileName = "Extension not allowed"
errorNo = 203
else:
newFileName = "No File"
errorNo = 202
string = """
<script type="text/javascript">
window.parent.frames["frmUpload"].OnUploadCompleted(%s,"%s");
</script>
""" % (
errorNo,
newFileName.replace('"',"'")
)
return string
def run(self):
s = ""
try:
# Check if this is disabled
if not(self.enabled):
return self.sendError(1, "This connector is disabled. Please check the connector configurations and try again")
# Make sure we have valid inputs
if not(
(self.request.has_key("Command")) and
(self.request.has_key("Type")) and
(self.request.has_key("CurrentFolder"))
):
return
# Get command
command = self.request.get("Command", None)
# Get resource type
resourceType = self.request.get("Type", None)
# folder syntax must start and end with "/"
currentFolder = self.request.get("CurrentFolder", None)
if (currentFolder[-1] <> "/"):
currentFolder += "/"
if (currentFolder[0] <> "/"):
currentFolder = "/" + currentFolder
# Check for invalid paths
if (".." in currentFolder):
return self.sendError(102, "")
# File upload doesn't have to return XML, so intercept
            # here:
if (command == "FileUpload"):
return self.fileUpload(resourceType, currentFolder)
# Begin XML
s += self.createXmlHeader(command, resourceType, currentFolder)
# Execute the command
if (command == "GetFolders"):
f = self.getFolders
elif (command == "GetFoldersAndFiles"):
f = self.getFoldersAndFiles
elif (command == "CreateFolder"):
f = self.createFolder
else:
f = None
if (f is not None):
s += f(resourceType, currentFolder)
s += self.createXmlFooter()
except Exception, e:
s = "ERROR: %s" % e
return s
# Running from command line
if __name__ == '__main__':
# To test the output, uncomment the standard headers
#print "Content-Type: text/html"
#print ""
print getFCKeditorConnector()
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.connector as connector
return connector.getFCKeditorConnector(context=context).run()
"""
| gpl-2.0 |
gregbdunn/aws-ec2rescue-linux | tools/moduletests/unit/test_arpcache.py | 2 | 12661 | # Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the arpcache module
"""
import os
import subprocess
import sys
import unittest
import mock
import moduletests.src.arpcache
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class TestArpcache(unittest.TestCase):
config_file_path = "/etc/sysctl.d/55-arp-gc_thresh1.conf"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("subprocess.check_output")
def test_detect_noproblem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 0"
self.assertFalse(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
def test_detect_problem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 1"
self.assertTrue(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
"1", "test", "/etc/sysctl.d/55-arp-gc_thresh1.conf: no such file or directory"))
def test_fix_cpe(self, check_output_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(subprocess.CalledProcessError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] 'sysctl -w net.ipv4.neigh.default.gc_thresh1=0' failed for running system\n"))
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="stuff"))
def test_fix_exists_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"something else\n"))
def test_fix_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"net.ipv4.neigh.default.gc_thresh1 = 0\n"))
def test_fix_sudo_true_found_twice(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", side_effect=IOError)
def test_fix_writefail(self, open_mock, exists_mock, check_output_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] Failed to write config to /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
@mock.patch("moduletests.src.arpcache.detect", return_value=False)
def test_run_success(self, detect_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled.\n"))
self.assertTrue(detect_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
def test_run_no_remediate(self, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": False,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
moduletests.src.arpcache.run()
self.assertTrue("[UNFIXED] Remediation impossible without sudo and --remediate.\n"
"-- Running as root/sudo: True\n"
"-- Required --remediate flag specified: False\n"
"[FAILURE] Aggressive arp caching is enabled."
in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.arpcache.backup", return_value=True)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_failure_isfile(self, restore_mock, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(restore_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", side_effect=(True, False))
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_fix(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled after "
"remediation. Please see the logs for further details\n"))
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", side_effect=Exception)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_detect_exception(self, restore_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
self.assertTrue(restore_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict", side_effect=Exception)
def test_run_config_exception(self, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue(config_mock.called)
| apache-2.0 |
sagarghuge/recurringtask | GTG/gtk/editor/notify_dialog.py | 1 | 2066 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from gi.repository import Gtk
from GTG import _, ngettext
from GTG.gtk.editor import GnomeConfig
class NotifyCloseUI():
def __init__(self):
# Load window tree
self.builder = Gtk.Builder()
self.builder.add_from_file(GnomeConfig.NOTIFY_UI_FILE)
signals = {"on_confirm_activate": self.on_confirm_pressed,
"on_delete_cancel": lambda x: x.hide, }
self.builder.connect_signals(signals)
def on_confirm_pressed(self, widget):
self.builder.get_object("notify_dialog").hide()
def notifyclose(self):
cdlabel2 = self.builder.get_object("cd-label2")
cdlabel2.set_label(ngettext(
"You need to set Due date before closing the task.",
"You need to set Due date before closing the task.",
0))
notifyclose_dialog = self.builder.get_object("notify_dialog")
notifyclose_dialog.resize(1, 1)
confirm_button = self.builder.get_object("confirm")
confirm_button.grab_focus()
if notifyclose_dialog.run() != 1:
pass
notifyclose_dialog.hide()
| gpl-3.0 |
saltastro/polsalt | polsalt/specpolwollaston.py | 1 | 2770 |
"""
specpolwollaston
Correct the O and E beams for distortion introduced by the Wollaston beam splitter
"""
import numpy as np
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import shift
from specpolutils import rssmodelwave
def read_wollaston(hdu, wollaston_file):
""" Correct the O or E beam for distortion due to the beam splitter
Parameters
----------
hdu: fits.HDUList
Polarimetric observations data
wollaston_file: None or str
File containing the central position of the split O and E beams
Return
------
woll_pix: ~numpy.ndarray
        A two column array representing the center pixels of the O+E beams for
the given configuration
"""
#set up data
data= hdu['SCI'].data
rows,cols = data.shape
grating = hdu[0].header['GRATING'].strip()
grang = hdu[0].header['GR-ANGLE']
artic = hdu[0].header['CAMANG']
trkrho = hdu[0].header['TRKRHO']
date = hdu[0].header['DATE-OBS'].replace('-','')
cbin, rbin = [int(x) for x in hdu[0].header['CCDSUM'].split(" ")]
#load data from wollaston file
lam_m = np.loadtxt(wollaston_file,dtype=float,usecols=(0,))
rpix_om = np.loadtxt(wollaston_file,dtype=float,unpack=True,usecols=(1,2))
lam_c = rssmodelwave(grating,grang,artic,trkrho,cbin,cols,date)
return interp1d(lam_m,rpix_om,kind='cubic',bounds_error=False)(lam_c)
def specpolwollaston(hdu, wollaston_file=None):
""" Correct the O or E beam for distortion due to the beam splitter
Parameters
----------
hdu: fits.HDUList
Polarimetric observations data
wollaston_file: None or str
File containing the central position of the split O and E beams
Return
------
whdu: fits.HDUList
New object with each extension corrected
"""
rows,cols = hdu[1].data.shape
cbin, rbin = [int(x) for x in hdu[0].header['CCDSUM'].split(" ")]
#determine the shift
    rpix_oc = read_wollaston(hdu, wollaston_file)
    drow_shift = (rpix_oc-rpix_oc[:,cols//2][:,None])/rbin
for i in range(len(hdu)):
if hdu[i].data.any():
for o in (0,1):
hdu[i].data[o] = correct_wollaston(hdu[i].data[o], drow_shift[o])
return hdu
def correct_wollaston(data, drow_shift):
"""Correct the distortion in the data by a shift
Parameters
----------
data: ~numpy.ndarray
Data to be corrected
drow_shift: ~numpy.ndarray
Shift to be applied to each column
Returns
-------
sdata: ~numpy.ndarray
Corrected data
"""
rows,cols = data.shape
sdata = np.zeros(data.shape, dtype='float32')
for c in range(cols):
shift(data[:,c], drow_shift[c], sdata[:,c], order=1)
return sdata
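# Minimal usage sketch (not part of the original module): illustrates how
# correct_wollaston() applies a per-column row shift. The array values and
# shifts below are made up purely for demonstration.
if __name__ == '__main__':
    demo_data = np.arange(20, dtype='float32').reshape(4, 5)
    demo_shift = np.linspace(-0.5, 0.5, 5)            # one row shift per column
    print(correct_wollaston(demo_data, demo_shift))   # distortion-corrected copy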
| bsd-3-clause |
Fritz449/SRLF | algos/a3c_discrete.py | 1 | 15229 | import tensorflow as tf
import numpy as np
import os
import sys
import random
import subprocess
from redis import Redis
import time
sys.path.append(os.path.realpath(".."))
import helpers.utils as hlp
from models.feed_forward import FFDiscrete
class A3CDiscreteTrainer(FFDiscrete):
def __init__(self, sess, args):
FFDiscrete.__init__(self, sess, args)
self.sess = sess
self.config = args['config']
self.env = args['environment']
self.timesteps_per_launch = args['max_pathlength']
self.n_workers = args['n_workers']
self.distributed = args['distributed']
self.n_tests = args['n_tests']
self.entropy_coef = args['entropy_coef']
self.learning_rate = args['learning_rate']
self.n_steps = args['n_steps']
self.scale = args['scale']
self.gamma = args['gamma']
self.save_every = args.get('save_every', 1)
self.test_every = args.get('test_every', 10)
self.sums = self.sumsqrs = self.sumtime = 0
self.timestep = 0
self.create_internal()
self.train_scores = []
self.test_scores = []
np.set_printoptions(precision=6)
# Worker parameters:
self.id_worker = args['id_worker']
self.test_mode = args['test_mode']
def create_internal(self):
self.targets = {
"advantage": tf.placeholder(dtype=tf.float32, shape=[None]),
"return": tf.placeholder(dtype=tf.float32, shape=[None]),
}
for i in range(len(self.n_actions)):
self.targets["action_{}".format(i)] = tf.placeholder(dtype=tf.int32, shape=[None])
N = tf.shape(self.targets["advantage"])[0]
base = [N] + [1 for _ in range(len(self.n_actions))]
log_dist = tf.zeros(shape=[N] + self.n_actions)
p_n = tf.zeros(shape=[N])
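        # Accumulate, per discrete action dimension, the log-probability of the
        # chosen action (p_n) and the joint log-distribution (log_dist) that is
        # used for the entropy bonus below.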
for i, n in enumerate(self.n_actions):
right_shape = base[:]
right_shape[i + 1] = n
actions = self.targets["action_{}".format(i)]
action_log_dist = tf.reshape(self.action_logprobs[i], [-1])
p = tf.reshape(tf.gather(action_log_dist, tf.range(0, N) * n + actions), [-1])
p_n += p
log_dist += tf.reshape(action_log_dist, right_shape)
N = tf.cast(N, tf.float32)
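        # A3C objective: policy-gradient surrogate, minus an entropy bonus that
        # encourages exploration, plus half of the value-function MSE (added below).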
self.loss = -tf.reduce_mean(p_n * self.targets["advantage"])
self.entropy = tf.reduce_sum(-tf.exp(log_dist) * log_dist) / N
value_loss = tf.reduce_mean((self.targets["return"] - self.value) ** 2)
self.loss += -self.entropy_coef * self.entropy + value_loss / 2
self.weights += self.value_weights
self.gradients = tf.gradients(self.loss, self.weights)
def save(self, name):
directory = 'saves/' + name + '/'
if not os.path.exists(directory):
os.makedirs(directory)
directory += 'iteration_{}'.format(self.timestep) + '/'
if not os.path.exists(directory):
os.makedirs(directory)
for i, tensor in enumerate(tf.global_variables()):
value = self.sess.run(tensor)
np.save(directory + 'weight_{}'.format(i), value)
if self.scale != 'off':
np.save(directory + 'sums', self.sums)
np.save(directory + 'sumsquares', self.sumsqrs)
np.save(directory + 'sumtime', self.sumtime)
np.save(directory + 'timestep', np.array([self.timestep]))
np.save(directory + 'train_scores', np.array(self.train_scores))
np.save(directory + 'test_scores', np.array(self.test_scores))
print("Agent successfully saved in folder {}".format(directory))
def load(self, name, iteration=None):
try:
directory = 'saves/' + name + '/'
if not os.path.exists(directory):
print('That directory does not exist!')
raise Exception
if iteration is None:
iteration = np.max([int(x[10:]) for x in [dir for dir in os.walk(directory)][0][1]])
directory += 'iteration_{}'.format(iteration) + '/'
for i, tensor in enumerate(tf.global_variables()):
arr = np.load(directory + 'weight_{}.npy'.format(i))
self.sess.run(tensor.assign(arr))
if self.scale != 'off':
self.sums = np.load(directory + 'sums.npy')
self.sumsqrs = np.load(directory + 'sumsquares.npy')
self.sumtime = np.load(directory + 'sumtime.npy')
self.timestep = np.load(directory + 'timestep.npy')[0]
self.train_scores = np.load(directory + 'train_scores.npy').tolist()
self.test_scores = np.load(directory + 'test_scores.npy').tolist()
print("Agent successfully loaded from folder {}".format(directory))
except:
print("Something is wrong, loading failed")
def apply_adam_updates(self, variables_server, gradients, learning_rate, epsilon=1e-6):
update_steps = hlp.load_object(variables_server.get('update_steps')) + 1
variables_server.set('update_steps', hlp.dump_object(update_steps))
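        # Adam-style bias correction of the step size; beta1=0.9 and beta2=0.999
        # are hard-coded in the moment updates below.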
learning_rate = learning_rate * ((1 - 0.999 ** update_steps) ** 0.5) / (1 - 0.9 ** update_steps)
for i, gradient in enumerate(gradients):
momentum = hlp.load_object(variables_server.get('momentum_{}'.format(i)))
momentum = 0.999 * momentum + (1 - 0.999) * gradient * gradient
variables_server.set('momentum_{}'.format(i), hlp.dump_object(momentum))
velocity = hlp.load_object(variables_server.get('velocity_{}'.format(i)))
velocity = 0.9 * velocity + (1 - 0.9) * gradient
variables_server.set('velocity_{}'.format(i), hlp.dump_object(velocity))
weight = hlp.load_object(variables_server.get('weight_{}'.format(i)))
new_weight = weight - velocity * learning_rate / ((momentum ** 0.5) + epsilon)
variables_server.set('weight_{}'.format(i), hlp.dump_object(new_weight))
return update_steps
def work(self):
variables_server = Redis(port=12000)
if self.scale != 'off':
try:
means = hlp.load_object(variables_server.get("means"))
stds = hlp.load_object(variables_server.get("stds"))
self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
except:
pass
try:
weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
range(len(self.weights))]
self.set_weights(weights)
except:
pass
env = self.env
while True:
observations, action_tuples, rewards, timestamps = [], [], [], []
for _ in range(self.n_steps):
observations.append(env.features[0])
timestamps.append(env.timestamp)
actions = self.act(env.features)
env.step(actions)
action_tuples.append(actions)
rewards.append(env.reward)
if env.done or env.timestamp > self.timesteps_per_launch:
variables_server.lpush('results', hlp.dump_object(env.get_total_reward()))
print("Episode reward: {}".format(env.get_total_reward()), "Length: {}".format(env.timestamp))
break
timestamps.append(env.timestamp)
observations_batch = np.array(observations)
actions_batch = np.array(action_tuples)
feed_dict = {self.state_input: observations_batch}
for i in range(len(self.n_actions)):
feed_dict[self.targets["action_{}".format(i)]] = actions_batch[:, i]
if env.done or env.timestamp > self.timesteps_per_launch:
rewards.append(0)
env.reset()
else:
obs = observations[-1]
rewards.append(self.sess.run(self.value, feed_dict={self.state_input: obs.reshape((1,) + obs.shape)}))
returns_batch = hlp.discount(np.array(rewards), self.gamma, np.array(timestamps))[:-1]
values = self.sess.run(self.value, feed_dict)
feed_dict[self.targets["advantage"]] = returns_batch - values
feed_dict[self.targets["return"]] = returns_batch
gradients = self.sess.run(self.gradients, feed_dict)
self.apply_adam_updates(variables_server, gradients, self.learning_rate)
weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
range(len(self.weights))]
self.set_weights(weights)
def make_rollout(self):
variables_server = Redis(port=12000)
if self.scale != 'off':
try:
means = hlp.load_object(variables_server.get("means"))
stds = hlp.load_object(variables_server.get("stds"))
self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
except:
pass
try:
weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
range(len(self.weights))]
self.set_weights(weights)
except:
pass
env = self.env
n_tasks = self.n_tests
timestep = 0
i_task = 0
paths = []
while i_task < n_tasks:
path = {}
observations, action_tuples, rewards, dist_tuples, timestamps = [], [], [], [], []
sums = np.zeros((1, env.get_observation_space()))
sumsqrs = np.zeros(sums.shape)
env.reset()
while not env.done and env.timestamp < self.timesteps_per_launch:
sums += env.features
sumsqrs += np.square(env.features)
observations.append(env.features[0])
timestamps.append(env.timestamp)
if not self.test_mode:
actions, dist_tuple = self.act(env.features, return_dists=True)
dist_tuples.append(dist_tuple)
else:
actions = self.act(env.features, exploration=False)
env.step(actions)
timestep += 1
action_tuples.append(actions)
rewards.append(env.reward)
path["observations"] = np.array(observations)
path["action_tuples"] = np.array(action_tuples)
path["rewards"] = np.array(rewards)
if not self.test_mode:
path["dist_tuples"] = np.array(dist_tuples)
path["timestamps"] = np.array(timestamps)
path["sumobs"] = sums
path["sumsqrobs"] = sumsqrs
path["terminated"] = env.done
path["total"] = env.get_total_reward()
paths.append(path)
i_task += 1
if self.distributed:
variables_server.set("paths_{}".format(self.id_worker), hlp.dump_object(paths))
else:
self.paths = paths
def train(self):
cmd_server = 'redis-server --port 12000'
p = subprocess.Popen(cmd_server, shell=True, preexec_fn=os.setsid)
self.variables_server = Redis(port=12000)
means = "-"
stds = "-"
if self.scale != 'off':
if self.timestep == 0:
print("Time to measure features!")
if self.distributed:
worker_args = \
{
'config': self.config,
'test_mode': False,
}
hlp.launch_workers(worker_args, self.n_workers)
paths = []
for i in range(self.n_workers):
paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
else:
self.test_mode = False
self.make_rollout()
paths = self.paths
for path in paths:
self.sums += path["sumobs"]
self.sumsqrs += path["sumsqrobs"]
self.sumtime += path["observations"].shape[0]
stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))
means = self.sums / self.sumtime
print("Init means: {}".format(means))
print("Init stds: {}".format(stds))
self.variables_server.set("means", hlp.dump_object(means))
self.variables_server.set("stds", hlp.dump_object(stds))
self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
weights = self.get_weights()
for i, weight in enumerate(weights):
self.variables_server.set("weight_" + str(i), hlp.dump_object(weight))
self.variables_server.set('momentum_{}'.format(i), hlp.dump_object(np.zeros(weight.shape)))
self.variables_server.set('velocity_{}'.format(i), hlp.dump_object(np.zeros(weight.shape)))
self.variables_server.set('update_steps', hlp.dump_object(0))
worker_args = \
{
'config': self.config,
'test_mode': False,
}
hlp.launch_workers(worker_args, self.n_workers, command='work', wait=False)
while True:
time.sleep(self.test_every)
print("Time for testing!")
if self.distributed:
worker_args = \
{
'config': self.config,
'test_mode': True,
}
hlp.launch_workers(worker_args, self.n_workers)
paths = []
for i in range(self.n_workers):
paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
else:
self.test_mode = True
self.make_rollout()
paths = self.paths
total_rewards = np.array([path["total"] for path in paths])
eplens = np.array([len(path["rewards"]) for path in paths])
print("""
-------------------------------------------------------------
Mean test score: {test_scores}
Mean test episode length: {test_eplengths}
Max test score: {max_test}
Number of train episodes: {number}
Mean of features: {means}
Std of features: {stds}
-------------------------------------------------------------
""".format(
means=means,
stds=stds,
test_scores=np.mean(total_rewards),
test_eplengths=np.mean(eplens),
max_test=np.max(total_rewards),
number=self.variables_server.llen('results')
))
self.timestep += 1
self.train_scores = [hlp.load_object(res) for res in self.variables_server.lrange('results', 0, -1)][::-1]
self.test_scores.append(np.mean(total_rewards))
if self.timestep % self.save_every == 0:
self.save(self.config[:-5])
| apache-2.0 |
varunarya10/nova_test_latest | nova/virt/libvirt/guest.py | 4 | 13570 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the guest.
This class encapsulates a libvirt domain and provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes.
"""
from lxml import etree
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova import utils
from nova.virt.libvirt import config as vconfig
libvirt = None
LOG = logging.getLogger(__name__)
class Guest(object):
def __init__(self, domain):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._domain = domain
def __repr__(self):
return "<Guest %(id)d %(name)s %(uuid)s>" % {
'id': self.id,
'name': self.name,
'uuid': self.uuid
}
@property
def id(self):
return self._domain.ID()
@property
def uuid(self):
return self._domain.UUIDString()
@property
def name(self):
return self._domain.name()
@property
def _encoded_xml(self):
return encodeutils.safe_decode(self._domain.XMLDesc(0))
@classmethod
def create(cls, xml, host):
"""Create a new Guest
:param xml: XML definition of the domain to create
:param host: host.Host connection to define the guest on
:returns guest.Guest: Guest ready to be launched
"""
try:
# TODO(sahid): Host.write_instance_config should return
# an instance of Guest
domain = host.write_instance_config(xml)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a domain with XML: %s') %
encodeutils.safe_decode(xml))
return cls(domain)
def launch(self, pause=False):
"""Starts a created guest.
:param pause: Indicates whether to start and pause the guest
"""
flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
try:
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error launching a defined domain '
'with XML: %s') %
self._encoded_xml, errors='ignore')
def poweroff(self):
"""Stops a running guest."""
self._domain.destroy()
def inject_nmi(self):
"""Injects an NMI to a guest."""
self._domain.injectNMI()
def resume(self):
"""Resumes a suspended guest."""
self._domain.resume()
def enable_hairpin(self):
"""Enables hairpin mode for this guest."""
interfaces = self.get_interfaces()
try:
for interface in interfaces:
utils.execute(
'tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
self._encoded_xml, errors='ignore')
def get_interfaces(self):
"""Returns a list of all network interfaces for this domain."""
doc = None
try:
doc = etree.fromstring(self._encoded_xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
def get_vcpus_info(self):
"""Returns virtual cpus information of guest.
:returns: guest.VCPUInfo
"""
vcpus = self._domain.vcpus()
if vcpus is not None:
for vcpu in vcpus[0]:
yield VCPUInfo(
id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
def delete_configuration(self):
"""Undefines a domain from hypervisor."""
try:
self._domain.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags. %d"
"Retrying with undefine", self.id)
self._domain.undefine()
except AttributeError:
# Older versions of libvirt don't support undefine flags,
# trying to remove managed image
try:
if self._domain.hasManagedSaveImage(0):
self._domain.managedSaveRemove(0)
except AttributeError:
pass
self._domain.undefine()
def has_persistent_configuration(self):
"""Whether domain config is persistently stored on the host."""
return self._domain.isPersistent()
def attach_device(self, conf, persistent=False, live=False):
"""Attaches device to the guest.
:param conf: A LibvirtConfigObject of the device to attach
:param persistent: A bool to indicate whether the change is
persistent or not
        :param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
self._domain.attachDeviceFlags(conf.to_xml(), flags=flags)
def get_disk(self, device):
"""Returns the disk mounted at device
:returns LivirtConfigGuestDisk: mounted at device or None
"""
try:
doc = etree.fromstring(self._domain.XMLDesc(0))
except Exception:
return None
node = doc.find("./devices/disk/target[@dev='%s'].." % device)
if node is not None:
conf = vconfig.LibvirtConfigGuestDisk()
conf.parse_dom(node)
return conf
def detach_device(self, conf, persistent=False, live=False):
"""Detaches device to the guest.
:param conf: A LibvirtConfigObject of the device to detach
:param persistent: A bool to indicate whether the change is
persistent or not
        :param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
self._domain.detachDeviceFlags(conf.to_xml(), flags=flags)
def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
dump_migratable=False):
"""Returns xml description of guest.
:param dump_inactive: Dump inactive domain information
:param dump_sensitive: Dump security sensitive information
:param dump_migratable: Dump XML suitable for migration
:returns string: XML description of the guest
"""
flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
return self._domain.XMLDesc(flags=flags)
def save_memory_state(self):
"""Saves the domain's memory state. Requires running domain.
raises: raises libvirtError on error
"""
self._domain.managedSave(0)
def get_block_device(self, disk):
"""Returns a block device wrapper for disk."""
return BlockDevice(self, disk)
class BlockDevice(object):
"""Wrapper around block device API"""
REBASE_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
COMMIT_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
def __init__(self, guest, disk):
self._guest = guest
self._disk = disk
def abort_job(self, async=False, pivot=False):
"""Request to cancel any job currently running on the block.
:param async: Request only, do not wait for completion
:param pivot: Pivot to new file when ending a copy or
active commit job
"""
flags = async and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
self._guest._domain.blockJobAbort(self._disk, flags=flags)
def get_job_info(self):
"""Returns information about job currently running
:returns: BlockDeviceJobInfo or None
"""
status = self._guest._domain.blockJobInfo(self._disk, flags=0)
if status != -1:
return BlockDeviceJobInfo(
job=status.get("type", 0),
bandwidth=status.get("bandwidth", 0),
cur=status.get("cur", 0),
end=status.get("end", 0))
def rebase(self, base, shallow=False, reuse_ext=False,
copy=False, relative=False):
"""Rebases block to new base
:param shallow: Limit copy to top of source backing chain
:param reuse_ext: Reuse existing external file of a copy
:param copy: Start a copy job
:param relative: Keep backing chain referenced using relative names
"""
flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
return self._guest._domain.blockRebase(
self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
def commit(self, base, top, relative=False):
"""Commit on block device
        For performance during live snapshot it will reduce the disk chain
to a single disk.
:param relative: Keep backing chain referenced using relative names
"""
flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
return self._guest._domain.blockCommit(
self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
def resize(self, size_kb):
"""Resizes block device to Kib size."""
self._guest._domain.blockResize(self._disk, size_kb)
def wait_for_job(self, abort_on_error=False, wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:param abort_on_error: Whether to stop process and raise NovaException
on error (default: False)
:param wait_for_job_clean: Whether to force wait to ensure job is
finished (see bug: LP#1119173)
:returns: True if still in progress
False if completed
"""
status = self.get_job_info()
if not status and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
if wait_for_job_clean:
job_ended = status.job == 0
else:
job_ended = status.cur == status.end
return not job_ended
class VCPUInfo(object):
def __init__(self, id, cpu, state, time):
"""Structure for information about guest vcpus.
:param id: The virtual cpu number
:param cpu: The host cpu currently associated
:param state: The running state of the vcpu (0 offline, 1 running, 2
blocked on resource)
:param time: The cpu time used in nanoseconds
"""
self.id = id
self.cpu = cpu
self.state = state
self.time = time
class BlockDeviceJobInfo(object):
def __init__(self, job, bandwidth, cur, end):
"""Structure for information about running job.
:param job: The running job (0 placeholder, 1 pull,
2 copy, 3 commit, 4 active commit)
:param bandwidth: Used in MiB/s
:param cur: Indicates the position between 0 and 'end'
:param end: Indicates the position for this operation
"""
self.job = job
self.bandwidth = bandwidth
self.cur = cur
self.end = end
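# Illustrative usage only (not from the nova source); assumes an existing
# nova libvirt host connection "conn" and a guest XML definition "xml":
#
#   guest = Guest.create(xml, conn)
#   guest.launch(pause=False)
#   for vcpu in guest.get_vcpus_info():
#       print(vcpu.id, vcpu.cpu, vcpu.state, vcpu.time)
#   guest.get_block_device('vda').get_job_info()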
| apache-2.0 |
HerlonNascimento/namebench | libnamebench/nameserver_test.py | 175 | 7015 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks for tests."""
__author__ = '[email protected] (Thomas Stromberg)'
import mocks
import nameserver
import unittest
class TestNameserver(unittest.TestCase):
def testInit(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
self.assertEquals(ns.ip, mocks.GOOD_IP)
self.assertEquals(ns.name, None)
ns = mocks.MockNameServer(mocks.NO_RESPONSE_IP, name='Broked')
self.assertEquals(ns.ip, mocks.NO_RESPONSE_IP)
self.assertEquals(ns.name, 'Broked')
def testTimedRequest(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(response, duration, exception) = ns.TimedRequest('A', 'www.paypal.com')
self.assertEquals(response.id, 999)
expected = ('www.paypal.com. 159 IN A 66.211.169.65\n'
'www.paypal.com. 159 IN A 66.211.169.2')
self.assertEquals(str(response.answer[0]), expected)
self.assertTrue(duration > 0)
self.assertEquals(exception, None)
def testTestAnswers(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestAnswers('A', 'www.paypal.com',
'10.0.0.1')
self.assertEquals(is_broken, False)
self.assertEquals(warning, None)
self.assertTrue(duration > 0 and duration < 3600)
def testResponseToAscii(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(response, duration, exception) = ns.TimedRequest('A', 'www.paypal.com')
self.assertEquals(nameserver.ResponseToAscii(response),
'66.211.169.65 + 66.211.169.2')
response.answer = None
self.assertEquals(nameserver.ResponseToAscii(response), 'no answer')
def testGoogleComResponse(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestGoogleComResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning,
'google.com. is hijacked (66.211.169.65 + 66.211.169.2)')
self.assertTrue(duration > 0 and duration < 3600)
def testWwwGoogleComResponse(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestWwwGoogleComResponse()
self.assertEquals(is_broken, True)
self.assertEquals(warning, 'No answer')
self.assertTrue(duration > 0 and duration < 3600)
def testWwwPaypalComResponse(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestWwwPaypalComResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning, None)
def testNegativeResponse(self):
ns = mocks.MockNameServer(mocks.NO_RESPONSE_IP)
(is_broken, warning, duration) = ns.TestNegativeResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning, None)
def testNegativeResponseHijacked(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(is_broken, warning, duration) = ns.TestNegativeResponse()
self.assertEquals(is_broken, False)
self.assertEquals(warning,
'NXDOMAIN Hijacking (66.211.169.65 + 66.211.169.2)')
def testNegativeResponseBroken(self):
ns = mocks.MockNameServer(mocks.BROKEN_IP)
(is_broken, warning, duration) = ns.TestNegativeResponse()
self.assertEquals(is_broken, True)
self.assertEquals(warning, 'BadResponse')
def testWildcardCache(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
(response, is_broken, warning, duration) = ns.QueryWildcardCache()
self.assertEquals(is_broken, False)
question = str(response.question[0])
self.assertTrue(question.startswith('namebench'))
self.assertEquals(warning, None)
def testCheckHealthGood(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
ns.CheckHealth()
self.assertEquals(ns.CheckHealth(), False)
self.assertEquals(ns.warnings, ['No answer'])
self.assertEquals(len(ns.checks), 1)
self.assertEquals(ns.failure[0], 'TestWwwGoogleComResponse')
self.assertEquals(ns.checks[0][0:3],
('TestWwwGoogleComResponse', True, 'No answer'))
def testCheckHealthPerfect(self):
ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.CheckHealth()
self.assertEquals(ns.CheckHealth(), True)
expected = ['www.google.com. is hijacked (66.211.169.65 + 66.211.169.2)',
'google.com. is hijacked (66.211.169.65 + 66.211.169.2)',
'NXDOMAIN Hijacking (66.211.169.65 + 66.211.169.2)']
self.assertEquals(ns.warnings, expected)
self.assertEquals(len(ns.checks), 5)
self.assertEquals(ns.failure, None)
self.assertTrue(ns.check_duration > 10)
  def testQueryWildcardCacheSaving(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
other_ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.QueryWildcardCache(save=True)
other_ns.QueryWildcardCache(save=True)
# Test our cache-sharing mechanisms
(hostname, ttl) = ns.cache_check
self.assertTrue(hostname.startswith('namebench'))
self.assertEquals(ttl, 159)
(other_hostname, other_ttl) = other_ns.cache_check
self.assertTrue(other_hostname.startswith('namebench'))
self.assertNotEqual(hostname, other_hostname)
self.assertEquals(other_ttl, 159)
def testSharedCacheNoMatch(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
other_ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.QueryWildcardCache(save=True)
other_ns.QueryWildcardCache(save=True)
(shared, slower, faster) = ns.TestSharedCache(other_ns)
self.assertEquals(shared, False)
self.assertEquals(slower, None)
self.assertEquals(faster, None)
def testSharedCacheMatch(self):
ns = mocks.MockNameServer(mocks.GOOD_IP)
other_ns = mocks.MockNameServer(mocks.PERFECT_IP)
ns.QueryWildcardCache(save=True)
other_ns.QueryWildcardCache(save=True)
# Increase the TTL of 'other'
other_ns.cache_check = (other_ns.cache_check[0], other_ns.cache_check[1] + 5)
(shared, slower, faster) = ns.TestSharedCache(other_ns)
self.assertEquals(shared, True)
self.assertEquals(slower.ip, mocks.GOOD_IP)
self.assertEquals(faster.ip, mocks.PERFECT_IP)
# Increase the TTL of 'other' by a whole lot
other_ns.cache_check = (other_ns.cache_check[0], other_ns.cache_check[1] + 3600)
(shared, slower, faster) = ns.TestSharedCache(other_ns)
self.assertEquals(shared, False)
self.assertEquals(slower, None)
self.assertEquals(faster, None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
munkiat/libcloud | docs/examples/compute/vmware_vcloud_1.5.py | 60 | 2076 | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
# Skip this step if you are launching nodes on an official vCloud
# provider. It is intended only for self signed SSL certs in
# vanilla vCloud Director v1.5 test deployments.
# Note: Code like this poses a security risk (MITM attack) and
# that's the reason why you should never use it for anything else
# besides testing. You have been warned.
libcloud.security.VERIFY_SSL_CERT = False
vcloud = get_driver(Provider.VCLOUD)
driver = vcloud('your username@organisation', 'your password',
host='vcloud.local', api_version='1.5')
# List all instantiated vApps
nodes = driver.list_nodes()
# List all VMs within the first vApp instance
print(nodes[0].extra['vms'])
# List all available vApp Templates
images = driver.list_images()
image = [i for i in images if i.name == 'natty-server-cloudimg-amd64'][0]
# Create node with minimum set of parameters
node = driver.create_node(name='test node 1', image=image)
# Destroy the node
driver.destroy_node(node)
# Create node without deploying and powering it on
node = driver.create_node(name='test node 2', image=image, ex_deploy=False)
# Create node with custom CPU & Memory values
node = driver.create_node(name='test node 3', image=image, ex_vm_cpu=3,
ex_vm_memory=1024)
# Create node with customised networking parameters (eg. for OVF
# imported images)
node = driver.create_node(name='test node 4', image=image,
ex_vm_network='your vm net name',
ex_network='your org net name',
ex_vm_fence='bridged', ex_vm_ipmode='DHCP')
# Create node in a custom virtual data center
node = driver.create_node(name='test node 4', image=image,
ex_vdc='your vdc name')
# Create node with guest OS customisation script to be run at first boot
node = driver.create_node(name='test node 5', image=image,
ex_vm_script='filesystem path to your script')
| apache-2.0 |
jjscarafia/odoo | openerp/tools/which.py | 456 | 6884 | #!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
if not '' in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
        return next(iter(which_files(file, mode, path, pathext)))
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
if __name__ == '__main__':
import doctest
doctest.testmod()
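    # Illustrative usage (not part of the self-test):
    #   which('ls')        -> first matching executable path, or raises IOError
    #   which_files('ls')  -> generator over every match on the search path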
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aarticianpc/greenpointtrees | src/paypal/pro/migrations/0001_initial.py | 12 | 2843 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PayPalNVP',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('method', models.CharField(max_length=64, blank=True)),
('ack', models.CharField(max_length=32, blank=True)),
('profilestatus', models.CharField(max_length=32, blank=True)),
('timestamp', models.DateTimeField(null=True, blank=True)),
('profileid', models.CharField(max_length=32, blank=True)),
('profilereference', models.CharField(max_length=128, blank=True)),
('correlationid', models.CharField(max_length=32, blank=True)),
('token', models.CharField(max_length=64, blank=True)),
('payerid', models.CharField(max_length=64, blank=True)),
('firstname', models.CharField(max_length=255, verbose_name='First Name', blank=True)),
('lastname', models.CharField(max_length=255, verbose_name='Last Name', blank=True)),
('street', models.CharField(max_length=255, verbose_name='Street Address', blank=True)),
('city', models.CharField(max_length=255, verbose_name='City', blank=True)),
('state', models.CharField(max_length=255, verbose_name='State', blank=True)),
('countrycode', models.CharField(max_length=2, verbose_name='Country', blank=True)),
('zip', models.CharField(max_length=32, verbose_name='Postal / Zip Code', blank=True)),
('invnum', models.CharField(max_length=255, blank=True)),
('custom', models.CharField(max_length=255, blank=True)),
('flag', models.BooleanField(default=False)),
('flag_code', models.CharField(max_length=32, blank=True)),
('flag_info', models.TextField(blank=True)),
('ipaddress', models.IPAddressField(blank=True)),
('query', models.TextField(blank=True)),
('response', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'paypal_nvp',
'verbose_name': 'PayPal NVP',
},
bases=(models.Model,),
),
]
| mit |
kmspriyatham/symath | scipy/scipy/sparse/construct.py | 2 | 19642 | """Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'diags', 'block_diag']
from warnings import warn
import numpy as np
from .sputils import upcast
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .lil import lil_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : format of the result (e.g. "csr")
By default (format=None) an appropriate sparse matrix
format is returned. This choice is subject to change.
See Also
--------
diags : more convenient form of this function
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
>>> diags = array([0,-1,2])
>>> spdiags(data, diags, 4, 4).todense()
matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def diags(diagonals, offsets, shape=None, format=None, dtype=None):
"""
Construct a sparse matrix from diagonals.
.. versionadded:: 0.11
Parameters
----------
diagonals : sequence of array_like
Sequence of arrays containing the matrix diagonals,
corresponding to `offsets`.
offsets : sequence of int
Diagonals to set:
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
shape : tuple of int, optional
Shape of the result. If omitted, a square matrix large enough
to contain the diagonals is returned.
format : {"dia", "csr", "csc", "lil", ...}, optional
Matrix format of the result. By default (format=None) an
appropriate sparse matrix format is returned. This choice is
subject to change.
dtype : dtype, optional
Data type of the matrix.
See Also
--------
spdiags : construct matrix from diagonals
Notes
-----
This function differs from `spdiags` in the way it handles
off-diagonals.
The result from `diags` is the sparse equivalent of::
np.diag(diagonals[0], offsets[0])
+ ...
+ np.diag(diagonals[k], offsets[k])
Repeated diagonal offsets are disallowed.
Examples
--------
>>> diagonals = [[1,2,3,4], [1,2,3], [1,2]]
>>> diags(diagonals, [0, -1, 2]).todense()
matrix([[1, 0, 1, 0],
[1, 2, 0, 2],
[0, 2, 3, 0],
[0, 0, 3, 4]])
Broadcasting of scalars is supported (but shape needs to be
specified):
>>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).todense()
matrix([[-2., 1., 0., 0.],
[ 1., -2., 1., 0.],
[ 0., 1., -2., 1.],
[ 0., 0., 1., -2.]])
If only one diagonal is wanted (as in `numpy.diag`), the following
works as well:
>>> diags([1, 2, 3], 1).todense()
matrix([[ 0., 1., 0., 0.],
[ 0., 0., 2., 0.],
[ 0., 0., 0., 3.],
[ 0., 0., 0., 0.]])
"""
# if offsets is not a sequence, assume that there's only one diagonal
try:
iter(offsets)
except TypeError:
# now check that there's actually only one diagonal
try:
iter(diagonals[0])
except TypeError:
diagonals = [np.atleast_1d(diagonals)]
else:
raise ValueError("Different number of diagonals and offsets.")
else:
diagonals = list(map(np.atleast_1d, diagonals))
offsets = np.atleast_1d(offsets)
# Basic check
if len(diagonals) != len(offsets):
raise ValueError("Different number of diagonals and offsets.")
# Determine shape, if omitted
if shape is None:
m = len(diagonals[0]) + abs(int(offsets[0]))
shape = (m, m)
# Determine data type, if omitted
if dtype is None:
dtype = np.common_type(*diagonals)
# Construct data array
m, n = shape
M = max([min(m + offset, n - offset) + max(0, offset)
for offset in offsets])
M = max(0, M)
data_arr = np.zeros((len(offsets), M), dtype=dtype)
for j, diagonal in enumerate(diagonals):
offset = offsets[j]
k = max(0, offset)
length = min(m + offset, n - offset)
if length <= 0:
raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
try:
data_arr[j, k:k+length] = diagonal
except ValueError:
if len(diagonal) != length and len(diagonal) != 1:
raise ValueError(
"Diagonal length (index %d: %d at offset %d) does not "
"agree with matrix size (%d, %d)." % (
j, len(diagonal), offset, m, n))
raise
return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : integer
Shape of the identity matrix.
dtype :
Data type of the matrix
format : string
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> identity(3).todense()
matrix([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<type 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
return eye(n, n, dtype=dtype, format=format)
def eye(m, n=None, k=0, dtype=float, format=None):
"""Sparse matrix with ones on diagonal
Returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
Parameters
----------
m : integer
Number of rows in the matrix.
n : integer, optional
Number of columns. Default: m
k : integer, optional
Diagonal to place ones on. Default: 0 (main diagonal)
dtype :
Data type of the matrix
format : string
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy import sparse
>>> sparse.eye(3).todense()
matrix([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> sparse.eye(3, dtype=np.int8)
<3x3 sparse matrix of type '<type 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if n is None:
n = m
m,n = int(m),int(n)
if m == n and k == 0:
# fast branch for special formats
if format in ['csr', 'csc']:
indptr = np.arange(n+1, dtype=np.intc)
indices = np.arange(n, dtype=np.intc)
data = np.ones(n, dtype=dtype)
cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
row = np.arange(n, dtype=np.intc)
col = np.arange(n, dtype=np.intc)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : string
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> A = csr_matrix(array([[0,2],[5,0]]))
>>> B = csr_matrix(array([[1,2],[3,4]]))
>>> kron(A,B).todense()
matrix([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> kron(A,[[1,2],[3,4]]).todense()
matrix([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n) respectively.
Parameters
----------
A
square matrix
B
square matrix
format : string
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
Examples
--------
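An illustrative sketch, reusing the small blocks from the `kron` example above
(exact display formatting may vary between versions):
>>> A = csr_matrix(array([[0,2],[5,0]]))
>>> B = csr_matrix(array([[1,2],[3,4]]))
>>> kronsum(A, B).todense()
matrix([[1, 2, 2, 0],
        [5, 1, 0, 2],
        [3, 0, 4, 2],
        [0, 3, 5, 4]])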
"""
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) # since L + R is not always same format
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : string
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, hstack
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5],[6]])
>>> hstack( [A,B] ).todense()
matrix([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : string
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5,6]])
>>> vstack( [A,B] ).todense()
matrix([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([[b] for b in blocks], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks : array_like
Grid of sparse matrices with compatible shapes.
An entry of None implies an all-zero matrix.
format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
The sparse format of the result (e.g. "csr"). If not given, the matrix
is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
bmat : sparse matrix
A "coo" sparse matrix or type of sparse matrix identified by `format`.
See Also
--------
block_diag, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, bmat
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5],[6]])
>>> C = coo_matrix([[7]])
>>> bmat( [[A,B],[None,C]] ).todense()
matrix([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
>>> bmat( [[A,None],[None,C]] ).todense()
matrix([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
"""
blocks = np.asarray(blocks, dtype='object')
if np.rank(blocks) != 2:
raise ValueError('blocks must have rank 2')
M,N = blocks.shape
block_mask = np.zeros(blocks.shape, dtype=np.bool)
brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc)
bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc)
# convert everything to COO format
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = coo_matrix(blocks[i,j])
blocks[i,j] = A
block_mask[i,j] = True
if brow_lengths[i] == 0:
brow_lengths[i] = A.shape[0]
else:
if brow_lengths[i] != A.shape[0]:
raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)
if bcol_lengths[j] == 0:
bcol_lengths[j] = A.shape[1]
else:
if bcol_lengths[j] != A.shape[1]:
raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)
# ensure that at least one value in each row and col is not None
if brow_lengths.min() == 0:
raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin())
if bcol_lengths.min() == 0:
raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin())
nnz = sum([A.nnz for A in blocks[block_mask]])
if dtype is None:
dtype = upcast(*tuple([A.dtype for A in blocks[block_mask]]))
row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))
data = np.empty(nnz, dtype=dtype)
row = np.empty(nnz, dtype=np.intc)
col = np.empty(nnz, dtype=np.intc)
nnz = 0
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = blocks[i,j]
data[nnz:nnz + A.nnz] = A.data
row[nnz:nnz + A.nnz] = A.row
col[nnz:nnz + A.nnz] = A.col
row[nnz:nnz + A.nnz] += row_offsets[i]
col[nnz:nnz + A.nnz] += col_offsets[j]
nnz += A.nnz
shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
"""
Build a block diagonal sparse matrix from provided matrices.
.. versionadded:: 0.11.0
Parameters
----------
A, B, ... : sequence of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the matrix
is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
res : sparse matrix
See Also
--------
bmat, diags
Examples
--------
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> block_diag((A, B, C)).todense()
matrix([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None]*nmat
if issparse(a):
row[ia] = a
else:
row[ia] = coo_matrix(a)
rows.append(row)
return bmat(rows, format=format, dtype=dtype)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformely
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str
sparse matrix format.
dtype : dtype
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Notes
-----
Only float types are supported for now.
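Examples
--------
A minimal usage sketch; the sampled values are random, so only the shape and
the number of stored entries are shown:
>>> from scipy.sparse import rand
>>> m = rand(3, 4, density=0.25, format='csr', random_state=42)
>>> m.shape, m.nnz
((3, 4), 3)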
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
if dtype and not dtype in [np.float32, np.float64, np.longdouble]:
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
# XXX: sparse uses intc instead of intp...
tp = np.intp
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such that the product of dimensions is
greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
# Generate a few more values than k so that we can get unique values
# afterwards.
# XXX: one could be smarter here
mlow = 5
fac = 1.02
gk = min(k + mlow, fac * k)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
def _gen_unique_rand(rng, _gk):
ind = rng.rand(int(_gk))
return np.unique(np.floor(ind * mn))[:k]
ind = _gen_unique_rand(random_state, gk)
while ind.size < k:
gk *= 1.05
ind = _gen_unique_rand(random_state, gk)
j = np.floor(ind * 1. / m).astype(tp)
i = (ind - j * m).astype(tp)
vals = random_state.rand(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
| apache-2.0 |
nagyistoce/odoo-dev-odoo | addons/base_setup/res_config.py | 261 | 5089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
_name = 'base.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_multi_company': fields.boolean('Manage multiple companies',
help='Work in multi-company environments, with appropriate security access between companies.\n'
'-This installs the module multi_company.'),
'module_share': fields.boolean('Allow documents sharing',
help="""Share or embbed any screen of Odoo."""),
'module_portal': fields.boolean('Activate the customer portal',
help="""Give your customers access to their documents."""),
'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
'module_base_import': fields.boolean("Allow users to import data from CSV files"),
'module_google_drive': fields.boolean('Attach Google documents to any record',
help="""This installs the module google_docs."""),
'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
help="""This installs the module google_calendar."""),
'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
}
_defaults= {
'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
}
def open_company(self, cr, uid, ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
return {
'type': 'ir.actions.act_window',
'name': 'Your Company',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'res.company',
'res_id': user.company_id.id,
'target': 'current',
}
def _change_header(self, header,font):
""" Replace default fontname use in header and setfont tag """
default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font,header)
return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font,default_para)
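# A minimal sketch of the substitution above (the header snippet and font name
# are illustrative assumptions): for header '<para fontName="Helvetica"/>' and
# font 'DejaVu Sans', _change_header returns '<para fontName="DejaVu Sans"/>',
# and any <setFont name="..."> occurrence is rewritten to the new font as well.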
def set_base_defaults(self, cr, uid, ids, context=None):
ir_model_data = self.pool.get('ir.model.data')
wizard = self.browse(cr, uid, ids, context)[0]
if wizard.font:
user = self.pool.get('res.users').browse(cr, uid, uid, context)
font_name = wizard.font.name
user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
return {}
def act_discover_fonts(self, cr, uid, ids, context=None):
return self.pool.get("res.font").font_scan(cr, uid, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm.
class sale_config_settings(osv.osv_memory):
_name = 'sale.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_web_linkedin': fields.boolean('Get contacts automatically from linkedIn',
help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
'module_crm': fields.boolean('CRM'),
'module_sale' : fields.boolean('SALE'),
'module_mass_mailing': fields.boolean(
'Manage mass mailing campaigns',
help='Get access to statistics with your mass mailing, manage campaigns.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
psdh/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
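# A minimal illustration of the collapsing rule applied to character tokens:
# runs of HTML whitespace (space, tab, newline, form feed, carriage return)
# collapse to a single space, e.g. collapse_spaces("a \t\n b") == "a b", while
# text inside <pre>, <textarea> and RCDATA elements is passed through untouched.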
| mpl-2.0 |
superdesk/superdesk-core | superdesk/io/feeding_services/file_service.py | 2 | 8059 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import os
import shutil
from datetime import datetime
from lxml import etree
from flask import current_app as app
from superdesk.errors import IngestFileError, ParserError, ProviderError
from superdesk.io.registry import register_feeding_service
from superdesk.io.feed_parsers import XMLFeedParser
from superdesk.io.feeding_services import FeedingService, OLD_CONTENT_MINUTES
from superdesk.notification import push_notification
from superdesk.utc import utc
from superdesk.utils import get_sorted_files, FileSortAttributes
logger = logging.getLogger(__name__)
class FileFeedingService(FeedingService):
"""
Feeding Service class which can read the configured local file system for article(s).
"""
NAME = "file"
ERRORS = [
ParserError.IPTC7901ParserError().get_error_description(),
ParserError.nitfParserError().get_error_description(),
ParserError.newsmlOneParserError().get_error_description(),
ProviderError.ingestError().get_error_description(),
ParserError.parseFileError().get_error_description(),
]
label = "File feed"
fields = [
{
"id": "path",
"type": "text",
"label": "Server Folder",
"placeholder": "path to folder",
"required": True,
"errors": {3003: "Path not found on server.", 3004: "Path should be directory."},
}
]
def _test(self, provider):
path = provider.get("config", {}).get("path", None)
if not os.path.exists(path):
raise IngestFileError.notExistsError()
if not os.path.isdir(path):
raise IngestFileError.isNotDirError()
def _update(self, provider, update):
# check if deprecated FILE_INGEST_OLD_CONTENT_MINUTES setting is still used
if "FILE_INGEST_OLD_CONTENT_MINUTES" in app.config:
deprecated_cont_min = app.config["FILE_INGEST_OLD_CONTENT_MINUTES"]
cont_min = app.config[OLD_CONTENT_MINUTES]
if deprecated_cont_min != cont_min:
logger.warning(
"'FILE_INGEST_OLD_CONTENT_MINUTES' is deprecated, please update settings.py to use {new_name!r}".format(
new_name=OLD_CONTENT_MINUTES
)
)
app.config[OLD_CONTENT_MINUTES] = deprecated_cont_min
self.provider = provider
self.path = provider.get("config", {}).get("path", None)
if not self.path:
logger.warning(
"File Feeding Service {} is configured without a path. Please check the configuration".format(
provider["name"]
)
)
return []
registered_parser = self.get_feed_parser(provider)
for filename in get_sorted_files(self.path, sort_by=FileSortAttributes.created):
try:
last_updated = None
file_path = os.path.join(self.path, filename)
if os.path.isfile(file_path):
last_updated = self.get_last_updated(file_path)
if self.is_latest_content(last_updated, provider.get("last_updated")):
if self.is_empty(file_path):
logger.info("Ignoring empty file {}".format(filename))
continue
if isinstance(registered_parser, XMLFeedParser):
with open(file_path, "rb") as f:
xml = etree.parse(f)
parser = self.get_feed_parser(provider, xml.getroot())
item = parser.parse(xml.getroot(), provider)
else:
parser = self.get_feed_parser(provider, file_path)
item = parser.parse(file_path, provider)
self.after_extracting(item, provider)
if isinstance(item, list):
failed = yield item
else:
failed = yield [item]
self.move_file(self.path, filename, provider=provider, success=not failed)
else:
self.move_file(self.path, filename, provider=provider, success=False)
except Exception as ex:
if last_updated and self.is_old_content(last_updated):
self.move_file(self.path, filename, provider=provider, success=False)
raise ParserError.parseFileError("{}-{}".format(provider["name"], self.NAME), filename, ex, provider)
push_notification("ingest:update")
def after_extracting(self, article, provider):
"""Sub-classes should override this method if something needs to be done to the given article.
For example, if the article comes from DPA provider the system needs to derive dateline
from the properties in the article.
Invoked after parser parses the article received from the provider.
:param article: dict having properties that can be saved into ingest collection
:type article: dict
:param provider: dict - Ingest provider details to which the current directory has been configured
:type provider: dict :py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
"""
pass
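# A minimal override sketch (hypothetical subclass; the class name and the
# dateline handling are assumptions used only to illustrate the hook):
#
#     class DPAFileFeedingService(FileFeedingService):
#         def after_extracting(self, article, provider):
#             # derive the dateline source from the provider configuration
#             article.setdefault('dateline', {})['source'] = provider.get('source')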
def move_file(self, file_path, filename, provider, success=True):
"""Move the files from the current directory to the _Processed if successful, else _Error if unsuccessful.
Creates _Processed and _Error directories within current directory if they don't exist.
:param file_path: str - current directory location
:param filename: str - file name in the current directory to move
:param provider: dict - Ingest provider details to which the current directory has been configured
:param success: bool - default value is True. When True the file is moved to the _PROCESSED directory, otherwise to _ERROR.
:raises IngestFileError.folderCreateError() if creation of the _PROCESSED or _ERROR directories fails
:raises IngestFileError.fileMoveError() if the file pointed to by filename could not be moved
"""
try:
if not os.path.exists(os.path.join(file_path, "_PROCESSED/")):
os.makedirs(os.path.join(file_path, "_PROCESSED/"))
if not os.path.exists(os.path.join(file_path, "_ERROR/")):
os.makedirs(os.path.join(file_path, "_ERROR/"))
except Exception as ex:
raise IngestFileError.folderCreateError(ex, provider)
try:
if success:
shutil.copy2(os.path.join(file_path, filename), os.path.join(file_path, "_PROCESSED/"))
else:
shutil.copy2(os.path.join(file_path, filename), os.path.join(file_path, "_ERROR/"))
except Exception as ex:
raise IngestFileError.fileMoveError(ex, provider)
finally:
os.remove(os.path.join(file_path, filename))
def is_empty(self, file_path):
"""Test if given file path is empty, return True if a file is empty"""
return not (os.path.isfile(file_path) and os.path.getsize(file_path) > 0)
def get_last_updated(self, file_path):
"""Get last updated time for file.
Using both mtime and ctime timestamps not to miss
old files being copied around and recent files after
changes done in place.
"""
stat = os.lstat(file_path)
timestamp = max(stat.st_mtime, stat.st_ctime)
return datetime.fromtimestamp(timestamp, tz=utc)
register_feeding_service(FileFeedingService)
| agpl-3.0 |
ATIX-AG/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py | 8 | 79001 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
version_added: "2.1"
short_description: Manage Azure virtual machines.
description:
- Create, update, stop and start a virtual machine. Provide an existing storage account and network interface or
allow the module to create these for you. If you choose not to provide a network interface, the resource group
must contain a virtual network with at least one subnet.
- Before Ansible 2.5, this required an image found in the Azure Marketplace which can be discovered with
M(azure_rm_virtualmachineimage_facts). In Ansible 2.5 and newer, custom images can be used as well, see the
examples for more details.
options:
resource_group:
description:
- Name of the resource group containing the virtual machine.
required: true
name:
description:
- Name of the virtual machine.
required: true
custom_data:
description:
- Data which is made available to the virtual machine and used by e.g., cloud-init.
version_added: "2.5"
state:
description:
- Assert the state of the virtual machine.
- State 'present' will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
state.
- State 'absent' will remove the virtual machine.
default: present
choices:
- absent
- present
started:
description:
- Use with state 'present' to start the machine. Set to false to have the machine be 'stopped'.
default: true
allocated:
description:
- Toggle that controls if the machine is allocated/deallocated, only useful with state='present'.
default: True
restarted:
description:
- Use with state 'present' to restart a running VM.
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
- Name assigned internally to the host. On a linux VM this is the name returned by the `hostname` command.
When creating a virtual machine, short_hostname defaults to name.
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices. Required when creating a VM.
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
is disabled by setting ssh_password_enabled to false.
ssh_password_enabled:
description:
- When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
default: true
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
image:
description:
- Specifies the image used to build the VM.
- If a string, the image is sourced from a custom image based on the
name.
- 'If a dict with the keys C(publisher), C(offer), C(sku), and
C(version), the image is sourced from a Marketplace image. NOTE:
set image.version to C(latest) to get the most recent version of a
given image.'
- 'If a dict with the keys C(name) and C(resource_group), the image
is sourced from a custom image based on the C(name) and
C(resource_group) set. NOTE: the key C(resource_group) is optional
and if omitted, all images in the subscription will be searched
for by C(name).'
- Custom image support was added in Ansible 2.5
required: true
availability_set:
description:
- Name or ID of an existing availability set to add the VM to. The availability_set should be in the same resource group as the VM.
version_added: "2.5"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
default container will be created.
default: vhds
storage_blob_name:
description:
- Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
aliases:
- storage_blob
managed_disk_type:
description:
- Managed OS disk type
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default:
- Linux
data_disks:
description:
- Describes list of data disks.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
version_added: "2.4"
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
default container will be created.
default: vhds
version_added: "2.4"
storage_blob_name:
description:
- Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
public_ip_allocation_method:
description:
- If a public IP address is created when creating the VM (because a Network Interface was not provided),
determines if the public IP address remains permanently associated with the Network Interface. If set
to 'Dynamic' the public IP address may change any time the VM is rebooted or power cycled.
- The C(Disabled) choice was added in Ansible 2.6.
choices:
- Dynamic
- Static
- Disabled
default:
- Static
aliases:
- public_ip_allocation
open_ports:
description:
- If a network interface is created when creating the VM, a security group will be created as well. For
Linux hosts a rule will be added to the security group allowing inbound TCP connections to the default
SSH port 22, and for Windows hosts ports 3389 and 5986 will be opened. Override the default open ports by
providing a list of ports.
network_interface_names:
description:
- List of existing network interface names to add to the VM. If a network interface name is not provided
when the VM is created, a default network interface will be created. In order for the module to create
a network interface, at least one Virtual Network with one Subnet must exist.
virtual_network_resource_group:
description:
- When creating a virtual machine, if a specific virtual network from another resource group should be
used, use this parameter to specify the resource group to use.
version_added: "2.4"
virtual_network_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
The new network interface will be assigned to the first virtual network found in the resource group.
Use this parameter to provide a specific virtual network instead.
aliases:
- virtual_network
subnet_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
The new network interface will be assigned to the first subnet found in the virtual network.
Use this parameter to provide a specific subnet instead.
aliases:
- subnet
remove_on_absent:
description:
- When removing a VM using state 'absent', also remove associated resources
- "It can be 'all' or a list with any of the following: ['network_interfaces', 'virtual_storage', 'public_ips']"
- Any other input will be ignored
default: ['all']
plan:
description:
- A dictionary describing a third-party billing plan for an instance
version_added: 2.5
suboptions:
name:
description:
- billing plan name
required: true
product:
description:
- product name
required: true
publisher:
description:
- publisher offering the plan
required: true
promotion_code:
description:
- optional promotion code
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create VM with defaults
azure_rm_virtualmachine:
resource_group: Testing
name: testvm10
admin_username: chouseknecht
admin_password: <your password here>
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create a VM with managed disk
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_D4
managed_disk_type: Standard_LRS
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
key_data: < insert your ssh public key here... >
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
- name: Create a VM with existing storage account and NIC
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
vm_size: Standard_D4
storage_account: testaccount001
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
key_data: < insert your ssh public key here... >
network_interfaces: testvm001
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create a VM with OS and multiple data managed disks
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_D4
managed_disk_type: Standard_LRS
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
key_data: < insert your ssh public key here... >
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
managed_disk_type: Standard_LRS
- lun: 1
disk_size_gb: 128
managed_disk_type: Premium_LRS
- name: Create a VM with OS and multiple data storage accounts
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
key_data: < insert your ssh public key here... >
network_interfaces: testvm001
storage_container: osdisk
storage_blob: osdisk.vhd
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
storage_container_name: datadisk1
storage_blob_name: datadisk1.vhd
- lun: 1
disk_size_gb: 128
storage_container_name: datadisk2
storage_blob_name: datadisk2.vhd
- name: Create a VM with a custom image
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
admin_password: password01
image: customimage001
- name: Create a VM with a custom image from a particular resource group
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
admin_password: password01
image:
name: customimage001
resource_group: Testing
- name: Power Off
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
started: no
- name: Deallocate
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
allocated: no
- name: Power On
azure_rm_virtualmachine:
resource_group:
name: testvm002
- name: Restart
azure_rm_virtualmachine:
resource_group:
name: testvm002
restarted: yes
- name: remove vm and all resources except public ips
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
state: absent
remove_on_absent:
- network_interfaces
- virtual_storage
'''
RETURN = '''
powerstate:
description: Indicates if the state is running, stopped, deallocated
returned: always
type: string
example: running
deleted_vhd_uris:
description: List of deleted Virtual Hard Disk URIs.
returned: 'on delete'
type: list
example: ["https://testvm104519.blob.core.windows.net/vhds/testvm10.vhd"]
deleted_network_interfaces:
description: List of deleted NICs.
returned: 'on delete'
type: list
example: ["testvm1001"]
deleted_public_ips:
description: List of deleted public IP address names.
returned: 'on delete'
type: list
example: ["testvm1001"]
azure_vm:
description: Facts about the current state of the object. Note that facts are not part of the registered output but available directly.
returned: always
type: complex
contains: {
"properties": {
"availabilitySet": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Compute/availabilitySets/MYAVAILABILITYSET"
},
"hardwareProfile": {
"vmSize": "Standard_D1"
},
"instanceView": {
"disks": [
{
"name": "testvm10.vhd",
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Provisioning succeeded",
"level": "Info",
"time": "2016-03-30T07:11:16.187272Z"
}
]
}
],
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Provisioning succeeded",
"level": "Info",
"time": "2016-03-30T20:33:38.946916Z"
},
{
"code": "PowerState/running",
"displayStatus": "VM running",
"level": "Info"
}
],
"vmAgent": {
"extensionHandlers": [],
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Ready",
"level": "Info",
"message": "GuestAgent is running and accepting new configurations.",
"time": "2016-03-30T20:31:16.000Z"
}
],
"vmAgentVersion": "WALinuxAgent-2.0.16"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01",
"name": "testvm10_NIC01",
"properties": {
"dnsSettings": {
"appliedDnsServers": [],
"dnsServers": []
},
"enableIPForwarding": false,
"ipConfigurations": [
{
"etag": 'W/"041c8c2a-d5dd-4cd7-8465-9125cfbe2cf8"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default",
"name": "default",
"properties": {
"privateIPAddress": "10.10.0.5",
"privateIPAllocationMethod": "Dynamic",
"provisioningState": "Succeeded",
"publicIPAddress": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/testvm10_PIP01",
"name": "testvm10_PIP01",
"properties": {
"idleTimeoutInMinutes": 4,
"ipAddress": "13.92.246.197",
"ipConfiguration": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default"
},
"provisioningState": "Succeeded",
"publicIPAllocationMethod": "Static",
"resourceGuid": "3447d987-ca0d-4eca-818b-5dddc0625b42"
}
}
}
}
],
"macAddress": "00-0D-3A-12-AA-14",
"primary": true,
"provisioningState": "Succeeded",
"resourceGuid": "10979e12-ccf9-42ee-9f6d-ff2cc63b3844",
"virtualMachine": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Compute/virtualMachines/testvm10"
}
}
}
]
},
"osProfile": {
"adminUsername": "chouseknecht",
"computerName": "test10",
"linuxConfiguration": {
"disablePasswordAuthentication": false
},
"secrets": []
},
"provisioningState": "Succeeded",
"storageProfile": {
"dataDisks": [
{
"caching": "ReadWrite",
"createOption": "empty",
"diskSizeGB": 64,
"lun": 0,
"name": "datadisk1.vhd",
"vhd": {
"uri": "https://testvm10sa1.blob.core.windows.net/datadisk/datadisk1.vhd"
}
}
],
"imageReference": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "7.1.20160308"
},
"osDisk": {
"caching": "ReadOnly",
"createOption": "fromImage",
"name": "testvm10.vhd",
"osType": "Linux",
"vhd": {
"uri": "https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd"
}
}
}
},
"type": "Microsoft.Compute/virtualMachines"
}
''' # NOQA
import base64
import random
import re
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.basic import to_native, to_bytes
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
AZURE_OBJECT_CLASS = 'VirtualMachine'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
def extract_names_from_blob_uri(blob_uri, storage_suffix):
# HACK: ditch this once python SDK supports get by URI
m = re.match(r'^https://(?P<accountname>[^.]+)\.blob\.{0}/'
r'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
if not m:
raise Exception("unable to parse blob uri '%s'" % blob_uri)
extracted_names = m.groupdict()
return extracted_names
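# A minimal sketch of the mapping performed above (account, container and blob
# names are illustrative): given the URI
# 'https://teststorage01.blob.core.windows.net/vhds/testvm10.vhd' and the storage
# suffix 'core.windows.net', the helper returns
# {'accountname': 'teststorage01', 'containername': 'vhds', 'blobname': 'testvm10.vhd'}.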
class AzureRMVirtualMachine(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
custom_data=dict(type='str'),
state=dict(choices=['present', 'absent'], default='present', type='str'),
location=dict(type='str'),
short_hostname=dict(type='str'),
vm_size=dict(type='str'),
admin_username=dict(type='str'),
admin_password=dict(type='str', no_log=True),
ssh_password_enabled=dict(type='bool', default=True),
ssh_public_keys=dict(type='list'),
image=dict(type='raw'),
availability_set=dict(type='str'),
storage_account_name=dict(type='str', aliases=['storage_account']),
storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'),
storage_blob_name=dict(type='str', aliases=['storage_blob']),
os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
default='ReadOnly'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS']),
os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static', 'Disabled'], default='Static',
aliases=['public_ip_allocation']),
open_ports=dict(type='list'),
network_interface_names=dict(type='list', aliases=['network_interfaces']),
remove_on_absent=dict(type='list', default=['all']),
virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
subnet_name=dict(type='str', aliases=['subnet']),
allocated=dict(type='bool', default=True),
restarted=dict(type='bool', default=False),
started=dict(type='bool', default=True),
data_disks=dict(type='list'),
plan=dict(type='dict')
)
self.resource_group = None
self.name = None
self.custom_data = None
self.state = None
self.location = None
self.short_hostname = None
self.vm_size = None
self.admin_username = None
self.admin_password = None
self.ssh_password_enabled = None
self.ssh_public_keys = None
self.image = None
self.availability_set = None
self.storage_account_name = None
self.storage_container_name = None
self.storage_blob_name = None
self.os_type = None
self.os_disk_caching = None
self.managed_disk_type = None
self.network_interface_names = None
self.remove_on_absent = set()
self.tags = None
self.force = None
self.public_ip_allocation_method = None
self.open_ports = None
self.virtual_network_resource_group = None
self.virtual_network_name = None
self.subnet_name = None
self.allocated = None
self.restarted = None
self.started = None
self.differences = None
self.data_disks = None
self.plan = None
self.results = dict(
changed=False,
actions=[],
powerstate_change=None,
ansible_facts=dict(azure_vm=None)
)
super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
# make sure options are lower case
self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
changed = False
powerstate_change = None
results = dict()
vm = None
network_interfaces = []
requested_vhd_uri = None
data_disk_requested_vhd_uri = None
disable_ssh_password = None
vm_dict = None
image_reference = None
custom_image = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present':
# Verify parameters and resolve any defaults
if self.vm_size and not self.vm_size_is_valid():
self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
self.vm_size
))
if self.network_interface_names:
for name in self.network_interface_names:
nic = self.get_network_interface(name)
network_interfaces.append(nic.id)
if self.ssh_public_keys:
msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
"each dict contains keys: path, key_data."
for key in self.ssh_public_keys:
if not isinstance(key, dict):
self.fail(msg)
if not key.get('path') or not key.get('key_data'):
self.fail(msg)
if self.image and isinstance(self.image, dict):
if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
marketplace_image = self.get_marketplace_image_version()
if self.image['version'] == 'latest':
self.image['version'] = marketplace_image.name
self.log("Using image version {0}".format(self.image['version']))
image_reference = self.compute_models.ImageReference(
publisher=self.image['publisher'],
offer=self.image['offer'],
sku=self.image['sku'],
version=self.image['version']
)
elif self.image.get('name'):
custom_image = True
image_reference = self.get_custom_image_reference(
self.image.get('name'),
self.image.get('resource_group'))
else:
self.fail("parameter error: expecting image to contain [publisher, offer, sku, version] or [name, resource_group]")
elif self.image and isinstance(self.image, str):
custom_image = True
image_reference = self.get_custom_image_reference(self.image)
elif self.image:
self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
if self.plan:
if not self.plan.get('name') or not self.plan.get('product') or not self.plan.get('publisher'):
self.fail("parameter error: plan must include name, product, and publisher")
if not self.storage_blob_name and not self.managed_disk_type:
self.storage_blob_name = self.name + '.vhd'
elif self.managed_disk_type:
self.storage_blob_name = self.name
if self.storage_account_name and not self.managed_disk_type:
properties = self.get_storage_account(self.storage_account_name)
requested_vhd_uri = '{0}{1}/{2}'.format(properties.primary_endpoints.blob,
self.storage_container_name,
self.storage_blob_name)
disable_ssh_password = not self.ssh_password_enabled
try:
self.log("Fetching virtual machine {0}".format(self.name))
vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
self.check_provisioning_state(vm, self.state)
vm_dict = self.serialize_vm(vm)
if self.state == 'present':
differences = []
current_nics = []
results = vm_dict
# Try to determine if the VM needs to be updated
if self.network_interface_names:
for nic in vm_dict['properties']['networkProfile']['networkInterfaces']:
current_nics.append(nic['id'])
if set(current_nics) != set(network_interfaces):
self.log('CHANGED: virtual machine {0} - network interfaces are different.'.format(self.name))
differences.append('Network Interfaces')
updated_nics = [dict(id=id, primary=(i == 0))
for i, id in enumerate(network_interfaces)]
vm_dict['properties']['networkProfile']['networkInterfaces'] = updated_nics
changed = True
if self.os_disk_caching and \
self.os_disk_caching != vm_dict['properties']['storageProfile']['osDisk']['caching']:
self.log('CHANGED: virtual machine {0} - OS disk caching'.format(self.name))
differences.append('OS Disk caching')
changed = True
vm_dict['properties']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
update_tags, vm_dict['tags'] = self.update_tags(vm_dict.get('tags', dict()))
if update_tags:
differences.append('Tags')
changed = True
if self.short_hostname and self.short_hostname != vm_dict['properties']['osProfile']['computerName']:
self.log('CHANGED: virtual machine {0} - short hostname'.format(self.name))
differences.append('Short Hostname')
changed = True
vm_dict['properties']['osProfile']['computerName'] = self.short_hostname
if self.started and vm_dict['powerstate'] not in ['starting', 'running'] and self.allocated:
self.log("CHANGED: virtual machine {0} not running and requested state 'running'".format(self.name))
changed = True
powerstate_change = 'poweron'
elif self.state == 'present' and vm_dict['powerstate'] == 'running' and self.restarted:
self.log("CHANGED: virtual machine {0} {1} and requested state 'restarted'"
.format(self.name, vm_dict['powerstate']))
changed = True
powerstate_change = 'restarted'
elif self.state == 'present' and not self.allocated and vm_dict['powerstate'] not in ['deallocated', 'deallocating']:
self.log("CHANGED: virtual machine {0} {1} and requested state 'deallocated'"
.format(self.name, vm_dict['powerstate']))
changed = True
powerstate_change = 'deallocated'
elif not self.started and vm_dict['powerstate'] == 'running':
self.log("CHANGED: virtual machine {0} running and requested state 'stopped'".format(self.name))
changed = True
powerstate_change = 'poweroff'
self.differences = differences
elif self.state == 'absent':
self.log("CHANGED: virtual machine {0} exists and requested state is 'absent'".format(self.name))
results = dict()
changed = True
except CloudError:
self.log('Virtual machine {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: virtual machine {0} does not exist but state is 'present'.".format(self.name))
changed = True
self.results['changed'] = changed
self.results['ansible_facts']['azure_vm'] = results
self.results['powerstate_change'] = powerstate_change
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
default_storage_account = None
if not vm:
# Create the VM
self.log("Create virtual machine {0}".format(self.name))
self.results['actions'].append('Created VM {0}'.format(self.name))
# Validate parameters
if not self.admin_username:
self.fail("Parameter error: admin_username required when creating a virtual machine.")
if self.os_type == 'Linux':
if disable_ssh_password and not self.ssh_public_keys:
self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
if not image_reference:
self.fail("Parameter error: an image is required when creating a virtual machine.")
availability_set_resource = None
if self.availability_set:
parsed_availability_set = parse_resource_id(self.availability_set)
availability_set = self.get_availability_set(parsed_availability_set.get('resource_group', self.resource_group),
parsed_availability_set.get('name'))
availability_set_resource = self.compute_models.SubResource(availability_set.id)
# Get defaults
if not self.network_interface_names:
default_nic = self.create_default_nic()
self.log("network interface:")
self.log(self.serialize_obj(default_nic, 'NetworkInterface'), pretty_print=True)
network_interfaces = [default_nic.id]
# os disk
if not self.storage_account_name and not self.managed_disk_type:
storage_account = self.create_default_storage_account()
self.log("storage account:")
self.log(self.serialize_obj(storage_account, 'StorageAccount'), pretty_print=True)
requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
storage_account.name,
self._cloud_environment.suffixes.storage_endpoint,
self.storage_container_name,
self.storage_blob_name)
default_storage_account = storage_account # store for use by data disks if necessary
if not self.short_hostname:
self.short_hostname = self.name
nics = [self.compute_models.NetworkInterfaceReference(id=id, primary=(i == 0))
for i, id in enumerate(network_interfaces)]
# os disk
if self.managed_disk_type:
vhd = None
managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=self.managed_disk_type)
elif custom_image:
vhd = None
managed_disk = None
else:
vhd = self.compute_models.VirtualHardDisk(uri=requested_vhd_uri)
managed_disk = None
plan = None
if self.plan:
plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'),
publisher=self.plan.get('publisher'),
promotion_code=self.plan.get('promotion_code'))
vm_resource = self.compute_models.VirtualMachine(
self.location,
tags=self.tags,
os_profile=self.compute_models.OSProfile(
admin_username=self.admin_username,
computer_name=self.short_hostname,
),
hardware_profile=self.compute_models.HardwareProfile(
vm_size=self.vm_size
),
storage_profile=self.compute_models.StorageProfile(
os_disk=self.compute_models.OSDisk(
name=self.storage_blob_name,
vhd=vhd,
managed_disk=managed_disk,
create_option=self.compute_models.DiskCreateOptionTypes.from_image,
caching=self.os_disk_caching,
),
image_reference=image_reference,
),
network_profile=self.compute_models.NetworkProfile(
network_interfaces=nics
),
availability_set=availability_set_resource,
plan=plan
)
if self.admin_password:
vm_resource.os_profile.admin_password = self.admin_password
if self.custom_data:
# Azure SDK (erroneously?) wants native string type for this
vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data)))
if self.os_type == 'Linux':
vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
disable_password_authentication=disable_ssh_password
)
if self.ssh_public_keys:
ssh_config = self.compute_models.SshConfiguration()
ssh_config.public_keys = \
[self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
vm_resource.os_profile.linux_configuration.ssh = ssh_config
# data disk
if self.data_disks:
data_disks = []
count = 0
for data_disk in self.data_disks:
if not data_disk.get('managed_disk_type'):
if not data_disk.get('storage_blob_name'):
data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
count += 1
if data_disk.get('storage_account_name'):
data_disk_storage_account = self.get_storage_account(data_disk['storage_account_name'])
else:
if not default_storage_account:
data_disk_storage_account = self.create_default_storage_account()
self.log("data disk storage account:")
self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
default_storage_account = data_disk_storage_account # store for use by future data disks if necessary
else:
data_disk_storage_account = default_storage_account
if not data_disk.get('storage_container_name'):
data_disk['storage_container_name'] = 'vhds'
data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
data_disk_storage_account.name,
self._cloud_environment.suffixes.storage_endpoint,
data_disk['storage_container_name'],
data_disk['storage_blob_name']
)
if not data_disk.get('managed_disk_type'):
data_disk_managed_disk = None
disk_name = data_disk['storage_blob_name']
data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
else:
data_disk_vhd = None
data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
disk_name = self.name + "-datadisk-" + str(count)
count += 1
data_disk['caching'] = data_disk.get(
'caching', 'ReadOnly'
)
data_disks.append(self.compute_models.DataDisk(
lun=data_disk['lun'],
name=disk_name,
vhd=data_disk_vhd,
caching=data_disk['caching'],
create_option=self.compute_models.DiskCreateOptionTypes.empty,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=data_disk_managed_disk,
))
vm_resource.storage_profile.data_disks = data_disks
self.log("Create virtual machine with parameters:")
self.create_or_update_vm(vm_resource)
elif self.differences and len(self.differences) > 0:
# Update the VM based on detected config differences
self.log("Update virtual machine {0}".format(self.name))
self.results['actions'].append('Updated VM {0}'.format(self.name))
nics = [self.compute_models.NetworkInterfaceReference(id=interface['id'], primary=(i == 0))
for i, interface in enumerate(vm_dict['properties']['networkProfile']['networkInterfaces'])]
# os disk
if not vm_dict['properties']['storageProfile']['osDisk'].get('managedDisk'):
managed_disk = None
vhd = self.compute_models.VirtualHardDisk(uri=vm_dict['properties']['storageProfile']['osDisk']['vhd']['uri'])
else:
vhd = None
managed_disk = self.compute_models.ManagedDiskParameters(
storage_account_type=vm_dict['properties']['storageProfile']['osDisk']['managedDisk']['storageAccountType']
)
availability_set_resource = None
try:
availability_set_resource = self.compute_models.SubResource(vm_dict['properties']['availabilitySet']['id'])
except Exception:
# pass if the availability set is not set
pass
vm_resource = self.compute_models.VirtualMachine(
vm_dict['location'],
os_profile=self.compute_models.OSProfile(
admin_username=vm_dict['properties']['osProfile']['adminUsername'],
computer_name=vm_dict['properties']['osProfile']['computerName']
),
hardware_profile=self.compute_models.HardwareProfile(
vm_size=vm_dict['properties']['hardwareProfile']['vmSize']
),
storage_profile=self.compute_models.StorageProfile(
os_disk=self.compute_models.OSDisk(
name=vm_dict['properties']['storageProfile']['osDisk']['name'],
vhd=vhd,
managed_disk=managed_disk,
create_option=vm_dict['properties']['storageProfile']['osDisk']['createOption'],
os_type=vm_dict['properties']['storageProfile']['osDisk']['osType'],
caching=vm_dict['properties']['storageProfile']['osDisk']['caching'],
),
image_reference=self.compute_models.ImageReference(
publisher=vm_dict['properties']['storageProfile']['imageReference']['publisher'],
offer=vm_dict['properties']['storageProfile']['imageReference']['offer'],
sku=vm_dict['properties']['storageProfile']['imageReference']['sku'],
version=vm_dict['properties']['storageProfile']['imageReference']['version']
),
),
availability_set=availability_set_resource,
network_profile=self.compute_models.NetworkProfile(
network_interfaces=nics
),
)
if vm_dict.get('tags'):
vm_resource.tags = vm_dict['tags']
# Add custom_data, if provided
if vm_dict['properties']['osProfile'].get('customData'):
custom_data = vm_dict['properties']['osProfile']['customData']
# Azure SDK (erroneously?) wants native string type for this
vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(custom_data)))
# Add admin password, if one provided
if vm_dict['properties']['osProfile'].get('adminPassword'):
vm_resource.os_profile.admin_password = vm_dict['properties']['osProfile']['adminPassword']
# Add linux configuration, if applicable
linux_config = vm_dict['properties']['osProfile'].get('linuxConfiguration')
if linux_config:
ssh_config = linux_config.get('ssh', None)
vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
disable_password_authentication=linux_config.get('disablePasswordAuthentication', False)
)
if ssh_config:
public_keys = ssh_config.get('publicKeys')
if public_keys:
vm_resource.os_profile.linux_configuration.ssh = self.compute_models.SshConfiguration(public_keys=[])
for key in public_keys:
vm_resource.os_profile.linux_configuration.ssh.public_keys.append(
self.compute_models.SshPublicKey(path=key['path'], key_data=key['keyData'])
)
# data disk
if vm_dict['properties']['storageProfile'].get('dataDisks'):
data_disks = []
for data_disk in vm_dict['properties']['storageProfile']['dataDisks']:
if data_disk.get('managedDisk'):
managed_disk_type = data_disk['managedDisk']['storageAccountType']
data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=managed_disk_type)
data_disk_vhd = None
else:
data_disk_vhd = data_disk['vhd']['uri']
data_disk_managed_disk = None
data_disks.append(self.compute_models.DataDisk(
lun=int(data_disk['lun']),
name=data_disk.get('name'),
vhd=data_disk_vhd,
caching=data_disk.get('caching'),
create_option=data_disk.get('createOption'),
disk_size_gb=int(data_disk['diskSizeGB']),
managed_disk=data_disk_managed_disk,
))
vm_resource.storage_profile.data_disks = data_disks
self.log("Update virtual machine with parameters:")
self.create_or_update_vm(vm_resource)
# Make sure we leave the machine in requested power state
if (powerstate_change == 'poweron' and
self.results['ansible_facts']['azure_vm']['powerstate'] != 'running'):
# Attempt to power on the machine
self.power_on_vm()
elif (powerstate_change == 'poweroff' and
self.results['ansible_facts']['azure_vm']['powerstate'] == 'running'):
# Attempt to power off the machine
self.power_off_vm()
elif powerstate_change == 'restarted':
self.restart_vm()
elif powerstate_change == 'deallocated':
self.deallocate_vm()
self.results['ansible_facts']['azure_vm'] = self.serialize_vm(self.get_vm())
elif self.state == 'absent':
# delete the VM
self.log("Delete virtual machine {0}".format(self.name))
self.results['ansible_facts']['azure_vm'] = None
self.delete_vm(vm)
# until we sort out how we want to do this globally
del self.results['actions']
return self.results
def get_vm(self):
'''
Get the VM with expanded instanceView
:return: VirtualMachine object
'''
try:
vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
return vm
except Exception as exc:
self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc)))
def serialize_vm(self, vm):
'''
Convert a VirtualMachine object to dict.
:param vm: VirtualMachine object
:return: dict
'''
result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
result['id'] = vm.id
result['name'] = vm.name
result['type'] = vm.type
result['location'] = vm.location
result['tags'] = vm.tags
result['powerstate'] = dict()
if vm.instance_view:
result['powerstate'] = next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
# Expand network interfaces to include config properties
for interface in vm.network_profile.network_interfaces:
int_dict = azure_id_to_dict(interface.id)
nic = self.get_network_interface(int_dict['networkInterfaces'])
for interface_dict in result['properties']['networkProfile']['networkInterfaces']:
if interface_dict['id'] == interface.id:
nic_dict = self.serialize_obj(nic, 'NetworkInterface')
interface_dict['name'] = int_dict['networkInterfaces']
interface_dict['properties'] = nic_dict['properties']
# Expand public IPs to include config properties
for interface in result['properties']['networkProfile']['networkInterfaces']:
for config in interface['properties']['ipConfigurations']:
if config['properties'].get('publicIPAddress'):
pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])
try:
pip = self.network_client.public_ip_addresses.get(self.resource_group,
pipid_dict['publicIPAddresses'])
except Exception as exc:
self.fail("Error fetching public ip {0} - {1}".format(pipid_dict['publicIPAddresses'],
str(exc)))
pip_dict = self.serialize_obj(pip, 'PublicIPAddress')
config['properties']['publicIPAddress']['name'] = pipid_dict['publicIPAddresses']
config['properties']['publicIPAddress']['properties'] = pip_dict['properties']
self.log(result, pretty_print=True)
if self.state != 'absent' and not result['powerstate']:
self.fail("Failed to determine PowerState of virtual machine {0}".format(self.name))
return result
def power_off_vm(self):
self.log("Powered off virtual machine {0}".format(self.name))
self.results['actions'].append("Powered off virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.power_off(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error powering off virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def power_on_vm(self):
self.results['actions'].append("Powered on virtual machine {0}".format(self.name))
self.log("Power on virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.start(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error powering on virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def restart_vm(self):
self.results['actions'].append("Restarted virtual machine {0}".format(self.name))
self.log("Restart virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.restart(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error restarting virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def deallocate_vm(self):
self.results['actions'].append("Deallocated virtual machine {0}".format(self.name))
self.log("Deallocate virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.deallocate(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deallocating virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def delete_vm(self, vm):
vhd_uris = []
managed_disk_ids = []
nic_names = []
pip_names = []
if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
# store the attached vhd info so we can nuke it after the VM is gone
if vm.storage_profile.os_disk.managed_disk:
self.log('Storing managed disk ID for deletion')
managed_disk_ids.append(vm.storage_profile.os_disk.managed_disk.id)
elif vm.storage_profile.os_disk.vhd:
self.log('Storing VHD URI for deletion')
vhd_uris.append(vm.storage_profile.os_disk.vhd.uri)
data_disks = vm.storage_profile.data_disks
for data_disk in data_disks:
if data_disk.vhd:
vhd_uris.append(data_disk.vhd.uri)
elif data_disk.managed_disk:
managed_disk_ids.append(data_disk.managed_disk.id)
# FUTURE enable diff mode, move these there...
self.log("VHD URIs to delete: {0}".format(', '.join(vhd_uris)))
self.results['deleted_vhd_uris'] = vhd_uris
self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
self.results['deleted_managed_disk_ids'] = managed_disk_ids
if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
# store the attached nic info so we can nuke them after the VM is gone
self.log('Storing NIC names for deletion.')
for interface in vm.network_profile.network_interfaces:
id_dict = azure_id_to_dict(interface.id)
nic_names.append(id_dict['networkInterfaces'])
self.log('NIC names to delete {0}'.format(', '.join(nic_names)))
self.results['deleted_network_interfaces'] = nic_names
if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
# also store each nic's attached public IPs and delete after the NIC is gone
for name in nic_names:
nic = self.get_network_interface(name)
for ipc in nic.ip_configurations:
if ipc.public_ip_address:
pip_dict = azure_id_to_dict(ipc.public_ip_address.id)
pip_names.append(pip_dict['publicIPAddresses'])
self.log('Public IPs to delete are {0}'.format(', '.join(pip_names)))
self.results['deleted_public_ips'] = pip_names
self.log("Deleting virtual machine {0}".format(self.name))
self.results['actions'].append("Deleted virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.delete(self.resource_group, self.name)
# wait for the poller to finish
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting virtual machine {0} - {1}".format(self.name, str(exc)))
# TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
# TODO: best-effort to keep deleting other linked resources if we encounter an error
if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
self.log('Deleting VHDs')
self.delete_vm_storage(vhd_uris)
self.log('Deleting managed disks')
self.delete_managed_disks(managed_disk_ids)
if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
self.log('Deleting network interfaces')
for name in nic_names:
self.delete_nic(name)
if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
self.log('Deleting public IPs')
for name in pip_names:
self.delete_pip(name)
return True
def get_network_interface(self, name):
try:
nic = self.network_client.network_interfaces.get(self.resource_group, name)
return nic
except Exception as exc:
self.fail("Error fetching network interface {0} - {1}".format(name, str(exc)))
def delete_nic(self, name):
self.log("Deleting network interface {0}".format(name))
self.results['actions'].append("Deleted network interface {0}".format(name))
try:
poller = self.network_client.network_interfaces.delete(self.resource_group, name)
except Exception as exc:
self.fail("Error deleting network interface {0} - {1}".format(name, str(exc)))
self.get_poller_result(poller)
# Delete doesn't return anything. If we get this far, assume success
return True
def delete_pip(self, name):
self.results['actions'].append("Deleted public IP {0}".format(name))
try:
poller = self.network_client.public_ip_addresses.delete(self.resource_group, name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting {0} - {1}".format(name, str(exc)))
# Delete returns nada. If we get here, assume that all is well.
return True
def delete_managed_disks(self, managed_disk_ids):
for mdi in managed_disk_ids:
try:
poller = self.rm_client.resources.delete_by_id(mdi, '2017-03-30')
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting managed disk {0} - {1}".format(mdi, str(exc)))
def delete_vm_storage(self, vhd_uris):
# FUTURE: figure out a cloud_env independent way to delete these
for uri in vhd_uris:
self.log("Extracting info from blob uri '{0}'".format(uri))
try:
blob_parts = extract_names_from_blob_uri(uri, self._cloud_environment.suffixes.storage_endpoint)
except Exception as exc:
self.fail("Error parsing blob URI {0}".format(str(exc)))
storage_account_name = blob_parts['accountname']
container_name = blob_parts['containername']
blob_name = blob_parts['blobname']
blob_client = self.get_blob_client(self.resource_group, storage_account_name)
self.log("Delete blob {0}:{1}".format(container_name, blob_name))
self.results['actions'].append("Deleted blob {0}:{1}".format(container_name, blob_name))
try:
blob_client.delete_blob(container_name, blob_name)
except Exception as exc:
self.fail("Error deleting blob {0}:{1} - {2}".format(container_name, blob_name, str(exc)))
def get_marketplace_image_version(self):
try:
versions = self.compute_client.virtual_machine_images.list(self.location,
self.image['publisher'],
self.image['offer'],
self.image['sku'])
except Exception as exc:
self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
str(exc)))
if versions and len(versions) > 0:
if self.image['version'] == 'latest':
return versions[-1]
for version in versions:
if version.name == self.image['version']:
return version
self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
self.image['version']))
def get_custom_image_reference(self, name, resource_group=None):
try:
if resource_group:
vm_images = self.compute_client.images.list_by_resource_group(resource_group)
else:
vm_images = self.compute_client.images.list()
except Exception as exc:
self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
for vm_image in vm_images:
if vm_image.name == name:
self.log("Using custom image id {0}".format(vm_image.id))
return self.compute_models.ImageReference(id=vm_image.id)
self.fail("Error could not find image with name {0}".format(name))
def get_availability_set(self, resource_group, name):
try:
return self.compute_client.availability_sets.get(resource_group, name)
except Exception as exc:
self.fail("Error fetching availability set {0} - {1}".format(name, str(exc)))
def get_storage_account(self, name):
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group,
name)
return account
except Exception as exc:
self.fail("Error fetching storage account {0} - {1}".format(name, str(exc)))
def create_or_update_vm(self, params):
try:
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, self.name, params)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
def vm_size_is_valid(self):
'''
Validate self.vm_size against the list of virtual machine sizes available for the account and location.
:return: boolean
'''
try:
sizes = self.compute_client.virtual_machine_sizes.list(self.location)
except Exception as exc:
self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
for size in sizes:
if size.name == self.vm_size:
return True
return False
def create_default_storage_account(self):
'''
Return a default storage account named <vm name>XXXX, where XXXX is a random number.
If an account with that name already exists, use it; otherwise, create it.
:return: storage account object
'''
account = None
valid_name = False
# Attempt to find a valid storage account name
storage_account_name_base = re.sub('[^a-zA-Z0-9]', '', self.name[:20].lower())
for i in range(0, 5):
rand = random.randrange(1000, 9999)
storage_account_name = storage_account_name_base + str(rand)
if self.check_storage_account_name(storage_account_name):
valid_name = True
break
if not valid_name:
self.fail("Failed to create a unique storage account name for {0}. Try using a different VM name."
.format(self.name))
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group, storage_account_name)
except CloudError:
pass
if account:
self.log("Storage account {0} found.".format(storage_account_name))
self.check_provisioning_state(account)
return account
sku = self.storage_models.Sku(self.storage_models.SkuName.standard_lrs)
sku.tier = self.storage_models.SkuTier.standard
kind = self.storage_models.Kind.storage
parameters = self.storage_models.StorageAccountCreateParameters(sku, kind, self.location)
self.log("Creating storage account {0} in location {1}".format(storage_account_name, self.location))
self.results['actions'].append("Created storage account {0}".format(storage_account_name))
try:
poller = self.storage_client.storage_accounts.create(self.resource_group, storage_account_name, parameters)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Failed to create storage account: {0} - {1}".format(storage_account_name, str(exc)))
return self.get_storage_account(storage_account_name)
def check_storage_account_name(self, name):
self.log("Checking storage account name availability for {0}".format(name))
try:
response = self.storage_client.storage_accounts.check_name_availability(name)
if response.reason == 'AccountNameInvalid':
raise Exception("Invalid default storage account name: {0}".format(name))
except Exception as exc:
self.fail("Error checking storage account name availability for {0} - {1}".format(name, str(exc)))
return response.name_available
def create_default_nic(self):
'''
Create a default Network Interface <vm name>01. Requires an existing virtual network
with one subnet. If NIC <vm name>01 exists, use it. Otherwise, create one.
:return: NIC object
'''
network_interface_name = self.name + '01'
nic = None
self.log("Create default NIC {0}".format(network_interface_name))
self.log("Check to see if NIC {0} exists".format(network_interface_name))
try:
nic = self.network_client.network_interfaces.get(self.resource_group, network_interface_name)
except CloudError:
pass
if nic:
self.log("NIC {0} found.".format(network_interface_name))
self.check_provisioning_state(nic)
return nic
self.log("NIC {0} does not exist.".format(network_interface_name))
virtual_network_resource_group = None
if self.virtual_network_resource_group:
virtual_network_resource_group = self.virtual_network_resource_group
else:
virtual_network_resource_group = self.resource_group
if self.virtual_network_name:
try:
self.network_client.virtual_networks.list(virtual_network_resource_group, self.virtual_network_name)
virtual_network_name = self.virtual_network_name
except CloudError as exc:
self.fail("Error: fetching virtual network {0} - {1}".format(self.virtual_network_name, str(exc)))
else:
# Find a virtual network
no_vnets_msg = "Error: unable to find virtual network in resource group {0}. A virtual network " \
"with at least one subnet must exist in order to create a NIC for the virtual " \
"machine.".format(virtual_network_resource_group)
virtual_network_name = None
try:
vnets = self.network_client.virtual_networks.list(virtual_network_resource_group)
except CloudError:
self.log('cloud error!')
self.fail(no_vnets_msg)
for vnet in vnets:
virtual_network_name = vnet.name
self.log('vnet name: {0}'.format(vnet.name))
break
if not virtual_network_name:
self.fail(no_vnets_msg)
if self.subnet_name:
try:
subnet = self.network_client.subnets.get(virtual_network_resource_group, virtual_network_name, self.subnet_name)
subnet_id = subnet.id
except Exception as exc:
self.fail("Error: fetching subnet {0} - {1}".format(self.subnet_name, str(exc)))
else:
no_subnets_msg = "Error: unable to find a subnet in virtual network {0}. A virtual network " \
"with at least one subnet must exist in order to create a NIC for the virtual " \
"machine.".format(virtual_network_name)
subnet_id = None
try:
subnets = self.network_client.subnets.list(virtual_network_resource_group, virtual_network_name)
except CloudError:
self.fail(no_subnets_msg)
for subnet in subnets:
subnet_id = subnet.id
self.log('subnet id: {0}'.format(subnet_id))
break
if not subnet_id:
self.fail(no_subnets_msg)
pip = None
if self.public_ip_allocation_method != 'Disabled':
self.results['actions'].append('Created default public IP {0}'.format(self.name + '01'))
pip_info = self.create_default_pip(self.resource_group, self.location, self.name + '01', self.public_ip_allocation_method)
pip = self.network_models.PublicIPAddress(id=pip_info.id, location=pip_info.location, resource_guid=pip_info.resource_guid)
self.results['actions'].append('Created default security group {0}'.format(self.name + '01'))
group = self.create_default_securitygroup(self.resource_group, self.location, self.name + '01', self.os_type,
self.open_ports)
parameters = self.network_models.NetworkInterface(
location=self.location,
ip_configurations=[
self.network_models.NetworkInterfaceIPConfiguration(
private_ip_allocation_method='Dynamic',
)
]
)
parameters.ip_configurations[0].subnet = self.network_models.Subnet(id=subnet_id)
parameters.ip_configurations[0].name = 'default'
parameters.network_security_group = self.network_models.NetworkSecurityGroup(id=group.id,
location=group.location,
resource_guid=group.resource_guid)
parameters.ip_configurations[0].public_ip_address = pip
self.log("Creating NIC {0}".format(network_interface_name))
self.log(self.serialize_obj(parameters, 'NetworkInterface'), pretty_print=True)
self.results['actions'].append("Created NIC {0}".format(network_interface_name))
try:
poller = self.network_client.network_interfaces.create_or_update(self.resource_group,
network_interface_name,
parameters)
new_nic = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating network interface {0} - {1}".format(network_interface_name, str(exc)))
return new_nic
def main():
AzureRMVirtualMachine()
if __name__ == '__main__':
main()
| gpl-3.0 |
Qalthos/ansible | test/units/module_utils/test_hetzner.py | 31 | 7252 | # Copyright: (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import copy
import json
import pytest
from mock import MagicMock
from ansible.module_utils import hetzner
class ModuleFailException(Exception):
def __init__(self, msg, **kwargs):
super(ModuleFailException, self).__init__(msg)
self.fail_msg = msg
self.fail_kwargs = kwargs
def get_module_mock():
def f(msg, **kwargs):
raise ModuleFailException(msg, **kwargs)
module = MagicMock()
module.fail_json = f
module.from_json = json.loads
return module
# ########################################################################################
FETCH_URL_JSON_SUCCESS = [
(
(None, dict(
body=json.dumps(dict(
a='b'
)).encode('utf-8'),
)),
None,
(dict(
a='b'
), None)
),
(
(None, dict(
body=json.dumps(dict(
error=dict(
code="foo",
status=400,
message="bar",
),
a='b'
)).encode('utf-8'),
)),
['foo'],
(dict(
error=dict(
code="foo",
status=400,
message="bar",
),
a='b'
), 'foo')
),
]
FETCH_URL_JSON_FAIL = [
(
(None, dict(
body=json.dumps(dict(
error=dict(
code="foo",
status=400,
message="bar",
),
)).encode('utf-8'),
)),
None,
'Request failed: 400 foo (bar)'
),
(
(None, dict(
body=json.dumps(dict(
error=dict(
code="foo",
status=400,
message="bar",
),
)).encode('utf-8'),
)),
['bar'],
'Request failed: 400 foo (bar)'
),
]
@pytest.mark.parametrize("return_value, accept_errors, result", FETCH_URL_JSON_SUCCESS)
def test_fetch_url_json(monkeypatch, return_value, accept_errors, result):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=return_value)
assert hetzner.fetch_url_json(module, 'https://foo/bar', accept_errors=accept_errors) == result
@pytest.mark.parametrize("return_value, accept_errors, result", FETCH_URL_JSON_FAIL)
def test_fetch_url_json_fail(monkeypatch, return_value, accept_errors, result):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=return_value)
with pytest.raises(ModuleFailException) as exc:
hetzner.fetch_url_json(module, 'https://foo/bar', accept_errors=accept_errors)
assert exc.value.fail_msg == result
assert exc.value.fail_kwargs == dict()
# ########################################################################################
GET_FAILOVER_SUCCESS = [
(
'1.2.3.4',
(None, dict(
body=json.dumps(dict(
failover=dict(
active_server_ip='1.1.1.1',
ip='1.2.3.4',
netmask='255.255.255.255',
)
)).encode('utf-8'),
)),
'1.1.1.1',
dict(
active_server_ip='1.1.1.1',
ip='1.2.3.4',
netmask='255.255.255.255',
)
),
]
GET_FAILOVER_FAIL = [
(
'1.2.3.4',
(None, dict(
body=json.dumps(dict(
error=dict(
code="foo",
status=400,
message="bar",
),
)).encode('utf-8'),
)),
'Request failed: 400 foo (bar)'
),
]
@pytest.mark.parametrize("ip, return_value, result, record", GET_FAILOVER_SUCCESS)
def test_get_failover_record(monkeypatch, ip, return_value, result, record):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
assert hetzner.get_failover_record(module, ip) == record
@pytest.mark.parametrize("ip, return_value, result", GET_FAILOVER_FAIL)
def test_get_failover_record_fail(monkeypatch, ip, return_value, result):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
with pytest.raises(ModuleFailException) as exc:
hetzner.get_failover_record(module, ip)
assert exc.value.fail_msg == result
assert exc.value.fail_kwargs == dict()
@pytest.mark.parametrize("ip, return_value, result, record", GET_FAILOVER_SUCCESS)
def test_get_failover(monkeypatch, ip, return_value, result, record):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
assert hetzner.get_failover(module, ip) == result
@pytest.mark.parametrize("ip, return_value, result", GET_FAILOVER_FAIL)
def test_get_failover_fail(monkeypatch, ip, return_value, result):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
with pytest.raises(ModuleFailException) as exc:
hetzner.get_failover(module, ip)
assert exc.value.fail_msg == result
assert exc.value.fail_kwargs == dict()
# ########################################################################################
SET_FAILOVER_SUCCESS = [
(
'1.2.3.4',
'1.1.1.1',
(None, dict(
body=json.dumps(dict(
failover=dict(
active_server_ip='1.1.1.2',
)
)).encode('utf-8'),
)),
('1.1.1.2', True)
),
(
'1.2.3.4',
'1.1.1.1',
(None, dict(
body=json.dumps(dict(
error=dict(
code="FAILOVER_ALREADY_ROUTED",
status=400,
message="Failover already routed",
),
)).encode('utf-8'),
)),
('1.1.1.1', False)
),
]
SET_FAILOVER_FAIL = [
(
'1.2.3.4',
'1.1.1.1',
(None, dict(
body=json.dumps(dict(
error=dict(
code="foo",
status=400,
message="bar",
),
)).encode('utf-8'),
)),
'Request failed: 400 foo (bar)'
),
]
@pytest.mark.parametrize("ip, value, return_value, result", SET_FAILOVER_SUCCESS)
def test_set_failover(monkeypatch, ip, value, return_value, result):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
assert hetzner.set_failover(module, ip, value) == result
@pytest.mark.parametrize("ip, value, return_value, result", SET_FAILOVER_FAIL)
def test_set_failover_fail(monkeypatch, ip, value, return_value, result):
module = get_module_mock()
hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
with pytest.raises(ModuleFailException) as exc:
hetzner.set_failover(module, ip, value)
assert exc.value.fail_msg == result
assert exc.value.fail_kwargs == dict()
| gpl-3.0 |
fergalbyrne/nupic | examples/opf/experiments/multistep/hotgym/permutations.py | 38 | 3713 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'
permutations = {
'modelParams': {
'inferenceType': PermuteChoices(['NontemporalMultiStep', 'TemporalMultiStep']),
'sensorParams': {
'encoders': {
'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteFloat(1.000000, 6.000000), w=21),
'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteFloat(0.500000, 12.000000), w=21),
'consumption': PermuteEncoder(fieldName='consumption', encoderClass='AdaptiveScalarEncoder', n=PermuteInt(28, 521), w=21, clipInput=True),
'timestamp_weekend': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.weekend', radius=PermuteChoices([1]), w=21),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
'pamLength': PermuteInt(1, 5),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*consumption.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'prediction:aae:window=1000:field=consumption')
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=consumption"
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
of the variables in the permutations dict. It should return True for a valid
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
| agpl-3.0 |
ncrocfer/weevely3 | testsuite/test_file_download.py | 14 | 4127 | from testsuite.base_test import BaseTest
from testfixtures import log_capture
from testsuite import config
from core.sessions import SessionURL
from core import modules
from core import messages
import subprocess
import tempfile
import datetime
import logging
import os
class FileDownload(BaseTest):
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
self.file_ok = os.path.join(config.script_folder, 'ok.test')
self.check_call(
config.cmd_env_content_s_to_s % ('OK', self.file_ok),
shell=True)
self.file_ko = os.path.join(config.script_folder, 'ko.test')
self.check_call(
config.cmd_env_content_s_to_s % ('KO', self.file_ko),
shell=True)
# Set ko.test to ---x--x--x 0111 execute, should be no readable
self.check_call(
config.cmd_env_chmod_s_s % ('0111', self.file_ko),
shell=True)
self.run_argv = modules.loaded['file_download'].run_argv
def tearDown(self):
self.check_call(
config.cmd_env_chmod_s_s % ('0777', '%s %s' % (self.file_ok, self.file_ko)),
shell=True)
self.check_call(
config.cmd_env_remove_s % ('%s %s' % (self.file_ok, self.file_ko)),
shell=True)
def test_download_php(self):
temp_file = tempfile.NamedTemporaryFile()
# Simple download
self.assertEqual(self.run_argv(['ok.test', temp_file.name]), 'OK')
self.assertEqual(open(temp_file.name,'r').read(), 'OK')
temp_file.truncate()
# Download binary. Skip the check because I don't know the remote content, and
# the md5 check is already done inside file_download.
self.assertTrue(self.run_argv(['/bin/ls', temp_file.name]))
self.assertTrue(open(temp_file.name,'r').read(), 'OK')
temp_file.truncate()
# Download of an unreadable file
self.assertEqual(self.run_argv(['ko.test', temp_file.name]), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
# Download of an remote unexistant file
self.assertEqual(self.run_argv(['bogus', temp_file.name]), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
# Download to a local unexistant folder
self.assertEqual(self.run_argv(['ok.test', '/tmp/bogus/bogus']), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
# Download to a directory
self.assertEqual(self.run_argv(['ok.test', '/tmp/']), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
temp_file.close()
def test_download_sh(self):
temp_file = tempfile.NamedTemporaryFile()
# Simple download
self.assertEqual(self.run_argv(['-vector', 'base64', 'ok.test', temp_file.name]), 'OK')
self.assertEqual(open(temp_file.name,'r').read(), 'OK')
temp_file.truncate()
# Download binary. Skip the check because I don't know the remote content, and
# the md5 check is already done inside file_download.
self.assertTrue(self.run_argv(['-vector', 'base64', '/bin/ls', temp_file.name]))
self.assertTrue(open(temp_file.name,'r').read(), 'OK')
temp_file.truncate()
# Download of an unreadable file
self.assertEqual(self.run_argv(['-vector', 'base64', 'ko.test', temp_file.name]), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
# Download of an remote unexistant file
self.assertEqual(self.run_argv(['-vector', 'base64', 'bogus', temp_file.name]), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
# Download to a local unexistant folder
self.assertEqual(self.run_argv(['-vector', 'base64', 'ok.test', '/tmp/bogus/bogus']), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
# Download to a directory
self.assertEqual(self.run_argv(['-vector', 'base64', 'ok.test', '/tmp/']), None)
self.assertEqual(open(temp_file.name,'r').read(), '')
temp_file.close()
| gpl-3.0 |
CERNDocumentServer/invenio | modules/bibformat/lib/elements/bfe_record_stats.py | 1 | 10296 | # This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints record statistics
"""
__revision__ = "$Id$"
from invenio.dbquery import run_sql
ELASTICSEARCH_ENABLED = False
try:
from elasticsearch import Elasticsearch
from invenio.config import \
CFG_ELASTICSEARCH_LOGGING, \
CFG_ELASTICSEARCH_SEARCH_HOST, \
CFG_ELASTICSEARCH_INDEX_PREFIX
# if we were able to import all modules and ES logging is enabled, then use
# elasticsearch instead of normal db queries
if CFG_ELASTICSEARCH_LOGGING:
ELASTICSEARCH_ENABLED = True
except ImportError:
pass
# elasticsearch not supported
def format_element(bfo, display='day_distinct_ip_nb_views'):
'''
Prints record statistics
@param display: the type of statistics displayed. Can be 'total_nb_views', 'day_nb_views', 'total_distinct_ip_nb_views', 'day_distinct_ip_nb_views', or 'total_distinct_ip_per_day_nb_views'
'''
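# Illustrative only (template name and syntax are assumptions, not taken from this
# file): a BibFormat output template would typically invoke this element roughly as
#   <BFE_RECORD_STATS display='day_distinct_ip_nb_views' />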
if ELASTICSEARCH_ENABLED:
page_views = 0
ES_INDEX = CFG_ELASTICSEARCH_INDEX_PREFIX + "*"
recID = bfo.recID
query = ""
es = Elasticsearch(CFG_ELASTICSEARCH_SEARCH_HOST)
if display == 'total_nb_views':
query = {
"query": {
"bool": {
"must": [
{
"match": {
"id_bibrec": recID
}
},
{
"match": {
"_type": "events.pageviews"
}
}
]
}
}
}
results = es.count(index=ES_INDEX, body=query)
if results:
page_views = results.get('count', 0)
elif display == 'day_nb_views':
query = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"id_bibrec": recID
}
},
{
"match": {
"_type": "events.pageviews"
}
}
]
}
},
"filter": {
"range": {
"@timestamp": {
"gt": "now-1d"
}
}
}
}
}
}
results = es.count(index=ES_INDEX, body=query)
if results:
page_views = results.get('count', 0)
elif display == 'total_distinct_ip_nb_views':
search_type = "count"
# TODO this search query with aggregation is slow, maybe there is a way to make it faster?
query = {
"query": {
"bool": {
"must": [
{
"match": {
"id_bibrec": recID
}
},
{
"match": {
"_type": "events.pageviews"
}
}
]
}
},
"aggregations": {
"distinct_ips": {
"cardinality": {
"field": "client_host"
}
}
}
}
results = es.search(index=ES_INDEX, body=query, search_type=search_type)
if results:
page_views = results.get('aggregations', {}).get('distinct_ips', {}).get('value', 0)
elif display == 'day_distinct_ip_nb_views':
search_type = "count"
# TODO aggregation is slow, maybe there is a way to make a faster query
query = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"id_bibrec": recID
}
},
{
"match": {
"_type": "events.pageviews"
}
}
]
}
},
"filter": {
"range": {
"@timestamp": {
"gt": "now-1d"
}
}
}
}
},
"aggregations": {
"distinct_ips": {
"cardinality": {
"field": "client_host"
}
}
}
}
results = es.search(index=ES_INDEX, body=query, search_type=search_type)
if results:
page_views = results.get('aggregations', {}).get('distinct_ips', {}).get('value', 0)
elif display == 'total_distinct_ip_per_day_nb_views':
search_type = "count"
# TODO aggregation is slow, maybe there is a way to make a faster query
query = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"id_bibrec": recID
}
},
{
"match": {
"_type": "events.pageviews"
}
}
]
}
}
}
},
"aggregations": {
"daily_stats": {
"date_histogram": {
"field": "@timestamp",
"interval": "day"
},
"aggregations": {
"distinct_ips": {
"cardinality": {
"field": "client_host"
}
}
}
}
}
}
results = es.search(index=ES_INDEX, body=query, search_type=search_type)
if results:
buckets = results.get("aggregations", {}).get("daily_stats", {}).get("buckets", {})
page_views = sum([int(bucket.get("distinct_ips", {}).get('value', '0')) for bucket in buckets])
return page_views
else:
if display == 'total_nb_views':
return run_sql("""SELECT COUNT(client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s""",
(bfo.recID,))[0][0]
elif display == 'day_nb_views':
return run_sql("""SELECT COUNT(client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s AND DATE(view_time)=CURDATE()""",
(bfo.recID,))[0][0]
elif display == 'total_distinct_ip_nb_views':
return run_sql("""SELECT COUNT(DISTINCT client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s""",
(bfo.recID,))[0][0]
elif display == 'day_distinct_ip_nb_views':
return run_sql("""SELECT COUNT(DISTINCT client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s AND DATE(view_time)=CURDATE()""",
(bfo.recID,))[0][0]
elif display == 'total_distinct_ip_per_day_nb_views':
# Count the number of distinct IP addresses for every day, then
# sum up. Similar to total_distinct_users_nb_views but assume
# that several different users can be behind a single IP
# (which could change every day)
res = run_sql("""SELECT COUNT(DISTINCT client_host)
FROM rnkPAGEVIEWS
WHERE id_bibrec=%s GROUP BY DATE(view_time)""",
(bfo.recID,))
return sum([row[0] for row in res])
| gpl-2.0 |
Netflix/security_monkey | security_monkey/auditors/openstack/openstack_security_group.py | 1 | 1657 | # Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.openstack.auditors.security_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Michael Stair <[email protected]>
"""
from security_monkey.auditors.security_group import SecurityGroupAuditor
from security_monkey.watchers.openstack.network.openstack_security_group import OpenStackSecurityGroup
class OpenStackSecurityGroupAuditor(SecurityGroupAuditor):
index = OpenStackSecurityGroup.index
i_am_singular = OpenStackSecurityGroup.i_am_singular
i_am_plural = OpenStackSecurityGroup.i_am_plural
network_whitelist = []
def __init__(self, accounts=None, debug=False):
super(OpenStackSecurityGroupAuditor, self).__init__(accounts=accounts, debug=debug)
def check_securitygroup_ec2_rfc1918(self, sg_item):
pass
def _check_internet_cidr(self, cidr):
''' Some public clouds default to None for the source, meaning any source. '''
return not cidr or super(OpenStackSecurityGroupAuditor, self)._check_internet_cidr(cidr)
| apache-2.0 |
dd00/commandergenius | project/jni/python/src/Lib/test/test_popen2.py | 51 | 3151 | #! /usr/bin/env python
"""Test script for popen2.py"""
import warnings
warnings.filterwarnings("ignore", ".*popen2 module is deprecated.*",
DeprecationWarning)
warnings.filterwarnings("ignore", "os\.popen. is deprecated.*",
DeprecationWarning)
import os
import sys
import unittest
import popen2
from test.test_support import TestSkipped, run_unittest, reap_children
if sys.platform[:4] == 'beos' or sys.platform[:6] == 'atheos':
# Locks get messed up or something. Generally we're supposed
# to avoid mixing "posix" fork & exec with native threads, and
# they may be right about that after all.
raise TestSkipped("popen2() doesn't work on " + sys.platform)
# if we don't have os.popen, check that
# we have os.fork. if not, skip the test
# (by raising an ImportError)
try:
from os import popen
del popen
except ImportError:
from os import fork
del fork
class Popen2Test(unittest.TestCase):
cmd = "cat"
if os.name == "nt":
cmd = "more"
teststr = "ab cd\n"
# "more" doesn't act the same way across Windows flavors,
# sometimes adding an extra newline at the start or the
# end. So we strip whitespace off both ends for comparison.
expected = teststr.strip()
def setUp(self):
popen2._cleanup()
# When the test runs, there shouldn't be any open pipes
self.assertFalse(popen2._active, "Active pipes when test starts" +
repr([c.cmd for c in popen2._active]))
def tearDown(self):
for inst in popen2._active:
inst.wait()
popen2._cleanup()
self.assertFalse(popen2._active, "_active not empty")
reap_children()
def validate_output(self, teststr, expected_out, r, w, e=None):
w.write(teststr)
w.close()
got = r.read()
self.assertEquals(expected_out, got.strip(), "wrote %r read %r" %
(teststr, got))
if e is not None:
got = e.read()
self.assertFalse(got, "unexpected %r on stderr" % got)
def test_popen2(self):
r, w = popen2.popen2(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_popen3(self):
if os.name == 'posix':
r, w, e = popen2.popen3([self.cmd])
self.validate_output(self.teststr, self.expected, r, w, e)
r, w, e = popen2.popen3(self.cmd)
self.validate_output(self.teststr, self.expected, r, w, e)
def test_os_popen2(self):
# same test as test_popen2(), but using the os.popen*() API
w, r = os.popen2(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_os_popen3(self):
# same test as test_popen3(), but using the os.popen*() API
if os.name == 'posix':
w, r, e = os.popen3([self.cmd])
self.validate_output(self.teststr, self.expected, r, w, e)
w, r, e = os.popen3(self.cmd)
self.validate_output(self.teststr, self.expected, r, w, e)
def test_main():
run_unittest(Popen2Test)
if __name__ == "__main__":
test_main()
| lgpl-2.1 |
dudepare/django | django/middleware/gzip.py | 478 | 1831 | import re
from django.utils.cache import patch_vary_headers
from django.utils.text import compress_sequence, compress_string
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth attempting to compress really short responses.
if not response.streaming and len(response.content) < 200:
return response
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
patch_vary_headers(response, ('Accept-Encoding',))
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
if response.streaming:
# Delete the `Content-Length` header for streaming content, because
# we won't know the compressed size until we stream it.
response.streaming_content = compress_sequence(response.streaming_content)
del response['Content-Length']
else:
# Return the compressed content only if it's actually shorter.
compressed_content = compress_string(response.content)
if len(compressed_content) >= len(response.content):
return response
response.content = compressed_content
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
response['Content-Encoding'] = 'gzip'
return response
| bsd-3-clause |
prculley/gramps | gramps/gui/widgets/selectionwidget.py | 3 | 31444 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2013 Artem Glebov <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
#-------------------------------------------------------------------------
#
# grabbers constants and routines
#
#-------------------------------------------------------------------------
from .grabbers import (grabber_generators, can_grab, grabber_position,
switch_grabber, CURSORS, GRABBER_INSIDE, INSIDE,
INNER_GRABBERS, OUTER_GRABBERS, MOTION_FUNCTIONS)
#-------------------------------------------------------------------------
#
# PhotoTaggingGramplet
#
#-------------------------------------------------------------------------
RESIZE_RATIO = 1.5
MAX_ZOOM = 10
MIN_ZOOM = 0.05
MAX_SIZE = 2000
MIN_SIZE = 50
SHADING_OPACITY = 0.7
MIN_SELECTION_SIZE = 10
def scale_to_fit(orig_x, orig_y, target_x, target_y):
"""
Calculates the scale factor needed to fit the rectangle orig_x * orig_y
into target_x * target_y while keeping the aspect ratio.
"""
orig_aspect = orig_x / orig_y
target_aspect = target_x / target_y
if orig_aspect > target_aspect:
return target_x / orig_x
else:
return target_y / orig_y
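# For example, scale_to_fit(400, 300, 200, 200) returns 0.5: the 4:3 source is
# wider than the square target, so the fit is constrained by the width.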
def resize_keep_aspect(orig_x, orig_y, target_x, target_y):
"""
Calculates the dimensions of the rectangle obtained from
the rectangle orig_x * orig_y by scaling to fit
target_x * target_y keeping the aspect ratio.
"""
orig_aspect = orig_x / orig_y
target_aspect = target_x / target_y
if orig_aspect > target_aspect:
return (target_x, target_x * orig_y // orig_x)
else:
return (target_y * orig_x // orig_y, target_y)
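# For example, resize_keep_aspect(400, 300, 200, 200) returns (200, 150).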
def order_coordinates(point1, point2):
"""
Returns the rectangle (x1, y1, x2, y2) based on point1 and point2,
such that x1 <= x2 and y1 <= y2.
"""
x1 = min(point1[0], point2[0])
x2 = max(point1[0], point2[0])
y1 = min(point1[1], point2[1])
y2 = max(point1[1], point2[1])
return (x1, y1, x2, y2)
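# For example, order_coordinates((10, 40), (3, 7)) returns (3, 7, 10, 40).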
def minimum_region(point1, point2):
"""
Returns whether the rectangle defined by the corner points point1
and point2 exceeds the minimum dimensions.
"""
return (abs(point1[0] - point2[0]) >= MIN_SELECTION_SIZE and
abs(point1[1] - point2[1]) >= MIN_SELECTION_SIZE)
class Region:
"""
Representation of a region of image that can be associated with
a person.
"""
def __init__(self, x1, y1, x2, y2):
"""
Creates a new region with the specified coordinates.
"""
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.person = None
self.mediaref = None
def coords(self):
"""
Returns the coordinates of the region as a 4-tuple in the
format (x1, y1, x2, y2).
"""
return (self.x1, self.y1, self.x2, self.y2)
def set_coords(self, x1, y1, x2, y2):
"""
Sets the coordinates of this region.
"""
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
def contains(self, x, y):
"""
Returns whether the point with coordinates (x, y) lies inside
this region.
"""
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
def contains_rect(self, other):
"""
Returns whether this region fully contains the region other.
"""
return (self.contains(other.x1, other.y1) and
self.contains(other.x2, other.y2))
def area(self):
"""
Returns the area of this region.
"""
return abs(self.x1 - self.x2) * abs(self.y1 - self.y2)
def intersects(self, other):
"""
Returns whether the current region intersects other.
"""
# assumes that x1 <= x2 and y1 <= y2
return not (self.x2 < other.x1 or self.x1 > other.x2 or
self.y2 < other.y1 or self.y1 > other.y2)
class SelectionWidget(Gtk.ScrolledWindow):
"""
A widget that displays an image and permits GIMP-like selection of regions
within the image. The widget derives from gtk.ScrolledWindow.
"""
__gsignals__ = {
"region-modified": (GObject.SignalFlags.RUN_FIRST, None, ()),
"region-created": (GObject.SignalFlags.RUN_FIRST, None, ()),
"region-selected": (GObject.SignalFlags.RUN_FIRST, None, ()),
"selection-cleared": (GObject.SignalFlags.RUN_FIRST, None, ()),
"right-button-clicked": (GObject.SignalFlags.RUN_FIRST, None, ()),
"zoomed-in": (GObject.SignalFlags.RUN_FIRST, None, ()),
"zoomed-out": (GObject.SignalFlags.RUN_FIRST, None, ())
}
def __init__(self):
"""
Creates a new selection widget.
"""
self.multiple_selection = True
self.loaded = False
self.start_point_screen = None
self.selection = None
self.current = None
self.in_region = None
self.grabber = None
self.regions = []
self.translation = None
self.pixbuf = None
self.scaled_pixbuf = None
self.scale = 1.0
self.old_viewport_size = None
Gtk.ScrolledWindow.__init__(self)
self.add(self._build_gui())
self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
def _build_gui(self):
"""
Builds and lays out the GUI of the widget.
"""
self.image = Gtk.Image()
self.image.set_has_tooltip(True)
self.image.connect_after("draw", self._expose_handler)
self.image.connect("query-tooltip", self._show_tooltip)
self.event_box = Gtk.EventBox()
self.event_box.connect('button-press-event',
self._button_press_event)
self.event_box.connect('button-release-event',
self._button_release_event)
self.connect('motion-notify-event',
self._motion_notify_event)
self.connect('scroll-event',
self._motion_scroll_event)
self.event_box.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.event_box.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK)
self.event_box.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.event_box.add(self.image)
self.viewport = Gtk.Viewport()
self.connect("size-allocate", self._resize)
self.viewport.add(self.event_box)
return self.viewport
# ======================================================
# public field accessors
# ======================================================
def get_multiple_selection(self):
"""
Return whether multiple selection is enabled.
"""
return self.multiple_selection
def set_multiple_selection(self, enable):
"""
Enables or disables multiple selection.
"""
self.multiple_selection = enable
def is_image_loaded(self):
"""
Returns whether an image has been loaded into this selection widget.
"""
return self.loaded
def set_regions(self, regions):
"""
Sets the list of regions to be displayed in the widget.
"""
self.regions = regions
def get_current(self):
"""
Returns the currently active region.
"""
return self.current
def set_current(self, region):
"""
Activates the given region in the widget.
"""
self.current = region
def get_selection(self):
"""
Returns the coordinates of the current selection.
"""
return self.selection
# ======================================================
# loading the image
# ======================================================
def load_image(self, image_path):
"""
Loads an image from a given path into this selection widget.
"""
self.start_point_screen = None
self.selection = None
self.in_region = None
self.grabber_position = None
self.grabber_to_draw = None
try:
self.pixbuf = GdkPixbuf.Pixbuf.new_from_file(image_path)
self.original_image_size = (self.pixbuf.get_width(),
self.pixbuf.get_height())
viewport_size = self.viewport.get_allocation()
self.old_viewport_size = viewport_size
self.scale = scale_to_fit(self.pixbuf.get_width(),
self.pixbuf.get_height(),
viewport_size.width,
viewport_size.height)
self._rescale()
self.loaded = True
except (GObject.GError, OSError):
self.show_missing()
def show_missing(self):
"""
Displays a 'missing image' icon in the widget.
"""
self.pixbuf = None
self.image.set_from_icon_name('image-missing', Gtk.IconSize.DIALOG)
self.image.queue_draw()
def _resize(self, *dummy):
"""
        Handles 'size-allocate' events from Gtk.
"""
if self.pixbuf:
viewport_size = self.viewport.get_allocation()
if viewport_size.height != self.old_viewport_size.height or \
viewport_size.width != self.old_viewport_size.width or \
not self.image.get_pixbuf():
self.scale = scale_to_fit(self.pixbuf.get_width(),
self.pixbuf.get_height(),
viewport_size.width,
viewport_size.height)
self._rescale()
self.old_viewport_size = viewport_size
return False
def expander(self, *dummy):
""" Handler for expander in caller; needed because Gtk doesn't handle
        vertical expansion correctly
"""
self.image.clear()
self.image.set_size_request(2, 2)
self.event_box.set_size_request(2, 2)
return False
# ======================================================
# coordinate transformations (public methods)
# ======================================================
def proportional_to_real_rect(self, rect):
"""
Translates proportional (ranging from 0 to 100) coordinates to image
coordinates (in pixels).
"""
x1, y1, x2, y2 = rect
return (self._proportional_to_real((x1, y1)) +
self._proportional_to_real((x2, y2)))
def real_to_proportional_rect(self, rect):
"""
        Translates image coordinates (in pixels) to proportional coordinates
        (ranging from 0 to 100).
"""
x1, y1, x2, y2 = rect
return (self._real_to_proportional((x1, y1)) +
self._real_to_proportional((x2, y2)))
# ======================================================
# widget manipulation
# ======================================================
def refresh(self):
"""
Schedules a redraw of the image.
"""
self.image.queue_draw()
def can_zoom_in(self):
"""
Returns whether it is possible to zoom in the image.
"""
if self.original_image_size:
scaled_size = (self.original_image_size[0] * self.scale * RESIZE_RATIO,
self.original_image_size[1] * self.scale * RESIZE_RATIO)
return scaled_size[0] < MAX_SIZE and scaled_size[1] < MAX_SIZE
return False
def can_zoom_out(self):
"""
Returns whether it is possible to zoom out the image.
"""
if self.original_image_size:
scaled_size = (self.original_image_size[0] * self.scale * RESIZE_RATIO,
self.original_image_size[1] * self.scale * RESIZE_RATIO)
return scaled_size[0] >= MIN_SIZE and scaled_size[1] >= MIN_SIZE
return False
def zoom_in(self):
"""
Zooms in the image. The zoom factor is defined by RESIZE_RATIO.
"""
if self.can_zoom_in():
self.scale *= RESIZE_RATIO
self._rescale()
self.emit("zoomed-in")
def zoom_out(self):
"""
Zooms out the image. The zoom factor is defined by RESIZE_RATIO.
"""
if self.can_zoom_out():
self.scale /= RESIZE_RATIO
self._rescale()
self.emit("zoomed-out")
def select(self, region):
"""
Highlights the given region in the image.
"""
self.current = region
if self.current is not None:
self.selection = self.current.coords()
self.image.queue_draw()
def clear_selection(self):
"""
Clears the selection.
"""
self.current = None
self.selection = None
self.image.queue_draw()
    def find_region(self, x, y):
"""
        Given screen coordinates, finds the region (if any) containing the
        corresponding point in the image.
"""
return self._find_region(*self._screen_to_image((x, y)))
# ======================================================
# thumbnails
# ======================================================
def get_thumbnail(self, region, thumbnail_size):
"""
Returns the thumbnail of the given region.
"""
w = region.x2 - region.x1
h = region.y2 - region.y1
if w >= 1 and h >= 1 and self.pixbuf:
subpixbuf = self.pixbuf.new_subpixbuf(region.x1, region.y1, w, h)
size = resize_keep_aspect(w, h, *thumbnail_size)
return subpixbuf.scale_simple(size[0], size[1],
GdkPixbuf.InterpType.BILINEAR)
else:
return None
# ======================================================
# utility functions for retrieving properties
# ======================================================
def _get_original_image_size(self):
"""
Returns the size of the image before scaling.
"""
return self.original_image_size
def _get_scaled_image_size(self):
"""
        Returns the size of the image scaled by the current scale factor.
"""
unscaled_size = self._get_original_image_size()
return (unscaled_size[0] * self.scale, unscaled_size[1] * self.scale)
# ======================================================
# coordinate transformations
# ======================================================
def _proportional_to_real(self, coord):
"""
Translates proportional (ranging from 0 to 100) coordinates to image
coordinates (in pixels).
"""
w, h = self.original_image_size
return (int(round(coord[0] * w / 100)), int(round(coord[1] * h / 100)))
def _real_to_proportional(self, coord):
"""
        Translates image coordinates (in pixels) to proportional coordinates
        (ranging from 0 to 100).
"""
w, h = self.original_image_size
return (int(round(coord[0] * 100 / w)), int(round(coord[1] * 100 / h)))
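    # Illustrative round trip for a 2000x1000 pixel image:
    #   _real_to_proportional((500, 250))  -> (25, 25)
    #   _proportional_to_real((25, 25))    -> (500, 250)
    # Because proportional values are rounded to whole percentages, the round
    # trip quantizes coordinates to a 100x100 grid over the image.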
def _image_to_screen(self, coords):
"""
Translates image coordinates to viewport coordinates using the current
scale and viewport size.
"""
viewport_rect = self.viewport.get_allocation()
image_rect = self.scaled_size
if image_rect[0] < viewport_rect.width:
offset_x = (image_rect[0] - viewport_rect.width) / 2
else:
offset_x = 0.0
if image_rect[1] < viewport_rect.height:
offset_y = (image_rect[1] - viewport_rect.height) / 2
else:
offset_y = 0.0
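        # When the scaled image is smaller than the viewport, the offsets are
        # negative, which centres the image; e.g. an 800x600 scaled image in a
        # 1000x700 viewport gives offset_x = -100 and offset_y = -50.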
return (int(coords[0] * self.scale - offset_x),
int(coords[1] * self.scale - offset_y))
def _screen_to_image(self, coords):
"""
Translates viewport coordinates to original (unscaled) image coordinates
using the current scale and viewport size.
"""
viewport_rect = self.viewport.get_allocation()
image_rect = self.scaled_size
if image_rect[0] < viewport_rect.width:
offset_x = (image_rect[0] - viewport_rect.width) / 2
else:
offset_x = 0.0
if image_rect[1] < viewport_rect.height:
offset_y = (image_rect[1] - viewport_rect.height) / 2
else:
offset_y = 0.0
return (int((coords[0] + offset_x) / self.scale),
int((coords[1] + offset_y) / self.scale))
def _truncate_to_image_size(self, coords):
"""
        Modifies the coordinates of the given point to ensure that it lies
        within the image. Negative values are replaced with 0, and values
        exceeding the image dimensions are clamped to those dimensions.
"""
x, y = coords
(image_width, image_height) = self._get_original_image_size()
x = max(x, 0)
x = min(x, image_width)
y = max(y, 0)
y = min(y, image_height)
return self._proportional_to_real(self._real_to_proportional((x, y)))
def _screen_to_truncated(self, coords):
"""
        Transforms the screen coordinates to image coordinates and truncates
        them to the image size.
"""
return self._truncate_to_image_size(self._screen_to_image(coords))
def _rect_image_to_screen(self, rect):
"""
Translates the coordinates of the rectangle from image to screen.
"""
x1, y1, x2, y2 = rect
x1, y1 = self._image_to_screen((x1, y1))
x2, y2 = self._image_to_screen((x2, y2))
return (x1, y1, x2, y2)
# ======================================================
# drawing and scaling the image
# ======================================================
def _expose_handler(self, widget, cr):
"""
        Handles the 'draw' signal of the underlying widget.
"""
if self.pixbuf:
self._draw_selection(widget, cr)
def _draw_selection(self, widget, cr):
"""
Draws the image, the selection boxes and does the necessary
shading.
"""
if not self.scaled_size:
return
w, h = self.scaled_size
offset_x, offset_y = self._image_to_screen((0, 0))
offset_x -= 1
offset_y -= 1
if self.selection:
x1, y1, x2, y2 = self._rect_image_to_screen(self.selection)
# transparent shading
self._draw_transparent_shading(cr, x1, y1, x2, y2, w, h,
offset_x, offset_y)
# selection frame
self._draw_selection_frame(cr, x1, y1, x2, y2)
# draw grabber
self._draw_grabber(cr)
else:
# selection frame
for region in self.regions:
x1, y1, x2, y2 = self._rect_image_to_screen(region.coords())
self._draw_region_frame(cr, x1, y1, x2, y2)
def _draw_transparent_shading(self, cr, x1, y1, x2, y2, w, h,
offset_x, offset_y):
"""
Draws the shading for a selection box.
"""
cr.set_source_rgba(1.0, 1.0, 1.0, SHADING_OPACITY)
cr.rectangle(offset_x, offset_y, x1 - offset_x, y1 - offset_y)
cr.rectangle(offset_x, y1, x1 - offset_x, y2 - y1)
cr.rectangle(offset_x, y2, x1 - offset_x, h - y2 + offset_y)
cr.rectangle(x1, y2 + 1, x2 - x1 + 1, h - y2 + offset_y)
cr.rectangle(x2 + 1, y2 + 1, w - x2 + offset_x, h - y2 + offset_y)
cr.rectangle(x2 + 1, y1, w - x2 + offset_x, y2 - y1 + 1)
cr.rectangle(x2 + 1, offset_y, w - x2 + offset_x, y2 - offset_y)
cr.rectangle(x1, offset_y, x2 - x1 + 1, y1 - offset_y)
cr.fill()
def _draw_selection_frame(self, cr, x1, y1, x2, y2):
"""
Draws the frame during selection.
"""
self._draw_region_frame(cr, x1, y1, x2, y2)
def _draw_region_frame(self, cr, x1, y1, x2, y2):
"""
Draws a region frame.
"""
cr.set_source_rgb(1.0, 1.0, 1.0) # white
cr.rectangle(x1, y1, x2 - x1, y2 - y1)
cr.stroke()
cr.set_source_rgb(0.0, 0.0, 1.0) # blue
cr.rectangle(x1 - 2, y1 - 2, x2 - x1 + 4, y2 - y1 + 4)
cr.stroke()
def _draw_grabber(self, cr):
"""
Draws a grabber.
"""
if self.selection is not None and self.grabber is not None:
selection_rect = self._rect_image_to_screen(self.selection)
cr.set_source_rgb(1.0, 0, 0)
if self.grabber_position is None:
generators = grabber_generators(selection_rect)
elif self.grabber_position == GRABBER_INSIDE:
generators = INNER_GRABBERS
else:
generators = OUTER_GRABBERS
if self.grabber_to_draw is not None:
generator = generators[self.grabber_to_draw]
else:
generator = generators[self.grabber]
if generator is not None:
x1, y1, x2, y2 = generator(*selection_rect)
cr.rectangle(x1, y1, x2 - x1, y2 - y1)
cr.stroke()
def _rescale(self):
"""
Recalculates the sizes using the current scale and updates
the buffers.
"""
self.scaled_size = (int(self.original_image_size[0] * self.scale),
int(self.original_image_size[1] * self.scale))
self.scaled_image = self.pixbuf.scale_simple(self.scaled_size[0],
self.scaled_size[1],
GdkPixbuf.InterpType.BILINEAR)
self.image.set_from_pixbuf(self.scaled_image)
self.image.set_size_request(*self.scaled_size)
self.event_box.set_size_request(*self.scaled_size)
# ======================================================
# managing regions
# ======================================================
def _find_region(self, x, y):
"""
Finds the smallest region containing point (x, y).
"""
result = None
for region in self.regions:
if region.contains(x, y):
if result is None or result.area() > region.area():
result = region
return result
# ======================================================
# mouse event handlers
# ======================================================
def _button_press_event(self, obj, event):
"""
Handles the button-press-event signal.
"""
if not self.is_image_loaded():
return
if event.button == 1: # left button
self.start_point_screen = (event.x, event.y)
if self.current is not None and self.grabber is None and \
self.multiple_selection:
self.current = None
self.selection = None
self.refresh()
self.emit("selection-cleared")
elif event.button == 3: # right button
# select a region, if clicked inside one
click_point = self._screen_to_image((event.x, event.y))
self.current = self._find_region(*click_point)
self.selection = \
self.current.coords() if self.current is not None else None
self.start_point_screen = None
self.refresh()
if self.current is not None:
self.emit("region-selected")
self.emit("right-button-clicked")
else:
self.emit("selection-cleared")
return True # don't propagate the event further
def _button_release_event(self, obj, event):
"""
Handles the button-release-event signal.
"""
if not self.is_image_loaded():
return
if event.button == 1:
if self.start_point_screen:
if self.current is not None:
# a box is currently selected
if self.grabber and self.grabber != INSIDE:
# clicked on one of the grabbers
dx, dy = (event.x - self.start_point_screen[0],
event.y - self.start_point_screen[1])
self.grabber_to_draw = self._modify_selection(dx, dy)
self.current.set_coords(*self.selection)
self.emit("region-modified")
elif self.grabber is None and self.multiple_selection:
# clicked outside of the grabbing area
self.current = None
self.selection = None
self.emit("selection-cleared")
else:
# update current selection
self.current.set_coords(*self.selection)
self.region = self.current
self.emit("region-modified")
else:
# nothing is currently selected
if (minimum_region(self.start_point_screen,
(event.x, event.y)) and
self._can_select()):
# region selection
region = Region(*self.selection)
self.regions.append(region)
self.current = region
self.emit("region-created")
else:
# nothing selected, just a click
click_point = \
self._screen_to_image(self.start_point_screen)
self.current = self._find_region(*click_point)
self.selection = \
self.current.coords() if self.current is not None \
else None
self.emit("region-selected")
self.start_point_screen = None
self.refresh()
def _motion_notify_event(self, widget, event):
"""
Handles the motion-notify-event signal.
"""
if not self.is_image_loaded():
return
end_point_orig = self._screen_to_image((event.x, event.y))
end_point = self._truncate_to_image_size(end_point_orig)
if self.start_point_screen:
# selection or dragging (mouse button pressed)
if self.grabber is not None and self.grabber != INSIDE:
# dragging the grabber
dx, dy = (event.x - self.start_point_screen[0],
event.y - self.start_point_screen[1])
self.grabber_to_draw = self._modify_selection(dx, dy)
else:
# making new selection
start_point = self._screen_to_truncated(self.start_point_screen)
self.selection = order_coordinates(start_point, end_point)
else:
# motion (mouse button is not pressed)
self.in_region = self._find_region(*end_point_orig)
if self.current is not None:
# a box is active, so check if the pointer is inside a grabber
rect = self._rect_image_to_screen(self.current.coords())
self.grabber = can_grab(rect, event.x, event.y)
if self.grabber is not None:
self.grabber_to_draw = self.grabber
self.grabber_position = grabber_position(rect)
self.event_box.get_window().set_cursor(CURSORS[self.grabber])
else:
self.grabber_to_draw = None
self.grabber_position = None
self.event_box.get_window().set_cursor(None)
else:
# nothing is active
self.grabber = None
self.grabber_to_draw = None
self.grabber_position = None
self.event_box.get_window().set_cursor(None)
self.image.queue_draw()
def _motion_scroll_event(self, widget, event):
"""
Handles the motion-scroll-event signal.
"""
if not self.is_image_loaded():
return
if event.direction == Gdk.ScrollDirection.UP:
self.zoom_in()
elif event.direction == Gdk.ScrollDirection.DOWN:
self.zoom_out()
# ======================================================
# helpers for mouse event handlers
# ======================================================
def _can_select(self):
"""
        Returns whether selection is currently possible: either multiple
        selection is enabled, or no region has been created yet.
"""
return self.multiple_selection or len(self.regions) < 1
def _modify_selection(self, dx, dy):
"""
Changes the selection when a grabber is dragged, returns the new
grabber if a grabber switch has happened, and the current grabber
otherwise.
"""
x1, y1, x2, y2 = self._rect_image_to_screen(self.current.coords())
x1, y1, x2, y2 = MOTION_FUNCTIONS[self.grabber](x1, y1, x2, y2, dx, dy)
(x1, y1) = self._screen_to_truncated((x1, y1))
(x2, y2) = self._screen_to_truncated((x2, y2))
grabber = switch_grabber(self.grabber, x1, y1, x2, y2)
self.selection = order_coordinates((x1, y1), (x2, y2))
return grabber
# ======================================================
# tooltips
# ======================================================
def _show_tooltip(self, widget, x, y, keyboard_mode, tooltip):
"""
Handles the query-tooltip signal.
"""
if self.in_region:
person = self.in_region.person
if person:
name = name_displayer.display(person)
else:
return False
tooltip.set_text(name)
return True
else:
return False
| gpl-2.0 |
596acres/livinglots-philly | livinglotsphilly/survey/forms.py | 2 | 1197 | from django import forms
from django.contrib.contenttypes.models import ContentType
from forms_builder.forms.forms import FormForForm
from .models import SurveyFieldEntry, SurveyFormEntry
class SurveyFormForForm(FormForForm):
field_entry_model = SurveyFieldEntry
content_type = forms.ModelChoiceField(
queryset=ContentType.objects.all(),
widget=forms.HiddenInput,
)
class Meta(FormForForm.Meta):
model = SurveyFormEntry
widgets = {
'object_id': forms.HiddenInput,
'survey_form': forms.HiddenInput,
}
def __init__(self, *args, **kwargs):
# Get the model instance that the resulting entry will be tied to
initial = kwargs.pop('initial', {})
content_object = initial.pop('content_object', None)
survey_form = initial.pop('survey_form', None)
super(SurveyFormForForm, self).__init__(*args, initial=initial, **kwargs)
if content_object:
self.initial.update({
'content_type': ContentType.objects.get_for_model(content_object),
'object_id': content_object.pk,
'survey_form': survey_form,
})
| gpl-3.0 |
stanlyxiang/incubator-hawq | depends/thirdparty/googletest/googletest/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exceptions thrown affect the remainder of the test work flow in the
    expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
Mistobaan/tensorflow | tensorflow/compiler/tests/xla_device_test.py | 76 | 1639 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for XLA devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class XlaDeviceTest(test.TestCase):
def testCopies(self):
"""Tests that copies between GPU and XLA devices work."""
if not test.is_gpu_available():
return
with session_lib.Session() as sess:
x = array_ops.placeholder(dtypes.float32, [2])
with ops.device("GPU"):
y = x * 2
with ops.device("device:XLA_CPU:0"):
z = y * y
with ops.device("GPU"):
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
xiangel/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_ipv6.py | 117 | 2847 | from __future__ import unicode_literals
from django.utils import unittest
from django.utils.ipv6 import is_valid_ipv6_address, clean_ipv6_address
class TestUtilsIPv6(unittest.TestCase):
def test_validates_correct_plain_address(self):
self.assertTrue(is_valid_ipv6_address('fe80::223:6cff:fe8a:2e8a'))
self.assertTrue(is_valid_ipv6_address('2a02::223:6cff:fe8a:2e8a'))
self.assertTrue(is_valid_ipv6_address('1::2:3:4:5:6:7'))
self.assertTrue(is_valid_ipv6_address('::'))
self.assertTrue(is_valid_ipv6_address('::a'))
self.assertTrue(is_valid_ipv6_address('2::'))
def test_validates_correct_with_v4mapping(self):
self.assertTrue(is_valid_ipv6_address('::ffff:254.42.16.14'))
self.assertTrue(is_valid_ipv6_address('::ffff:0a0a:0a0a'))
def test_validates_incorrect_plain_address(self):
self.assertFalse(is_valid_ipv6_address('foo'))
self.assertFalse(is_valid_ipv6_address('127.0.0.1'))
self.assertFalse(is_valid_ipv6_address('12345::'))
self.assertFalse(is_valid_ipv6_address('1::2:3::4'))
self.assertFalse(is_valid_ipv6_address('1::zzz'))
self.assertFalse(is_valid_ipv6_address('1::2:3:4:5:6:7:8'))
self.assertFalse(is_valid_ipv6_address('1:2'))
self.assertFalse(is_valid_ipv6_address('1:::2'))
def test_validates_incorrect_with_v4mapping(self):
self.assertFalse(is_valid_ipv6_address('::ffff:999.42.16.14'))
self.assertFalse(is_valid_ipv6_address('::ffff:zzzz:0a0a'))
# The ::1.2.3.4 format used to be valid but was deprecated
# in rfc4291 section 2.5.5.1
self.assertTrue(is_valid_ipv6_address('::254.42.16.14'))
self.assertTrue(is_valid_ipv6_address('::0a0a:0a0a'))
self.assertFalse(is_valid_ipv6_address('::999.42.16.14'))
self.assertFalse(is_valid_ipv6_address('::zzzz:0a0a'))
    def test_cleans_plain_address(self):
self.assertEqual(clean_ipv6_address('DEAD::0:BEEF'), 'dead::beef')
self.assertEqual(clean_ipv6_address('2001:000:a:0000:0:fe:fe:beef'), '2001:0:a::fe:fe:beef')
self.assertEqual(clean_ipv6_address('2001::a:0000:0:fe:fe:beef'), '2001:0:a::fe:fe:beef')
    def test_cleans_with_v4_mapping(self):
self.assertEqual(clean_ipv6_address('::ffff:0a0a:0a0a'), '::ffff:10.10.10.10')
self.assertEqual(clean_ipv6_address('::ffff:1234:1234'), '::ffff:18.52.18.52')
self.assertEqual(clean_ipv6_address('::ffff:18.52.18.52'), '::ffff:18.52.18.52')
def test_unpacks_ipv4(self):
self.assertEqual(clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True), '10.10.10.10')
self.assertEqual(clean_ipv6_address('::ffff:1234:1234', unpack_ipv4=True), '18.52.18.52')
self.assertEqual(clean_ipv6_address('::ffff:18.52.18.52', unpack_ipv4=True), '18.52.18.52')
| apache-2.0 |
nishigori/boto | boto/s3/bucketlogging.py | 153 | 3183 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax.saxutils
from boto.s3.acl import Grant
class BucketLogging(object):
def __init__(self, target=None, prefix=None, grants=None):
self.target = target
self.prefix = prefix
if grants is None:
self.grants = []
else:
self.grants = grants
def __repr__(self):
if self.target is None:
return "<BucketLoggingStatus: Disabled>"
grants = []
for g in self.grants:
if g.type == 'CanonicalUser':
u = g.display_name
elif g.type == 'Group':
u = g.uri
else:
u = g.email_address
grants.append("%s = %s" % (u, g.permission))
return "<BucketLoggingStatus: %s/%s (%s)>" % (self.target, self.prefix, ", ".join(grants))
def add_grant(self, grant):
self.grants.append(grant)
def startElement(self, name, attrs, connection):
if name == 'Grant':
self.grants.append(Grant())
return self.grants[-1]
else:
return None
def endElement(self, name, value, connection):
if name == 'TargetBucket':
self.target = value
elif name == 'TargetPrefix':
self.prefix = value
else:
setattr(self, name, value)
def to_xml(self):
# caller is responsible to encode to utf-8
s = u'<?xml version="1.0" encoding="UTF-8"?>'
s += u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
if self.target is not None:
s += u'<LoggingEnabled>'
s += u'<TargetBucket>%s</TargetBucket>' % self.target
prefix = self.prefix or ''
s += u'<TargetPrefix>%s</TargetPrefix>' % xml.sax.saxutils.escape(prefix)
if self.grants:
s += '<TargetGrants>'
for grant in self.grants:
s += grant.to_xml()
s += '</TargetGrants>'
s += u'</LoggingEnabled>'
s += u'</BucketLoggingStatus>'
return s
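# Illustrative use through boto's Bucket helpers (bucket names and prefixes
# are placeholders):
#
#   bucket.enable_logging('my-log-bucket', target_prefix='logs/')
#   status = bucket.get_logging_status()   # returns a BucketLogging instance
#   bucket.disable_logging()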
| mit |
jbfavre/exaproxy | lib/exaproxy/util/cache.py | 4 | 1214 | # encoding: utf-8
try:
from collections import OrderedDict
except ImportError:
# support installable ordereddict module in older python versions
from ordereddict import OrderedDict
from time import time
class TimeCache (dict):
__default = object()
def __init__ (self,timeout):
self.timeout = timeout
self.last = None
self.time = OrderedDict()
dict.__init__(self)
def __setitem__ (self,key,value):
dict.__setitem__(self,key,value)
if self.timeout > 0:
self.time[key] = time()
def __delitem__ (self,key):
if key in self.time:
del self.time[key]
dict.__delitem__(self,key)
# Cpython implementation of dict.pop does not call __delitem__ - sigh !
def pop (self,key,default=__default):
if key in self.time:
del self.time[key]
if default is self.__default:
return dict.pop(self,key)
return dict.pop(self,key,default)
def expired (self,maximum):
expire = time() - self.timeout
if self.last:
k,t = self.last
if t > expire:
return
if k in self:
maximum -= 1
yield k
self.last = None
while self.time and maximum:
k,t = self.time.popitem(False)
if t > expire:
self.last = k,t
break
if k in self:
maximum -= 1
yield k
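# Illustrative use (timeout in seconds; keys and values are arbitrary):
#
#   cache = TimeCache(60)
#   cache['example.com'] = '93.184.216.34'
#   for key in cache.expired(10):   # at most 10 keys older than 60 seconds
#       del cache[key]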
| bsd-2-clause |
SmartElect/SmartElect | civil_registry/migrations/0001_initial.py | 1 | 5409 | # Generated by Django 2.2 on 2019-05-03 14:05
import civil_registry.models
from django.db import migrations, models
import libya_elections.libya_bread
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Citizen',
fields=[
('civil_registry_id', models.BigIntegerField(help_text='Uniquely identifies a person, even across changes of national ID', primary_key=True, serialize=False, verbose_name='civil registry id')),
('national_id', models.BigIntegerField(db_index=True, help_text="The citizen's 12-digit national ID number", unique=True, validators=[civil_registry.models.national_id_validator], verbose_name='national id')),
('fbr_number', models.CharField(help_text='Family Book Record Number', max_length=20, validators=[civil_registry.models.fbr_number_validator], verbose_name='family book record number')),
('first_name', models.CharField(blank=True, db_index=True, max_length=255, verbose_name='first name')),
('father_name', models.CharField(blank=True, max_length=255, verbose_name='father name')),
('grandfather_name', models.CharField(blank=True, max_length=255, verbose_name='grandfather name')),
('family_name', models.CharField(blank=True, db_index=True, max_length=255, verbose_name='family name')),
('mother_name', models.CharField(blank=True, max_length=255, verbose_name='mother name')),
('birth_date', models.DateField(db_index=True, verbose_name='birth date')),
('gender', models.IntegerField(choices=[(2, 'Female'), (1, 'Male')], db_index=True, verbose_name='gender')),
('address', models.CharField(blank=True, max_length=1024, verbose_name='address')),
('office_id', models.IntegerField(default=0, verbose_name='office id')),
('branch_id', models.IntegerField(default=0, verbose_name='branch id')),
('state', models.IntegerField(default=0, verbose_name='state')),
('missing', models.DateTimeField(blank=True, help_text='If set, this citizen was not in the last data dump.', null=True, verbose_name='missing')),
],
options={
'verbose_name': 'citizen',
'verbose_name_plural': 'citizens',
'ordering': ['national_id'],
'permissions': (('read_citizen', 'Can read citizens'), ('browse_citizen', 'Can browse citizens')),
},
bases=(libya_elections.libya_bread.BirthDateFormatterMixin, models.Model),
),
migrations.CreateModel(
name='CitizenMetadata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dump_time', models.DateTimeField()),
],
),
migrations.CreateModel(
name='DumpFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filename', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='TempCitizen',
fields=[
('civil_registry_id', models.BigIntegerField(help_text='Uniquely identifies a person, even across changes of national ID', primary_key=True, serialize=False, verbose_name='civil registry id')),
('national_id', models.BigIntegerField(db_index=True, help_text="The citizen's 12-digit national ID number", unique=True, validators=[civil_registry.models.national_id_validator], verbose_name='national id')),
('fbr_number', models.CharField(help_text='Family Book Record Number', max_length=20, validators=[civil_registry.models.fbr_number_validator], verbose_name='family book record number')),
('first_name', models.CharField(blank=True, db_index=True, max_length=255, verbose_name='first name')),
('father_name', models.CharField(blank=True, max_length=255, verbose_name='father name')),
('grandfather_name', models.CharField(blank=True, max_length=255, verbose_name='grandfather name')),
('family_name', models.CharField(blank=True, db_index=True, max_length=255, verbose_name='family name')),
('mother_name', models.CharField(blank=True, max_length=255, verbose_name='mother name')),
('birth_date', models.DateField(db_index=True, verbose_name='birth date')),
('gender', models.IntegerField(choices=[(2, 'Female'), (1, 'Male')], db_index=True, verbose_name='gender')),
('address', models.CharField(blank=True, max_length=1024, verbose_name='address')),
('office_id', models.IntegerField(default=0, verbose_name='office id')),
('branch_id', models.IntegerField(default=0, verbose_name='branch id')),
('state', models.IntegerField(default=0, verbose_name='state')),
('missing', models.DateTimeField(blank=True, help_text='If set, this citizen was not in the last data dump.', null=True, verbose_name='missing')),
],
options={
'abstract': False,
},
),
]
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/Django/django/conf/__init__.py | 95 | 9136 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import logging
import os
import sys
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
from django.utils import importlib
from django.utils import six
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except KeyError:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
self._configure_logging()
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def _configure_logging(self):
"""
Setup logging from LOGGING_CONFIG and LOGGING settings.
"""
if not sys.warnoptions:
try:
# Route warnings through python logging
logging.captureWarnings(True)
# Allow DeprecationWarnings through the warnings filters
warnings.simplefilter("default", DeprecationWarning)
except AttributeError:
# No captureWarnings on Python 2.6, DeprecationWarnings are on anyway
pass
if self.LOGGING_CONFIG:
from django.utils.log import DEFAULT_LOGGING
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = importlib.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
logging_config_func(DEFAULT_LOGGING)
if self.LOGGING:
# Backwards-compatibility shim for #16288 fix
compat_patch_logging_config(self.LOGGING)
# ... then invoke it with the logging settings
logging_config_func(self.LOGGING)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
self._configure_logging()
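    # Illustrative standalone use (settings values are placeholders):
    #
    #   from django.conf import settings
    #   if not settings.configured:
    #       settings.configure(DEBUG=True, DATABASES={})
    #   settings.DEBUG   # -> True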
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, six.string_types):
raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
"to a tuple, not a string.")
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except ImportError as e:
raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and \
isinstance(setting_value, six.string_types):
warnings.warn("The %s setting must be a tuple. Please fix your "
"settings, as auto-correction is now deprecated." % setting,
PendingDeprecationWarning)
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
return super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
return super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return list(self.__dict__) + dir(self.default_settings)
settings = LazySettings()
def compat_patch_logging_config(logging_config):
"""
Backwards-compatibility shim for #16288 fix. Takes initial value of
``LOGGING`` setting and patches it in-place (issuing deprecation warning)
if "mail_admins" logging handler is configured but has no filters.
"""
# Shim only if LOGGING["handlers"]["mail_admins"] exists,
# but has no "filters" key
if "filters" not in logging_config.get(
"handlers", {}).get(
"mail_admins", {"filters": []}):
warnings.warn(
"You have no filters defined on the 'mail_admins' logging "
"handler: adding implicit debug-false-only filter. "
"See http://docs.djangoproject.com/en/dev/releases/1.4/"
"#request-exceptions-are-now-always-logged",
DeprecationWarning)
filter_name = "require_debug_false"
filters = logging_config.setdefault("filters", {})
while filter_name in filters:
filter_name = filter_name + "_"
filters[filter_name] = {
"()": "django.utils.log.RequireDebugFalse",
}
logging_config["handlers"]["mail_admins"]["filters"] = [filter_name]
| agpl-3.0 |
epssy/hue | desktop/core/ext-py/pysaml2-2.4.0/example/idp2_repoze/idp_user.py | 33 | 1617 | USERS = {
"haho0032": {
"sn": "Hoerberg",
"givenName": "Hans",
"eduPersonScopedAffiliation": "[email protected]",
"eduPersonPrincipalName": "[email protected]",
"uid": "haho",
"eduPersonTargetedID": "one!for!all",
"c": "SE",
"o": "Example Co.",
"ou": "IT",
"initials": "P",
"schacHomeOrganization": "example.com",
"email": "[email protected]",
"displayName": "Hans Hoerberg",
"labeledURL": "http://www.example.com/haho My homepage",
"norEduPersonNIN": "SE199012315555"
},
"roland": {
"sn": "Hedberg",
"givenName": "Roland",
"eduPersonScopedAffiliation": "[email protected]",
"eduPersonPrincipalName": "[email protected]",
"uid": "rohe",
"eduPersonTargetedID": "one!for!all",
"c": "SE",
"o": "Example Co.",
"ou": "IT",
"initials": "P",
#"schacHomeOrganization": "example.com",
"email": "[email protected]",
"displayName": "P. Roland Hedberg",
"labeledURL": "http://www.example.com/rohe My homepage",
"norEduPersonNIN": "SE197001012222"
},
"babs": {
"surname": "Babs",
"givenName": "Ozzie",
"eduPersonAffiliation": "affiliate"
},
"upper": {
"surname": "Jeter",
"givenName": "Derek",
"eduPersonAffiliation": "affiliate"
},
}
EXTRA = {
"roland": {
"eduPersonEntitlement": "urn:mace:swamid.se:foo:bar",
"schacGender": "male",
"schacUserPresenceID": "skype:pepe.perez"
}
} | apache-2.0 |
moijes12/oh-mainline | vendor/packages/Django/django/middleware/common.py | 101 | 7433 | import hashlib
import logging
import re
from django.conf import settings
from django import http
from django.core.mail import mail_managers
from django.utils.http import urlquote
from django.utils import six
from django.core import urlresolvers
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
logger.warning('Forbidden (User agent): %s', request.path,
extra={
'status_code': 403,
'request': request
}
)
return http.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH
# and settings.PREPEND_WWW
host = request.get_host()
old_url = [host, request.path]
new_url = old_url[:]
if (settings.PREPEND_WWW and old_url[0] and
not old_url[0].startswith('www.')):
new_url[0] = 'www.' + old_url[0]
# Append a slash if APPEND_SLASH is set and the URL doesn't have a
# trailing slash and there is no pattern for the current path
if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
urlconf = getattr(request, 'urlconf', None)
if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
raise RuntimeError((""
"You called this URL via POST, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining POST data. "
"Change your form to point to %s%s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django "
"settings.") % (new_url[0], new_url[1]))
if new_url == old_url:
# No redirects required.
return
if new_url[0]:
newurl = "%s://%s%s" % (
request.is_secure() and 'https' or 'http',
new_url[0], urlquote(new_url[1]))
else:
newurl = urlquote(new_url[1])
if request.META.get('QUERY_STRING', ''):
if six.PY3:
newurl += '?' + request.META['QUERY_STRING']
else:
# `query_string` is a bytestring. Appending it to the unicode
# string `newurl` will fail if it isn't ASCII-only. This isn't
# allowed; only broken software generates such query strings.
# Better drop the invalid query string than crash (#15152).
try:
newurl += '?' + request.META['QUERY_STRING'].decode()
except UnicodeDecodeError:
pass
return http.HttpResponsePermanentRedirect(newurl)
def process_response(self, request, response):
"Send broken link emails and calculate the Etag, if needed."
if response.status_code == 404:
if settings.SEND_BROKEN_LINK_EMAILS and not settings.DEBUG:
# If the referrer was from an internal link or a non-search-engine site,
# send a note to the managers.
domain = request.get_host()
referer = request.META.get('HTTP_REFERER', None)
is_internal = _is_internal_request(domain, referer)
path = request.get_full_path()
if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\nIP address: %s\n" \
% (referer, request.get_full_path(), ua, ip),
fail_silently=True)
return response
# Use ETags, if requested.
if settings.USE_ETAGS:
if response.has_header('ETag'):
etag = response['ETag']
elif response.streaming:
etag = None
else:
etag = '"%s"' % hashlib.md5(response.content).hexdigest()
if etag is not None:
if (200 <= response.status_code < 300
and request.META.get('HTTP_IF_NONE_MATCH') == etag):
cookies = response.cookies
response = http.HttpResponseNotModified()
response.cookies = cookies
else:
response['ETag'] = etag
return response
def _is_ignorable_404(uri):
"""
Returns True if a 404 at the given URL *shouldn't* notify the site managers.
"""
if getattr(settings, 'IGNORABLE_404_STARTS', ()):
import warnings
warnings.warn('The IGNORABLE_404_STARTS setting has been deprecated '
'in favor of IGNORABLE_404_URLS.', DeprecationWarning)
for start in settings.IGNORABLE_404_STARTS:
if uri.startswith(start):
return True
if getattr(settings, 'IGNORABLE_404_ENDS', ()):
import warnings
warnings.warn('The IGNORABLE_404_ENDS setting has been deprecated '
'in favor of IGNORABLE_404_URLS.', DeprecationWarning)
for end in settings.IGNORABLE_404_ENDS:
if uri.endswith(end):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
def _is_internal_request(domain, referer):
"""
Returns true if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return referer is not None and re.match("^https?://%s/" % re.escape(domain), referer)
| agpl-3.0 |
neumerance/deploy | .venv/lib/python2.7/site-packages/requests/structures.py | 67 | 3576 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import os
import collections
from itertools import islice
class IteratorProxy(object):
"""docstring for IteratorProxy"""
def __init__(self, i):
self.i = i
# self.i = chain.from_iterable(i)
def __iter__(self):
return self.i
def __len__(self):
if hasattr(self.i, '__len__'):
return len(self.i)
if hasattr(self.i, 'len'):
return self.i.len
if hasattr(self.i, 'fileno'):
return os.fstat(self.i.fileno()).st_size
def read(self, n):
return "".join(islice(self.i, None, n))
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive:
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
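# Usage sketch (illustrative, not part of the original module): LookupDict
# resolves lookups through instance attributes and falls back to None.
#
#   codes = LookupDict(name='status_codes')
#   codes.ok = 200
#   codes['ok']        # 200, read from the instance __dict__
#   codes['missing']   # None rather than KeyError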
| apache-2.0 |
xq262144/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/metadata.py | 31 | 24394 | #!/usr/bin/env python
from saml2.md import AttributeProfile
from saml2.sigver import security_context
from saml2.config import Config
from saml2.validate import valid_instance
from saml2.time_util import in_a_while
from saml2.extension import mdui
from saml2.extension import idpdisc
from saml2.extension import shibmd
from saml2.extension import mdattr
from saml2.saml import NAME_FORMAT_URI
from saml2.saml import AttributeValue
from saml2.saml import Attribute
from saml2.attribute_converter import from_local_name
from saml2 import md, SAMLError
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_SOAP
from saml2 import samlp
from saml2 import class_name
import xmldsig as ds
from saml2.sigver import pre_signature_part
from saml2.s_utils import factory
from saml2.s_utils import rec_factory
from saml2.s_utils import sid
__author__ = 'rolandh'
NSPAIR = {
"saml2p": "urn:oasis:names:tc:SAML:2.0:protocol",
"saml2": "urn:oasis:names:tc:SAML:2.0:assertion",
"soap11": "http://schemas.xmlsoap.org/soap/envelope/",
"meta": "urn:oasis:names:tc:SAML:2.0:metadata",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"ds": "http://www.w3.org/2000/09/xmldsig#",
"shibmd": "urn:mace:shibboleth:metadata:1.0",
"md": "urn:oasis:names:tc:SAML:2.0:metadata",
}
DEFAULTS = {
"want_assertions_signed": "true",
"authn_requests_signed": "false",
"want_authn_requests_signed": "true",
"want_authn_requests_only_with_valid_cert": "false",
}
ORG_ATTR_TRANSL = {
"organization_name": ("name", md.OrganizationName),
"organization_display_name": ("display_name", md.OrganizationDisplayName),
"organization_url": ("url", md.OrganizationURL)
}
MDNS = '"urn:oasis:names:tc:SAML:2.0:metadata"'
XMLNSXS = " xmlns:xs=\"http://www.w3.org/2001/XMLSchema\""
def metadata_tostring_fix(desc, nspair, xmlstring=""):
if not xmlstring:
xmlstring = desc.to_string(nspair)
if "\"xs:string\"" in xmlstring and XMLNSXS not in xmlstring:
xmlstring = xmlstring.replace(MDNS, MDNS+XMLNSXS)
return xmlstring
def create_metadata_string(configfile, config, valid, cert, keyfile, mid, name,
sign):
valid_for = 0
nspair = {"xs": "http://www.w3.org/2001/XMLSchema"}
#paths = [".", "/opt/local/bin"]
if valid:
valid_for = int(valid) # Hours
eds = []
if config is not None:
eds.append(entity_descriptor(config))
else:
if configfile.endswith(".py"):
configfile = configfile[:-3]
config = Config().load_file(configfile, metadata_construction=True)
eds.append(entity_descriptor(config))
conf = Config()
conf.key_file = keyfile
conf.cert_file = cert
conf.debug = 1
conf.xmlsec_binary = config.xmlsec_binary
secc = security_context(conf)
if mid:
desc = entities_descriptor(eds, valid_for, name, mid,
sign, secc)
valid_instance(desc)
return metadata_tostring_fix(desc, nspair)
else:
eid = eds[0]
if sign:
eid, xmldoc = sign_entity_descriptor(eid, mid, secc)
else:
xmldoc = None
valid_instance(eid)
xmldoc = metadata_tostring_fix(eid, nspair, xmldoc)
return xmldoc
def _localized_name(val, klass):
"""If no language is defined 'en' is the default"""
try:
(text, lang) = val
return klass(text=text, lang=lang)
except ValueError:
return klass(text=val, lang="en")
def do_organization_info(ava):
"""
Description of an organization in the configuration is
a dictionary of keys and values, where the values might be tuples::
"organization": {
"name": ("AB Exempel", "se"),
"display_name": ("AB Exempel", "se"),
"url": "http://www.example.org"
}
"""
if ava is None:
return None
org = md.Organization()
for dkey, (ckey, klass) in ORG_ATTR_TRANSL.items():
if ckey not in ava:
continue
if isinstance(ava[ckey], basestring):
setattr(org, dkey, [_localized_name(ava[ckey], klass)])
elif isinstance(ava[ckey], list):
setattr(org, dkey,
[_localized_name(n, klass) for n in ava[ckey]])
else:
setattr(org, dkey, [_localized_name(ava[ckey], klass)])
return org
def do_contact_person_info(lava):
""" Creates a ContactPerson instance from configuration information"""
cps = []
if lava is None:
return cps
contact_person = md.ContactPerson
for ava in lava:
cper = md.ContactPerson()
for (key, classpec) in contact_person.c_children.values():
try:
value = ava[key]
data = []
if isinstance(classpec, list):
# What if value is not a list ?
if isinstance(value, basestring):
data = [classpec[0](text=value)]
else:
for val in value:
data.append(classpec[0](text=val))
else:
data = classpec(text=value)
setattr(cper, key, data)
except KeyError:
pass
for (prop, classpec, _) in contact_person.c_attributes.values():
try:
# should do a check for valid value
setattr(cper, prop, ava[prop])
except KeyError:
pass
# ContactType must have a value
typ = getattr(cper, "contact_type")
if not typ:
setattr(cper, "contact_type", "technical")
cps.append(cper)
return cps
def do_key_descriptor(cert, use="both"):
if use == "both":
return [
md.KeyDescriptor(
key_info=ds.KeyInfo(
x509_data=ds.X509Data(
x509_certificate=ds.X509Certificate(text=cert)
)
),
use="encryption"
),
md.KeyDescriptor(
key_info=ds.KeyInfo(
x509_data=ds.X509Data(
x509_certificate=ds.X509Certificate(text=cert)
)
),
use="signing"
)
]
elif use in ["signing", "encryption"]:
return md.KeyDescriptor(
key_info=ds.KeyInfo(
x509_data=ds.X509Data(
x509_certificate=ds.X509Certificate(text=cert)
)
),
use=use
)
else:
return md.KeyDescriptor(
key_info=ds.KeyInfo(
x509_data=ds.X509Data(
x509_certificate=ds.X509Certificate(text=cert)
)
)
)
def do_requested_attribute(attributes, acs, is_required="false"):
lista = []
for attr in attributes:
attr = from_local_name(acs, attr, NAME_FORMAT_URI)
args = {}
for key in attr.keyswv():
args[key] = getattr(attr, key)
args["is_required"] = is_required
args["name_format"] = NAME_FORMAT_URI
lista.append(md.RequestedAttribute(**args))
return lista
def do_uiinfo(_uiinfo):
uii = mdui.UIInfo()
for attr in ['display_name', 'description', "information_url",
'privacy_statement_url']:
try:
val = _uiinfo[attr]
except KeyError:
continue
aclass = uii.child_class(attr)
inst = getattr(uii, attr)
if isinstance(val, basestring):
ainst = aclass(text=val)
inst.append(ainst)
elif isinstance(val, dict):
ainst = aclass()
ainst.text = val["text"]
ainst.lang = val["lang"]
inst.append(ainst)
else:
for value in val:
if isinstance(value, basestring):
ainst = aclass(text=value)
inst.append(ainst)
elif isinstance(value, dict):
ainst = aclass()
ainst.text = value["text"]
ainst.lang = value["lang"]
inst.append(ainst)
try:
_attr = "logo"
val = _uiinfo[_attr]
inst = getattr(uii, _attr)
# dictionary or list of dictionaries
if isinstance(val, dict):
logo = mdui.Logo()
for attr, value in val.items():
if attr in logo.keys():
setattr(logo, attr, value)
inst.append(logo)
elif isinstance(val, list):
for logga in val:
if not isinstance(logga, dict):
raise SAMLError("Configuration error !!")
logo = mdui.Logo()
for attr, value in logga.items():
if attr in logo.keys():
setattr(logo, attr, value)
inst.append(logo)
except KeyError:
pass
try:
_attr = "keywords"
val = _uiinfo[_attr]
inst = getattr(uii, _attr)
# list of basestrings, dictionary or list of dictionaries
if isinstance(val, list):
for value in val:
keyw = mdui.Keywords()
if isinstance(value, basestring):
keyw.text = value
elif isinstance(value, dict):
keyw.text = " ".join(value["text"])
try:
keyw.lang = value["lang"]
except KeyError:
pass
else:
raise SAMLError("Configuration error: ui_info keywords")
inst.append(keyw)
elif isinstance(val, dict):
keyw = mdui.Keywords()
keyw.text = " ".join(val["text"])
try:
keyw.lang = val["lang"]
except KeyError:
pass
inst.append(keyw)
else:
raise SAMLError("Configuration Error: ui_info keywords")
except KeyError:
pass
return uii
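# Illustrative shape of the ui_info configuration consumed above (the keys are
# taken from the branches handled; the concrete values and the Logo attribute
# names are assumptions). Each value may be a string, a {"text", "lang"} dict,
# or a list of either:
#
#   ui_info = {
#       "display_name": {"text": "Example SP", "lang": "en"},
#       "description": "An example service provider",
#       "information_url": "http://www.example.org/info",
#       "privacy_statement_url": "http://www.example.org/privacy",
#       "logo": {"text": "http://www.example.org/logo.png",
#                "width": "120", "height": "60", "lang": "en"},
#       "keywords": [{"text": ["example", "federation"], "lang": "en"}],
#   }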
def do_idpdisc(discovery_response):
return idpdisc.DiscoveryResponse(index="0", location=discovery_response,
binding=idpdisc.NAMESPACE)
ENDPOINTS = {
"sp": {
"artifact_resolution_service": (md.ArtifactResolutionService, True),
"single_logout_service": (md.SingleLogoutService, False),
"manage_name_id_service": (md.ManageNameIDService, False),
"assertion_consumer_service": (md.AssertionConsumerService, True),
},
"idp": {
"artifact_resolution_service": (md.ArtifactResolutionService, True),
"single_logout_service": (md.SingleLogoutService, False),
"manage_name_id_service": (md.ManageNameIDService, False),
"single_sign_on_service": (md.SingleSignOnService, False),
"name_id_mapping_service": (md.NameIDMappingService, False),
"assertion_id_request_service": (md.AssertionIDRequestService, False),
},
"aa": {
"artifact_resolution_service": (md.ArtifactResolutionService, True),
"single_logout_service": (md.SingleLogoutService, False),
"manage_name_id_service": (md.ManageNameIDService, False),
"assertion_id_request_service": (md.AssertionIDRequestService, False),
"attribute_service": (md.AttributeService, False)
},
"pdp": {
"authz_service": (md.AuthzService, True)
},
"aq": {
"authn_query_service": (md.AuthnQueryService, True)
}
}
ENDPOINT_EXT = {
"sp": {
"discovery_response": (idpdisc.DiscoveryResponse, True)
}
}
DEFAULT_BINDING = {
"assertion_consumer_service": BINDING_HTTP_POST,
"single_sign_on_service": BINDING_HTTP_REDIRECT,
"single_logout_service": BINDING_HTTP_POST,
"attribute_service": BINDING_SOAP,
"artifact_resolution_service": BINDING_SOAP,
"authn_query_service": BINDING_SOAP
}
def do_extensions(mname, item):
try:
_mod = __import__("saml2.extension.%s" % mname, globals(), locals(),
mname)
except ImportError:
return None
else:
res = []
for _cname, ava in item.items():
cls = getattr(_mod, _cname)
res.append(rec_factory(cls, **ava))
return res
def _do_nameid_format(cls, conf, typ):
namef = conf.getattr("name_id_format", typ)
if namef:
if isinstance(namef, basestring):
ids = [md.NameIDFormat(namef)]
else:
ids = [md.NameIDFormat(text=form) for form in namef]
setattr(cls, "name_id_format", ids)
def do_endpoints(conf, endpoints):
service = {}
for endpoint, (eclass, indexed) in endpoints.items():
try:
servs = []
i = 1
for args in conf[endpoint]:
if isinstance(args, basestring): # Assume it's the location
args = {"location": args,
"binding": DEFAULT_BINDING[endpoint]}
elif isinstance(args, tuple) or isinstance(args, list):
if len(args) == 2: # (location, binding)
args = {"location": args[0], "binding": args[1]}
elif len(args) == 3: # (location, binding, index)
args = {"location": args[0], "binding": args[1],
"index": args[2]}
if indexed:
if "index" not in args:
args["index"] = "%d" % i
i += 1
else:
try:
int(args["index"])
except ValueError:
raise
else:
args["index"] = str(args["index"])
servs.append(factory(eclass, **args))
service[endpoint] = servs
except KeyError:
pass
return service
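# Illustrative endpoint configuration accepted by do_endpoints() above, based
# on the branches handled (a bare location string, a (location, binding) pair,
# or a (location, binding, index) triple); the URLs are hypothetical:
#
#   endpoints = {
#       "single_logout_service": ["http://sp.example.org/slo"],
#       "assertion_consumer_service": [
#           ("http://sp.example.org/acs", BINDING_HTTP_POST),
#           ("http://sp.example.org/acs2", BINDING_HTTP_REDIRECT, 2),
#       ],
#   }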
DEFAULT = {
"want_assertions_signed": "true",
"authn_requests_signed": "false",
"want_authn_requests_signed": "false",
#"want_authn_requests_only_with_valid_cert": "false",
}
def do_attribute_consuming_service(conf, spsso):
service_description = service_name = None
requested_attributes = []
acs = conf.attribute_converters
req = conf.getattr("required_attributes", "sp")
if req:
requested_attributes.extend(do_requested_attribute(req, acs,
is_required="true"))
opt = conf.getattr("optional_attributes", "sp")
if opt:
requested_attributes.extend(do_requested_attribute(opt, acs))
try:
if conf.description:
try:
(text, lang) = conf.description
except ValueError:
text = conf.description
lang = "en"
service_description = [md.ServiceDescription(text=text, lang=lang)]
except KeyError:
pass
try:
if conf.name:
try:
(text, lang) = conf.name
except ValueError:
text = conf.name
lang = "en"
service_name = [md.ServiceName(text=text, lang=lang)]
except KeyError:
pass
# Must be both requested attributes and service name
if requested_attributes:
if not service_name:
service_name = [md.ServiceName(text="", lang="en")]
ac_serv = md.AttributeConsumingService(
index="1", service_name=service_name,
requested_attribute=requested_attributes)
if service_description:
ac_serv.service_description = service_description
spsso.attribute_consuming_service = [ac_serv]
def do_spsso_descriptor(conf, cert=None):
spsso = md.SPSSODescriptor()
spsso.protocol_support_enumeration = samlp.NAMESPACE
exts = conf.getattr("extensions", "sp")
if exts:
if spsso.extensions is None:
spsso.extensions = md.Extensions()
for key, val in exts.items():
_ext = do_extensions(key, val)
if _ext:
for _e in _ext:
spsso.extensions.add_extension_element(_e)
endps = conf.getattr("endpoints", "sp")
if endps:
for (endpoint, instlist) in do_endpoints(endps,
ENDPOINTS["sp"]).items():
setattr(spsso, endpoint, instlist)
ext = do_endpoints(endps, ENDPOINT_EXT["sp"])
if ext:
if spsso.extensions is None:
spsso.extensions = md.Extensions()
for vals in ext.values():
for val in vals:
spsso.extensions.add_extension_element(val)
ui_info = conf.getattr("ui_info", "sp")
if ui_info:
if spsso.extensions is None:
spsso.extensions = md.Extensions()
spsso.extensions.add_extension_element(do_uiinfo(ui_info))
if cert:
encryption_type = conf.encryption_type
spsso.key_descriptor = do_key_descriptor(cert, encryption_type)
for key in ["want_assertions_signed", "authn_requests_signed"]:
try:
val = conf.getattr(key, "sp")
if val is None:
setattr(spsso, key, DEFAULT[key]) # default ?!
else:
strval = "{0:>s}".format(str(val))
setattr(spsso, key, strval.lower())
except KeyError:
setattr(spsso, key, DEFAULTS[key])
do_attribute_consuming_service(conf, spsso)
_do_nameid_format(spsso, conf, "sp")
return spsso
def do_idpsso_descriptor(conf, cert=None):
idpsso = md.IDPSSODescriptor()
idpsso.protocol_support_enumeration = samlp.NAMESPACE
endps = conf.getattr("endpoints", "idp")
if endps:
for (endpoint, instlist) in do_endpoints(endps,
ENDPOINTS["idp"]).items():
setattr(idpsso, endpoint, instlist)
_do_nameid_format(idpsso, conf, "idp")
scopes = conf.getattr("scope", "idp")
if scopes:
if idpsso.extensions is None:
idpsso.extensions = md.Extensions()
for scope in scopes:
mdscope = shibmd.Scope()
mdscope.text = scope
# unless scope contains '*'/'+'/'?' assume non regexp ?
mdscope.regexp = "false"
idpsso.extensions.add_extension_element(mdscope)
ui_info = conf.getattr("ui_info", "idp")
if ui_info:
if idpsso.extensions is None:
idpsso.extensions = md.Extensions()
idpsso.extensions.add_extension_element(do_uiinfo(ui_info))
if cert:
idpsso.key_descriptor = do_key_descriptor(cert)
for key in ["want_authn_requests_signed"]:
#"want_authn_requests_only_with_valid_cert"]:
try:
val = conf.getattr(key, "idp")
if val is None:
setattr(idpsso, key, DEFAULT[key])
else:
setattr(idpsso, key, ("%s" % val).lower())
except KeyError:
setattr(idpsso, key, DEFAULTS[key])
return idpsso
def do_aa_descriptor(conf, cert):
aad = md.AttributeAuthorityDescriptor()
aad.protocol_support_enumeration = samlp.NAMESPACE
endps = conf.getattr("endpoints", "aa")
if endps:
for (endpoint, instlist) in do_endpoints(endps,
ENDPOINTS["aa"]).items():
setattr(aad, endpoint, instlist)
_do_nameid_format(aad, conf, "aa")
if cert:
aad.key_descriptor = do_key_descriptor(cert)
attributes = conf.getattr("attribute", "aa")
if attributes:
for attribute in attributes:
aad.attribute.append(Attribute(text=attribute))
attribute_profiles = conf.getattr("attribute_profile", "aa")
if attribute_profiles:
for attribute_profile in attribute_profiles:
aad.attribute.append(AttributeProfile(text=attribute_profile))
return aad
def do_aq_descriptor(conf, cert):
aqs = md.AuthnAuthorityDescriptor()
aqs.protocol_support_enumeration = samlp.NAMESPACE
endps = conf.getattr("endpoints", "aq")
if endps:
for (endpoint, instlist) in do_endpoints(endps,
ENDPOINTS["aq"]).items():
setattr(aqs, endpoint, instlist)
_do_nameid_format(aqs, conf, "aq")
if cert:
aqs.key_descriptor = do_key_descriptor(cert)
return aqs
def do_pdp_descriptor(conf, cert):
""" Create a Policy Decision Point descriptor """
pdp = md.PDPDescriptor()
pdp.protocol_support_enumeration = samlp.NAMESPACE
endps = conf.getattr("endpoints", "pdp")
if endps:
for (endpoint, instlist) in do_endpoints(endps,
ENDPOINTS["pdp"]).items():
setattr(pdp, endpoint, instlist)
_do_nameid_format(pdp, conf, "pdp")
if cert:
pdp.key_descriptor = do_key_descriptor(cert)
return pdp
def entity_descriptor(confd):
mycert = "".join(open(confd.cert_file).readlines()[1:-1])
entd = md.EntityDescriptor()
entd.entity_id = confd.entityid
if confd.valid_for:
entd.valid_until = in_a_while(hours=int(confd.valid_for))
if confd.organization is not None:
entd.organization = do_organization_info(confd.organization)
if confd.contact_person is not None:
entd.contact_person = do_contact_person_info(confd.contact_person)
if confd.entity_category:
entd.extensions = md.Extensions()
ava = [AttributeValue(text=c) for c in confd.entity_category]
attr = Attribute(attribute_value=ava,
name="http://macedir.org/entity-category")
item = mdattr.EntityAttributes(attribute=attr)
entd.extensions.add_extension_element(item)
serves = confd.serves
if not serves:
raise SAMLError(
'No service type ("sp","idp","aa") provided in the configuration')
if "sp" in serves:
confd.context = "sp"
entd.spsso_descriptor = do_spsso_descriptor(confd, mycert)
if "idp" in serves:
confd.context = "idp"
entd.idpsso_descriptor = do_idpsso_descriptor(confd, mycert)
if "aa" in serves:
confd.context = "aa"
entd.attribute_authority_descriptor = do_aa_descriptor(confd, mycert)
if "pdp" in serves:
confd.context = "pdp"
entd.pdp_descriptor = do_pdp_descriptor(confd, mycert)
if "aq" in serves:
confd.context = "aq"
entd.authn_authority_descriptor = do_aq_descriptor(confd, mycert)
return entd
def entities_descriptor(eds, valid_for, name, ident, sign, secc):
entities = md.EntitiesDescriptor(entity_descriptor=eds)
if valid_for:
entities.valid_until = in_a_while(hours=valid_for)
if name:
entities.name = name
if ident:
entities.id = ident
if sign:
if not ident:
ident = sid()
if not secc.key_file:
raise SAMLError("If you want to do signing you should define " +
"a key to sign with")
if not secc.my_cert:
raise SAMLError("If you want to do signing you should define " +
"where your public key are")
entities.signature = pre_signature_part(ident, secc.my_cert, 1)
entities.id = ident
xmldoc = secc.sign_statement("%s" % entities, class_name(entities))
entities = md.entities_descriptor_from_string(xmldoc)
else:
xmldoc = None
return entities, xmldoc
def sign_entity_descriptor(edesc, ident, secc):
"""
:param edesc: EntityDescriptor instance
:param ident: EntityDescriptor identifier
:param secc: Security context
:return: Tuple with EntityDescriptor instance and Signed XML document
"""
if not ident:
ident = sid()
edesc.signature = pre_signature_part(ident, secc.my_cert, 1)
edesc.id = ident
xmldoc = secc.sign_statement("%s" % edesc, class_name(edesc))
edesc = md.entity_descriptor_from_string(xmldoc)
return edesc, xmldoc | apache-2.0 |
mysz/versionner | versionner/config.py | 2 | 5820 | """Configuration-related classes for versionner"""
import codecs
import configparser
import pathlib
import re
import sys
from versionner import defaults
ENV_VERSIONNER_PROJECT_CONFIG_FILE = 'VERSIONNER_PROJECT_CONFIG_FILE'
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class FileConfig:
"""Single project file configuration"""
def __init__(self, filename, cfg):
"""
Evaluate single file configuration
        :param filename: path of the project file to modify
        :param cfg: configparser section holding this file's options
"""
self.filename = filename
self.file = pathlib.Path(filename)
self.enabled = cfg.getboolean('enabled', True)
self.search = cfg['search']
self.replace = cfg['replace']
self.date_format = cfg.get('date_format', None)
self.match = cfg.get('match', 'line')
self.search_flags = 0
self.encoding = cfg.get('encoding', 'utf-8')
search_flags = cfg.get('search_flags', '')
if search_flags:
search_flags = re.split(r'\s*,\s*', search_flags)
for search_flag in search_flags:
self.search_flags |= getattr(re, search_flag.upper())
def validate(self):
"""Validate current file configuration
:raise ValueError:
"""
if not self.file.exists():
raise ValueError("File \"%s\" doesn't exists")
if not self.search:
raise ValueError("Search cannot be empty")
if not self.replace:
raise ValueError("Replace cannot be empty")
if self.match not in ('file', 'line'):
raise ValueError("Match must be one of: file, line")
try:
codecs.lookup(self.encoding)
except LookupError:
raise ValueError("Unknown encoding: \"%s\"" % self.encoding)
def __repr__(self):
return '<FileConfig(%s)>' % self.filename
class Config:
"""Configuration"""
__slots__ = (
'command',
'commit',
'date_format',
'default_init_version',
'default_increase_value',
'files',
'value',
'up_part',
'vcs_commit_message',
'vcs_engine',
'vcs_tag_params',
'verbose',
'version_file',
)
def __init__(self, files=None):
"""Evaluate configuration
:return:
"""
self.command = None
self.commit = False
self.date_format = defaults.DEFAULT_DATE_FORMAT
self.default_init_version = defaults.DEFAULT_INIT_VERSION
self.default_increase_value = defaults.DEFAULT_INCREASE_VALUE
self.files = []
self.value = None
self.up_part = defaults.DEFAULT_UP_PART
self.vcs_commit_message = defaults.DEFAULT_VCS_COMMIT_MESSAGE
self.vcs_engine = 'git'
self.vcs_tag_params = []
self.verbose = False
self.version_file = defaults.DEFAULT_VERSION_FILE
if files:
self._parse_config_file(files)
def _parse_config_file(self, cfg_files):
"""Parse config file (ini) and set properties
:return:
"""
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler)
def _parse_global_section(self, cfg_handler):
"""Parse global ([versionner]) section
:param cfg_handler:
:return:
"""
# global configuration
if 'versionner' in cfg_handler:
cfg = cfg_handler['versionner']
if 'file' in cfg:
self.version_file = cfg['file']
if 'date_format' in cfg:
self.date_format = cfg['date_format']
if 'up_part' in cfg:
self.up_part = cfg['up_part']
if 'default_init_version' in cfg:
self.default_init_version = cfg['default_init_version']
if 'default_increase_value' in cfg:
self.default_increase_value = cfg.getint('default_increase_value')
def _parse_vcs_section(self, cfg_handler):
"""Parse [vcs] section
:param cfg_handler:
:return:
"""
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine']
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split("\n")))
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message']
def _parse_file_section(self, cfg_handler):
"""Parse [file:*] sections
:param cfg_handler:
:return:
"""
_number_rxp = re.compile(r'^\d+:(.)')
# project files configuration
for section in cfg_handler.sections():
if section.startswith('file:'):
path = section[5:]
path = _number_rxp.sub(r'\1', path)
project_file = FileConfig(path, cfg_handler[section])
if not project_file.date_format:
project_file.date_format = self.date_format
if project_file.enabled:
try:
project_file.validate()
except ValueError as exc:
print("Incorrect configuration for file \"%s\": %s" % (project_file.filename, exc.args[0]), file=sys.stderr)
else:
self.files.append(project_file)
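    # Illustrative INI file handled by the parsing methods above; section and
    # option names come from the code, the values are hypothetical and the
    # {version} placeholder in "replace" is an assumption:
    #
    #   [versionner]
    #   file = VERSION
    #   date_format = %Y-%m-%d
    #
    #   [vcs]
    #   engine = git
    #   commit_message = Bump version
    #
    #   [file:setup.py]
    #   enabled = true
    #   search = version='(.+)'
    #   replace = version='{version}'
    #   match = line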
def __repr__(self):
ret = '<' + self.__class__.__name__ + ': '
ret += ', '.join('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
return ret
| mit |
jimmymunoz/jeuxdemots | public/package/node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
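# Usage sketch (illustrative): arguments must be hashable, since the tuple of
# positional args is used directly as the cache key.
#
#   @memoize
#   def Fib(n):
#     return n if n < 2 else Fib(n - 1) + Fib(n - 2)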
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
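# Example round trip (illustrative values):
#   QualifiedTarget('src/base.gyp', 'base', 'host')
#       -> 'src/base.gyp:base#host'
#   ParseQualifiedTarget('src/base.gyp:base#host')
#       -> ['src/base.gyp', 'base', 'host']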
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_symlink_path| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
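# Illustrative results (assuming no symlinks, POSIX separators and a common
# working directory):
#   RelativePath('out/Debug/obj', 'out')  -> 'Debug/obj'
#   RelativePath('out', 'out/Debug/obj')  -> '../..'
#   RelativePath('out', 'out')            -> ''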
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
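# Illustrative encodings under the rules above (results shown as the raw
# strings produced):
#   EncodePOSIXShellArgument('simple')         -> simple
#   EncodePOSIXShellArgument('two words')      -> "two words"
#   EncodePOSIXShellArgument('say "hi"')       -> "say \"hi\""  (quotes escaped)
#   EncodePOSIXShellList(['cc', '-o', 'a b'])  -> cc -o "a b"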
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
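# Usage sketch (illustrative file name):
#   out = WriteOnDiff('build.ninja')
#   out.write('rule cc\n')
#   out.close()  # the target is only replaced if the contents changed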
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
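# Example (illustrative): uniquer([1, 2, 1, 3, 2]) -> [1, 2, 3]; first-seen
# order is preserved, and idfun can be passed to deduplicate on a derived key.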
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
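  # Usage sketch (illustrative): insertion order is preserved, unlike set().
  #   s = OrderedSet(['b', 'a', 'b', 'c'])
  #   list(s)            # ['b', 'a', 'c']
  #   s.pop()            # 'c' (most recently added)
  #   s.pop(last=False)  # 'b' (oldest remaining)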
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
| bsd-3-clause |
williamthegrey/swift | test/unit/proxy/controllers/test_account.py | 2 | 15815 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware.acl import format_acl
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_account_info
from swift.common import constraints
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
import swift.proxy.controllers.base
from test.unit import patch_policies
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
account_ring=FakeRing(), container_ring=FakeRing())
def _make_callback_func(self, context):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
return callback
def _assert_responses(self, method, test_cases):
if method in ('PUT', 'DELETE'):
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'AUTH_bob')
for responses, expected in test_cases:
with mock.patch(
'swift.proxy.controllers.base.http_connect',
fake_http_connect(*responses)):
req = Request.blank('/v1/AUTH_bob')
resp = getattr(controller, method)(req)
self.assertEqual(expected,
resp.status_int,
'Expected %s but got %s. Failed case: %s' %
(expected, resp.status_int, str(responses)))
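    # Each test case below pairs the status codes returned by the fake
    # backend account servers (one per replica) with the status the proxy
    # is expected to return to the client.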
def test_account_info_in_response_env(self):
controller = proxy_server.AccountController(self.app, 'AUTH_bob')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, body='')):
req = Request.blank('/v1/AUTH_bob', {'PATH_INFO': '/v1/AUTH_bob'})
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
self.assertTrue('swift.account/AUTH_bob' in resp.environ)
self.assertEqual(headers_to_account_info(resp.headers),
resp.environ['swift.account/AUTH_bob'])
def test_swift_owner(self):
owner_headers = {
'x-account-meta-temp-url-key': 'value',
'x-account-meta-temp-url-key-2': 'value'}
controller = proxy_server.AccountController(self.app, 'a')
req = Request.blank('/v1/a')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertTrue(key not in resp.headers)
req = Request.blank('/v1/a', environ={'swift_owner': True})
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertTrue(key in resp.headers)
def test_get_deleted_account(self):
resp_headers = {
'x-account-status': 'deleted',
}
controller = proxy_server.AccountController(self.app, 'a')
req = Request.blank('/v1/a')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(404, headers=resp_headers)):
resp = controller.HEAD(req)
self.assertEqual(410, resp.status_int)
def test_long_acct_names(self):
long_acct_name = '%sLongAccountName' % (
'Very' * (constraints.MAX_ACCOUNT_NAME_LENGTH // 4))
controller = proxy_server.AccountController(self.app, long_acct_name)
req = Request.blank('/v1/%s' % long_acct_name)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200)):
resp = controller.HEAD(req)
self.assertEqual(400, resp.status_int)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200)):
resp = controller.GET(req)
self.assertEqual(400, resp.status_int)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200)):
resp = controller.POST(req)
self.assertEqual(400, resp.status_int)
def test_sys_meta_headers_PUT(self):
# check that headers in sys meta namespace make it through
# the proxy controller
sys_meta_key = '%stest' % get_sys_meta_prefix('account')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Account-Meta-Test'
# allow PUTs to account...
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.PUT(req)
self.assertEqual(context['method'], 'PUT')
self.assertTrue(sys_meta_key in context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertTrue(user_meta_key in context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the proxy controller
sys_meta_key = '%stest' % get_sys_meta_prefix('account')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Account-Meta-Test'
controller = proxy_server.AccountController(self.app, 'a')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.POST(req)
self.assertEqual(context['method'], 'POST')
self.assertTrue(sys_meta_key in context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertTrue(user_meta_key in context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def _make_user_and_sys_acl_headers_data(self):
acl = {
'admin': ['AUTH_alice', 'AUTH_bob'],
'read-write': ['AUTH_carol'],
'read-only': [],
}
user_prefix = 'x-account-' # external, user-facing
user_headers = {(user_prefix + 'access-control'): format_acl(
version=2, acl_dict=acl)}
sys_prefix = get_sys_meta_prefix('account') # internal, system-facing
sys_headers = {(sys_prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
return user_headers, sys_headers
def test_account_acl_headers_translated_for_GET_HEAD(self):
# Verify that a GET/HEAD which receives X-Account-Sysmeta-Acl-* headers
# from the account server will remap those headers to X-Account-Acl-*
hdrs_ext, hdrs_int = self._make_user_and_sys_acl_headers_data()
controller = proxy_server.AccountController(self.app, 'acct')
for verb in ('GET', 'HEAD'):
req = Request.blank('/v1/acct', environ={'swift_owner': True})
controller.GETorHEAD_base = lambda *_: Response(
headers=hdrs_int, environ={
'PATH_INFO': '/acct',
'REQUEST_METHOD': verb,
})
method = getattr(controller, verb)
resp = method(req)
for header, value in hdrs_ext.items():
if value:
self.assertEqual(resp.headers.get(header), value)
else:
# blank ACLs should result in no header
self.assertTrue(header not in resp.headers)
def test_add_acls_impossible_cases(self):
# For test coverage: verify that defensive coding does defend, in cases
# that shouldn't arise naturally
# add_acls should do nothing if REQUEST_METHOD isn't HEAD/GET/PUT/POST
resp = Response()
controller = proxy_server.AccountController(self.app, 'a')
resp.environ['PATH_INFO'] = '/a'
resp.environ['REQUEST_METHOD'] = 'OPTIONS'
controller.add_acls_from_sys_metadata(resp)
self.assertEqual(1, len(resp.headers)) # we always get Content-Type
self.assertEqual(2, len(resp.environ))
def test_memcache_key_impossible_cases(self):
# For test coverage: verify that defensive coding does defend, in cases
# that shouldn't arise naturally
self.assertRaises(
ValueError,
lambda: swift.proxy.controllers.base.get_container_memcache_key(
'/a', None))
def test_stripping_swift_admin_headers(self):
# Verify that a GET/HEAD which receives privileged headers from the
# account server will strip those headers for non-swift_owners
headers = {
'x-account-meta-harmless': 'hi mom',
'x-account-meta-temp-url-key': 's3kr1t',
}
controller = proxy_server.AccountController(self.app, 'acct')
for verb in ('GET', 'HEAD'):
for env in ({'swift_owner': True}, {'swift_owner': False}):
req = Request.blank('/v1/acct', environ=env)
controller.GETorHEAD_base = lambda *_: Response(
headers=headers, environ={
'PATH_INFO': '/acct',
'REQUEST_METHOD': verb,
})
method = getattr(controller, verb)
resp = method(req)
self.assertEqual(resp.headers.get('x-account-meta-harmless'),
'hi mom')
privileged_header_present = (
'x-account-meta-temp-url-key' in resp.headers)
self.assertEqual(privileged_header_present, env['swift_owner'])
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201), 201),
((201, 201, 404), 201),
((201, 201, 503), 201),
((201, 404, 404), 404),
((201, 404, 503), 503),
((201, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
@patch_policies(
[StoragePolicy(0, 'zero', True, object_ring=FakeRing(replicas=4))])
class TestAccountController4Replicas(TestAccountController):
def setUp(self):
self.app = proxy_server.Application(
None,
FakeMemcache(),
account_ring=FakeRing(replicas=4),
container_ring=FakeRing(replicas=4))
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201, 201), 201),
((201, 201, 201, 404), 201),
((201, 201, 201, 503), 201),
((201, 201, 404, 404), 503),
((201, 201, 404, 503), 503),
((201, 201, 503, 503), 503),
((201, 404, 404, 404), 404),
((201, 404, 404, 503), 503),
((201, 404, 503, 503), 503),
((201, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 503),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204, 204), 204),
((204, 204, 204, 404), 204),
((204, 204, 204, 503), 204),
((204, 204, 404, 404), 503),
((204, 204, 404, 503), 503),
((204, 204, 503, 503), 503),
((204, 404, 404, 404), 404),
((204, 404, 404, 503), 503),
((204, 404, 503, 503), 503),
((204, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 503),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204, 204), 204),
((204, 204, 204, 404), 204),
((204, 204, 204, 503), 204),
((204, 204, 404, 404), 503),
((204, 204, 404, 503), 503),
((204, 204, 503, 503), 503),
((204, 404, 404, 404), 404),
((204, 404, 404, 503), 503),
((204, 404, 503, 503), 503),
((204, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 503),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
applicationdevm/XlsxWriter | xlsxwriter/test/comparison/test_chart_name04.py | 8 | 1935 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_font04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [43944960, 45705472]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({
'name': ['Sheet1', 0, 0],
'name_font': {'bold': 0, 'italic': 1},
})
chart.set_x_axis({
'name': ['Sheet1', 1, 0],
'name_font': {'bold': 0, 'italic': 1},
})
chart.set_y_axis({
'name': ['Sheet1', 2, 0],
'name_font': {'bold': 1, 'italic': 1},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
nmearl/pyqtgraph | pyqtgraph/SRTTransform3D.py | 45 | 10879 | # -*- coding: utf-8 -*-
from .Qt import QtCore, QtGui
from .Vector import Vector
from .Transform3D import Transform3D
from .Vector import Vector
import numpy as np
class SRTTransform3D(Transform3D):
"""4x4 Transform matrix that can always be represented as a combination of 3 matrices: scale * rotate * translate
This transform has no shear; angles are always preserved.
"""
def __init__(self, init=None):
Transform3D.__init__(self)
self.reset()
if init is None:
return
if init.__class__ is QtGui.QTransform:
init = SRTTransform(init)
if isinstance(init, dict):
self.restoreState(init)
elif isinstance(init, SRTTransform3D):
self._state = {
'pos': Vector(init._state['pos']),
'scale': Vector(init._state['scale']),
'angle': init._state['angle'],
'axis': Vector(init._state['axis']),
}
self.update()
elif isinstance(init, SRTTransform):
self._state = {
'pos': Vector(init._state['pos']),
'scale': Vector(init._state['scale']),
'angle': init._state['angle'],
'axis': Vector(0, 0, 1),
}
self._state['scale'][2] = 1.0
self.update()
elif isinstance(init, QtGui.QMatrix4x4):
self.setFromMatrix(init)
else:
raise Exception("Cannot build SRTTransform3D from argument type:", type(init))
def getScale(self):
return Vector(self._state['scale'])
def getRotation(self):
"""Return (angle, axis) of rotation"""
return self._state['angle'], Vector(self._state['axis'])
def getTranslation(self):
return Vector(self._state['pos'])
def reset(self):
self._state = {
'pos': Vector(0,0,0),
'scale': Vector(1,1,1),
'angle': 0.0, ## in degrees
'axis': (0, 0, 1)
}
self.update()
def translate(self, *args):
"""Adjust the translation of this transform"""
t = Vector(*args)
self.setTranslate(self._state['pos']+t)
def setTranslate(self, *args):
"""Set the translation of this transform"""
self._state['pos'] = Vector(*args)
self.update()
def scale(self, *args):
"""adjust the scale of this transform"""
## try to prevent accidentally setting 0 scale on z axis
if len(args) == 1 and hasattr(args[0], '__len__'):
args = args[0]
if len(args) == 2:
args = args + (1,)
s = Vector(*args)
self.setScale(self._state['scale'] * s)
def setScale(self, *args):
"""Set the scale of this transform"""
if len(args) == 1 and hasattr(args[0], '__len__'):
args = args[0]
if len(args) == 2:
args = args + (1,)
self._state['scale'] = Vector(*args)
self.update()
def rotate(self, angle, axis=(0,0,1)):
"""Adjust the rotation of this transform"""
origAxis = self._state['axis']
if axis[0] == origAxis[0] and axis[1] == origAxis[1] and axis[2] == origAxis[2]:
self.setRotate(self._state['angle'] + angle)
else:
m = QtGui.QMatrix4x4()
m.translate(*self._state['pos'])
m.rotate(self._state['angle'], *self._state['axis'])
m.rotate(angle, *axis)
m.scale(*self._state['scale'])
self.setFromMatrix(m)
def setRotate(self, angle, axis=(0,0,1)):
"""Set the transformation rotation to angle (in degrees)"""
self._state['angle'] = angle
self._state['axis'] = Vector(axis)
self.update()
def setFromMatrix(self, m):
"""
        Set this transform based on the elements of *m*
The input matrix must be affine AND have no shear,
otherwise the conversion will most likely fail.
"""
import numpy.linalg
for i in range(4):
self.setRow(i, m.row(i))
m = self.matrix().reshape(4,4)
## translation is 4th column
self._state['pos'] = m[:3,3]
## scale is vector-length of first three columns
scale = (m[:3,:3]**2).sum(axis=0)**0.5
## see whether there is an inversion
z = np.cross(m[0, :3], m[1, :3])
if np.dot(z, m[2, :3]) < 0:
scale[1] *= -1 ## doesn't really matter which axis we invert
self._state['scale'] = scale
## rotation axis is the eigenvector with eigenvalue=1
r = m[:3, :3] / scale[np.newaxis, :]
try:
evals, evecs = numpy.linalg.eig(r)
except:
print("Rotation matrix: %s" % str(r))
print("Scale: %s" % str(scale))
print("Original matrix: %s" % str(m))
raise
eigIndex = np.argwhere(np.abs(evals-1) < 1e-6)
if len(eigIndex) < 1:
print("eigenvalues: %s" % str(evals))
print("eigenvectors: %s" % str(evecs))
print("index: %s, %s" % (str(eigIndex), str(evals-1)))
raise Exception("Could not determine rotation axis.")
axis = evecs[:,eigIndex[0,0]].real
axis /= ((axis**2).sum())**0.5
self._state['axis'] = axis
## trace(r) == 2 cos(angle) + 1, so:
cos = (r.trace()-1)*0.5 ## this only gets us abs(angle)
## The off-diagonal values can be used to correct the angle ambiguity,
## but we need to figure out which element to use:
axisInd = np.argmax(np.abs(axis))
rInd,sign = [((1,2), -1), ((0,2), 1), ((0,1), -1)][axisInd]
## Then we have r-r.T = sin(angle) * 2 * sign * axis[axisInd];
## solve for sin(angle)
sin = (r-r.T)[rInd] / (2. * sign * axis[axisInd])
## finally, we get the complete angle from arctan(sin/cos)
self._state['angle'] = np.arctan2(sin, cos) * 180 / np.pi
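        ## Sketch of the identities used above, assuming r is a proper rotation by angle a
        ## about the unit axis n (Rodrigues' formula):
        ##   r = cos(a)*I + sin(a)*[n]_x + (1-cos(a))*n*n.T
        ## hence trace(r) = 1 + 2*cos(a) and r - r.T = 2*sin(a)*[n]_x, which is why the
        ## off-diagonal element picked via axisInd recovers sin(a) with the sign table above.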
if self._state['angle'] == 0:
self._state['axis'] = (0,0,1)
def as2D(self):
"""Return a QTransform representing the x,y portion of this transform (if possible)"""
return SRTTransform(self)
#def __div__(self, t):
#"""A / B == B^-1 * A"""
#dt = t.inverted()[0] * self
#return SRTTransform(dt)
#def __mul__(self, t):
#return SRTTransform(QtGui.QTransform.__mul__(self, t))
def saveState(self):
p = self._state['pos']
s = self._state['scale']
ax = self._state['axis']
#if s[0] == 0:
#raise Exception('Invalid scale: %s' % str(s))
return {
'pos': (p[0], p[1], p[2]),
'scale': (s[0], s[1], s[2]),
'angle': self._state['angle'],
'axis': (ax[0], ax[1], ax[2])
}
def restoreState(self, state):
self._state['pos'] = Vector(state.get('pos', (0.,0.,0.)))
scale = state.get('scale', (1.,1.,1.))
scale = tuple(scale) + (1.,) * (3-len(scale))
self._state['scale'] = Vector(scale)
self._state['angle'] = state.get('angle', 0.)
self._state['axis'] = state.get('axis', (0, 0, 1))
self.update()
def update(self):
Transform3D.setToIdentity(self)
## modifications to the transform are multiplied on the right, so we need to reverse order here.
Transform3D.translate(self, *self._state['pos'])
Transform3D.rotate(self, self._state['angle'], *self._state['axis'])
Transform3D.scale(self, *self._state['scale'])
def __repr__(self):
return str(self.saveState())
def matrix(self, nd=3):
if nd == 3:
return np.array(self.copyDataTo()).reshape(4,4)
elif nd == 2:
m = np.array(self.copyDataTo()).reshape(4,4)
m[2] = m[3]
m[:,2] = m[:,3]
return m[:3,:3]
else:
raise Exception("Argument 'nd' must be 2 or 3")
if __name__ == '__main__':
import widgets
import GraphicsView
from functions import *
app = QtGui.QApplication([])
win = QtGui.QMainWindow()
win.show()
cw = GraphicsView.GraphicsView()
#cw.enableMouse()
win.setCentralWidget(cw)
s = QtGui.QGraphicsScene()
cw.setScene(s)
win.resize(600,600)
cw.enableMouse()
cw.setRange(QtCore.QRectF(-100., -100., 200., 200.))
class Item(QtGui.QGraphicsItem):
def __init__(self):
QtGui.QGraphicsItem.__init__(self)
self.b = QtGui.QGraphicsRectItem(20, 20, 20, 20, self)
self.b.setPen(QtGui.QPen(mkPen('y')))
self.t1 = QtGui.QGraphicsTextItem(self)
self.t1.setHtml('<span style="color: #F00">R</span>')
self.t1.translate(20, 20)
self.l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0, self)
self.l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10, self)
self.l1.setPen(QtGui.QPen(mkPen('y')))
self.l2.setPen(QtGui.QPen(mkPen('y')))
def boundingRect(self):
return QtCore.QRectF()
def paint(self, *args):
pass
#s.addItem(b)
#s.addItem(t1)
item = Item()
s.addItem(item)
l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0)
l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10)
l1.setPen(QtGui.QPen(mkPen('r')))
l2.setPen(QtGui.QPen(mkPen('r')))
s.addItem(l1)
s.addItem(l2)
tr1 = SRTTransform()
tr2 = SRTTransform()
tr3 = QtGui.QTransform()
tr3.translate(20, 0)
tr3.rotate(45)
print("QTransform -> Transform: %s" % str(SRTTransform(tr3)))
print("tr1: %s" % str(tr1))
tr2.translate(20, 0)
tr2.rotate(45)
print("tr2: %s" % str(tr2))
dt = tr2/tr1
print("tr2 / tr1 = %s" % str(dt))
print("tr2 * tr1 = %s" % str(tr2*tr1))
tr4 = SRTTransform()
tr4.scale(-1, 1)
tr4.rotate(30)
print("tr1 * tr4 = %s" % str(tr1*tr4))
w1 = widgets.TestROI((19,19), (22, 22), invertible=True)
#w2 = widgets.TestROI((0,0), (150, 150))
w1.setZValue(10)
s.addItem(w1)
#s.addItem(w2)
w1Base = w1.getState()
#w2Base = w2.getState()
def update():
tr1 = w1.getGlobalTransform(w1Base)
#tr2 = w2.getGlobalTransform(w2Base)
item.setTransform(tr1)
#def update2():
#tr1 = w1.getGlobalTransform(w1Base)
#tr2 = w2.getGlobalTransform(w2Base)
#t1.setTransform(tr1)
#w1.setState(w1Base)
#w1.applyGlobalTransform(tr2)
w1.sigRegionChanged.connect(update)
#w2.sigRegionChanged.connect(update2)
from .SRTTransform import SRTTransform
| mit |
sander76/home-assistant | homeassistant/components/thethingsnetwork/sensor.py | 5 | 4925 | """Support for The Things Network's Data storage integration."""
import asyncio
import logging
import aiohttp
from aiohttp.hdrs import ACCEPT, AUTHORIZATION
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_DEVICE_ID,
ATTR_TIME,
CONF_DEVICE_ID,
CONTENT_TYPE_JSON,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from . import DATA_TTN, TTN_ACCESS_KEY, TTN_APP_ID, TTN_DATA_STORAGE_URL
_LOGGER = logging.getLogger(__name__)
ATTR_RAW = "raw"
DEFAULT_TIMEOUT = 10
CONF_VALUES = "values"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Required(CONF_VALUES): {cv.string: cv.string},
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up The Things Network Data storage sensors."""
ttn = hass.data.get(DATA_TTN)
device_id = config.get(CONF_DEVICE_ID)
values = config.get(CONF_VALUES)
app_id = ttn.get(TTN_APP_ID)
access_key = ttn.get(TTN_ACCESS_KEY)
ttn_data_storage = TtnDataStorage(hass, app_id, device_id, access_key, values)
success = await ttn_data_storage.async_update()
if not success:
return
devices = []
for value, unit_of_measurement in values.items():
devices.append(
TtnDataSensor(ttn_data_storage, device_id, value, unit_of_measurement)
)
async_add_entities(devices, True)
class TtnDataSensor(SensorEntity):
"""Representation of a The Things Network Data Storage sensor."""
def __init__(self, ttn_data_storage, device_id, value, unit_of_measurement):
"""Initialize a The Things Network Data Storage sensor."""
self._ttn_data_storage = ttn_data_storage
self._state = None
self._device_id = device_id
self._unit_of_measurement = unit_of_measurement
self._value = value
self._name = f"{self._device_id} {self._value}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
if self._ttn_data_storage.data is not None:
try:
return self._state[self._value]
except KeyError:
return None
return None
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
if self._ttn_data_storage.data is not None:
return {
ATTR_DEVICE_ID: self._device_id,
ATTR_RAW: self._state["raw"],
ATTR_TIME: self._state["time"],
}
async def async_update(self):
"""Get the current state."""
await self._ttn_data_storage.async_update()
self._state = self._ttn_data_storage.data
class TtnDataStorage:
"""Get the latest data from The Things Network Data Storage."""
def __init__(self, hass, app_id, device_id, access_key, values):
"""Initialize the data object."""
self.data = None
self._hass = hass
self._app_id = app_id
self._device_id = device_id
self._values = values
self._url = TTN_DATA_STORAGE_URL.format(
app_id=app_id, endpoint="api/v2/query", device_id=device_id
)
self._headers = {ACCEPT: CONTENT_TYPE_JSON, AUTHORIZATION: f"key {access_key}"}
async def async_update(self):
"""Get the current state from The Things Network Data Storage."""
try:
session = async_get_clientsession(self._hass)
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await session.get(self._url, headers=self._headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Error while accessing: %s", self._url)
return None
status = response.status
if status == 204:
_LOGGER.error("The device is not available: %s", self._device_id)
return None
if status == HTTP_UNAUTHORIZED:
_LOGGER.error("Not authorized for Application ID: %s", self._app_id)
return None
if status == HTTP_NOT_FOUND:
_LOGGER.error("Application ID is not available: %s", self._app_id)
return None
data = await response.json()
self.data = data[-1]
for value in self._values.items():
if value[0] not in self.data:
_LOGGER.warning("Value not available: %s", value[0])
return response
| apache-2.0 |
slowfranklin/samba | source4/heimdal/lib/wind/rfc3454.py | 88 | 2296 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
def read(filename):
"""return a dict of tables from rfc3454"""
f = open(filename, 'r')
inTable = False
ret = {}
while True:
l = f.readline()
if not l:
break
if inTable:
m = re.search('^ *----- End Table ([A-Z0-9\.]+) ----- *$', l)
if m:
ret[m.group(1)] = t
inTable = False
else:
t.append(l)
if re.search('^ *----- Start Table ([A-Z0-9\.]+) ----- *$', l):
inTable = True
t = []
f.close()
return ret
| gpl-3.0 |
rockneurotiko/django | tests/field_deconstruction/tests.py | 189 | 18358 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.test.utils import isolate_lru_cache
from django.utils import six
class FieldDeconstructionTests(SimpleTestCase):
"""
Tests the deconstruct() method on all core fields.
"""
def test_name(self):
"""
Tests the outputting of the correct name if assigned one.
"""
# First try using a "normal" field
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("is_awesome_test")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "is_awesome_test")
self.assertIsInstance(name, six.text_type)
# Now try with a ForeignKey
field = models.ForeignKey("some_fake.ModelName", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("author")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "author")
def test_auto_field(self):
field = models.AutoField(primary_key=True)
field.set_attributes_from_name("id")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.AutoField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"primary_key": True})
def test_big_integer_field(self):
field = models.BigIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BigIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_boolean_field(self):
field = models.BooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.BooleanField(default=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"default": True})
def test_char_field(self):
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65})
field = models.CharField(max_length=65, null=True, blank=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})
def test_char_field_choices(self):
field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})
def test_csi_field(self):
field = models.CommaSeparatedIntegerField(max_length=100)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 100})
def test_date_field(self):
field = models.DateField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now": True})
def test_datetime_field(self):
field = models.DateTimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateTimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True})
# Bug #21785
field = models.DateTimeField(auto_now=True, auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})
def test_decimal_field(self):
field = models.DecimalField(max_digits=5, decimal_places=2)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
def test_decimal_field_0_decimal_places(self):
"""
A DecimalField with decimal_places=0 should work (#22272).
"""
field = models.DecimalField(max_digits=5, decimal_places=0)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})
def test_email_field(self):
field = models.EmailField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 254})
field = models.EmailField(max_length=255)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 255})
def test_file_field(self):
field = models.FileField(upload_to="foo/bar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar"})
# Test max_length
field = models.FileField(upload_to="foo/bar", max_length=200)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
def test_float_field(self):
field = models.FloatField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FloatField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_foreign_key(self):
# Test basic pointing
from django.contrib.auth.models import Permission
field = models.ForeignKey("auth.Permission", models.CASCADE)
field.remote_field.model = Permission
field.remote_field.field_name = "id"
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swap detection for swappable model
field = models.ForeignKey("auth.User", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test nonexistent (for now) model
field = models.ForeignKey("something.Else", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE})
# Test on_delete
field = models.ForeignKey("auth.User", models.SET_NULL)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
# Test to_field preservation
field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE})
# Test related_name preservation
field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_foreign_key_swapped(self):
with isolate_lru_cache(apps.get_swappable_settings_name):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ForeignKey("auth.Permission", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_image_field(self):
field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ImageField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
def test_integer_field(self):
field = models.IntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_ip_address_field(self):
field = models.IPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_generic_ip_address_field(self):
field = models.GenericIPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.GenericIPAddressField(protocol="IPv6")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"protocol": "IPv6"})
def test_many_to_many_field(self):
# Test normal
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swappable
field = models.ManyToManyField("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test through
field = models.ManyToManyField("auth.Permission", through="auth.Group")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
# Test custom db_table
field = models.ManyToManyField("auth.Permission", db_table="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
# Test related_name
field = models.ManyToManyField("auth.Permission", related_name="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_many_to_many_field_swapped(self):
with isolate_lru_cache(apps.get_swappable_settings_name):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_null_boolean_field(self):
field = models.NullBooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.NullBooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_integer_field(self):
field = models.PositiveIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_small_integer_field(self):
field = models.PositiveSmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_slug_field(self):
field = models.SlugField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.SlugField(db_index=False, max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"db_index": False, "max_length": 231})
def test_small_integer_field(self):
field = models.SmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_time_field(self):
field = models.TimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.TimeField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now': True})
field = models.TimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now_add': True})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.URLField(max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 231})
def test_binary_field(self):
field = models.BinaryField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BinaryField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
| bsd-3-clause |
aaigner/LIGGGHTS-PUBLIC | python/install.py | 2 | 2208 | #!/usr/bin/env python
# copy LIGGGHTS src/libliggghts.so and liggghts.py to system dirs
instructions = """
Syntax: python install.py [-h] [libdir] [pydir]
libdir = target dir for src/libliggghts.so, default = /usr/local/lib
pydir = target dir for liggghts.py, default = Python site-packages dir
"""
import sys,os # ,commands
if sys.version_info[0] == 3:
import subprocess as commands
else:
import commands
if (len(sys.argv) > 1 and sys.argv[1] == "-h") or len(sys.argv) > 3:
print(instructions)
sys.exit()
if len(sys.argv) >= 2: libdir = sys.argv[1]
else: libdir = "/usr/local/lib"
if len(sys.argv) == 3: pydir = sys.argv[2]
else: pydir = ""
# copy C lib to libdir if it exists
# warn if not in LD_LIBRARY_PATH or LD_LIBRARY_PATH is undefined
if not os.path.isdir(libdir):
print("ERROR: libdir %s does not exist" % libdir)
sys.exit()
if "LD_LIBRARY_PATH" not in os.environ:
print("WARNING: LD_LIBRARY_PATH undefined, cannot check libdir %s" % libdir)
else:
libpaths = os.environ['LD_LIBRARY_PATH'].split(':')
if libdir not in libpaths:
print("WARNING: libdir %s not in LD_LIBRARY_PATH" % libdir)
str = "cp ../src/libliggghts.so %s" % libdir
print(str)
outstr = commands.getoutput(str)
if len(outstr.strip()): print(outstr)
# copy liggghts.py to pydir if it exists
# if pydir not specified, install in site-packages via distutils setup()
if pydir:
if not os.path.isdir(pydir):
print("ERROR: pydir %s does not exist" % pydir)
sys.exit()
str = "cp ../python/liggghts.py %s" % pydir
print(str)
outstr = commands.getoutput(str)
if len(outstr.strip()): print(outstr)
sys.exit()
print("installing liggghts.py in Python site-packages dir")
os.chdir('../python') # in case invoked via make in src dir
from distutils.core import setup
sys.argv = ["setup.py","install"] # as if had run "python setup.py install"
setup(name = "liggghts",
version = "3.8.0",
author = "Christoph Kloss",
author_email = "[email protected]",
url = "http://www.cfdem.com",
description = "LIGGGHTS - LAMMPS improved for general granular and granular heat transfer simulations",
py_modules = ["liggghts"])
| gpl-2.0 |
turon/openthread | tests/scripts/thread-cert/Cert_5_1_10_RouterAttachLinkQuality.py | 1 | 6969 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import mle
import node
LEADER = 1
ROUTER1 = 2
ROUTER2 = 3
ROUTER3 = 4
class Cert_5_1_10_RouterAttachLinkQuality(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1, 5):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ROUTER2].set_panid(0xface)
self.nodes[ROUTER2].set_mode('rsdn')
self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER2].add_whitelist(
self.nodes[ROUTER3].get_addr64(), rssi=-85
)
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
self.nodes[ROUTER3].set_panid(0xface)
self.nodes[ROUTER3].set_mode('rsdn')
self.nodes[ROUTER3].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER3].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[ROUTER3].enable_whitelist()
self.nodes[ROUTER3].set_router_selection_jitter(1)
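        # The rssi=-85 whitelist entry above degrades the ROUTER2/ROUTER3 link, so the
        # assertions in test() expect ROUTER3 to attach through ROUTER1, the better-quality parent.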
def tearDown(self):
for n in list(self.nodes.values()):
n.stop()
n.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[ROUTER3].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER3].get_state(), 'router')
leader_messages = self.simulator.get_messages_sent_by(LEADER)
router1_messages = self.simulator.get_messages_sent_by(ROUTER1)
router2_messages = self.simulator.get_messages_sent_by(ROUTER2)
router3_messages = self.simulator.get_messages_sent_by(ROUTER3)
# 1 - Leader, Router1, Router2
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router2_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router2_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router2_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router1_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router2_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
# 3 - Router3
msg = router3_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::2")
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Challenge)
msg.assertMleMessageContainsTlv(mle.ScanMask)
msg.assertMleMessageContainsTlv(mle.Version)
# 4 - Router1, Router2
msg = router1_messages.next_mle_message(
mle.CommandType.PARENT_RESPONSE
)
msg.assertSentToNode(self.nodes[ROUTER3])
msg = router2_messages.next_mle_message(
mle.CommandType.PARENT_RESPONSE
)
msg.assertSentToNode(self.nodes[ROUTER3])
# 5 - Router3
msg = router3_messages.next_mle_message(
mle.CommandType.CHILD_ID_REQUEST
)
msg.assertSentToNode(self.nodes[ROUTER1])
msg.assertMleMessageContainsTlv(mle.Response)
msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
msg.assertMleMessageContainsOptionalTlv(mle.MleFrameCounter)
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Timeout)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsTlv(mle.TlvRequest)
msg.assertMleMessageDoesNotContainTlv(mle.AddressRegistration)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
kingland/runtime | deps/v8/tools/testrunner/server/main.py | 12 | 8953 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from . import daemon
from . import local_handler
from . import presence_handler
from . import signatures
from . import status_handler
from . import work_handler
from ..network import perfdata
class Server(daemon.Daemon):
def __init__(self, pidfile, root, stdin="/dev/null",
stdout="/dev/null", stderr="/dev/null"):
super(Server, self).__init__(pidfile, stdin, stdout, stderr)
self.root = root
self.local_handler = None
self.local_handler_thread = None
self.work_handler = None
self.work_handler_thread = None
self.status_handler = None
self.status_handler_thread = None
self.presence_daemon = None
self.presence_daemon_thread = None
self.peers = []
self.jobs = multiprocessing.cpu_count()
self.peer_list_lock = threading.Lock()
self.perf_data_lock = None
self.presence_daemon_lock = None
self.datadir = os.path.join(self.root, "data")
pubkey_fingerprint_filename = os.path.join(self.datadir, "mypubkey")
with open(pubkey_fingerprint_filename) as f:
self.pubkey_fingerprint = f.read().strip()
self.relative_perf_filename = os.path.join(self.datadir, "myperf")
if os.path.exists(self.relative_perf_filename):
with open(self.relative_perf_filename) as f:
try:
self.relative_perf = float(f.read())
except:
self.relative_perf = 1.0
else:
self.relative_perf = 1.0
def run(self):
os.nice(20)
self.ip = presence_handler.GetOwnIP()
self.perf_data_manager = perfdata.PerfDataManager(self.datadir)
self.perf_data_lock = threading.Lock()
self.local_handler = local_handler.LocalSocketServer(self)
self.local_handler_thread = threading.Thread(
target=self.local_handler.serve_forever)
self.local_handler_thread.start()
self.work_handler = work_handler.WorkSocketServer(self)
self.work_handler_thread = threading.Thread(
target=self.work_handler.serve_forever)
self.work_handler_thread.start()
self.status_handler = status_handler.StatusSocketServer(self)
self.status_handler_thread = threading.Thread(
target=self.status_handler.serve_forever)
self.status_handler_thread.start()
self.presence_daemon = presence_handler.PresenceDaemon(self)
self.presence_daemon_thread = threading.Thread(
target=self.presence_daemon.serve_forever)
self.presence_daemon_thread.start()
self.presence_daemon.FindPeers()
time.sleep(0.5) # Give those peers some time to reply.
with self.peer_list_lock:
for p in self.peers:
if p.address == self.ip: continue
status_handler.RequestTrustedPubkeys(p, self)
while True:
try:
self.PeriodicTasks()
time.sleep(60)
except Exception, e:
print("MAIN LOOP EXCEPTION: %s" % e)
self.Shutdown()
break
except KeyboardInterrupt:
self.Shutdown()
break
def Shutdown(self):
with open(self.relative_perf_filename, "w") as f:
f.write("%s" % self.relative_perf)
self.presence_daemon.shutdown()
self.presence_daemon.server_close()
self.local_handler.shutdown()
self.local_handler.server_close()
self.work_handler.shutdown()
self.work_handler.server_close()
self.status_handler.shutdown()
self.status_handler.server_close()
def PeriodicTasks(self):
# If we know peers we don't trust, see if someone else trusts them.
with self.peer_list_lock:
for p in self.peers:
if p.trusted: continue
if self.IsTrusted(p.pubkey):
p.trusted = True
status_handler.ITrustYouNow(p)
continue
for p2 in self.peers:
if not p2.trusted: continue
status_handler.TryTransitiveTrust(p2, p.pubkey, self)
# TODO: Ping for more peers waiting to be discovered.
# TODO: Update the checkout (if currently idle).
def AddPeer(self, peer):
with self.peer_list_lock:
for p in self.peers:
if p.address == peer.address:
return
self.peers.append(peer)
if peer.trusted:
status_handler.ITrustYouNow(peer)
def DeletePeer(self, peer_address):
with self.peer_list_lock:
for i in xrange(len(self.peers)):
if self.peers[i].address == peer_address:
del self.peers[i]
return
def MarkPeerAsTrusting(self, peer_address):
with self.peer_list_lock:
for p in self.peers:
if p.address == peer_address:
p.trusting_me = True
break
def UpdatePeerPerformance(self, peer_address, performance):
with self.peer_list_lock:
for p in self.peers:
if p.address == peer_address:
p.relative_performance = performance
def CopyToTrusted(self, pubkey_filename):
with open(pubkey_filename, "r") as f:
lines = f.readlines()
fingerprint = lines[-1].strip()
target_filename = self._PubkeyFilename(fingerprint)
shutil.copy(pubkey_filename, target_filename)
with self.peer_list_lock:
for peer in self.peers:
if peer.address == self.ip: continue
if peer.pubkey == fingerprint:
status_handler.ITrustYouNow(peer)
else:
result = self.SignTrusted(fingerprint)
status_handler.NotifyNewTrusted(peer, result)
return fingerprint
def _PubkeyFilename(self, pubkey_fingerprint):
return os.path.join(self.root, "trusted", "%s.pem" % pubkey_fingerprint)
def IsTrusted(self, pubkey_fingerprint):
return os.path.exists(self._PubkeyFilename(pubkey_fingerprint))
def ListTrusted(self):
path = os.path.join(self.root, "trusted")
if not os.path.exists(path): return []
return [ f[:-4] for f in os.listdir(path) if f.endswith(".pem") ]
def SignTrusted(self, pubkey_fingerprint):
if not self.IsTrusted(pubkey_fingerprint):
return []
filename = self._PubkeyFilename(pubkey_fingerprint)
result = signatures.ReadFileAndSignature(filename) # Format: [key, sig].
return [pubkey_fingerprint, result[0], result[1], self.pubkey_fingerprint]
def AcceptNewTrusted(self, data):
# The format of |data| matches the return value of |SignTrusted()|.
if not data: return
fingerprint = data[0]
pubkey = data[1]
signature = data[2]
signer = data[3]
if not self.IsTrusted(signer):
return
if self.IsTrusted(fingerprint):
return # Already trusted.
filename = self._PubkeyFilename(fingerprint)
signer_pubkeyfile = self._PubkeyFilename(signer)
if not signatures.VerifySignature(filename, pubkey, signature,
signer_pubkeyfile):
return
return # Nothing more to do.
def AddPerfData(self, test_key, duration, arch, mode):
data_store = self.perf_data_manager.GetStore(arch, mode)
data_store.RawUpdatePerfData(str(test_key), duration)
def CompareOwnPerf(self, test, arch, mode):
data_store = self.perf_data_manager.GetStore(arch, mode)
observed = data_store.FetchPerfData(test)
if not observed: return
own_perf_estimate = observed / test.duration
with self.perf_data_lock:
kLearnRateLimiter = 9999
self.relative_perf *= kLearnRateLimiter
self.relative_perf += own_perf_estimate
self.relative_perf /= (kLearnRateLimiter + 1)
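      # The update above is a slow exponential moving average: one observation moves
      # self.relative_perf only 1/(kLearnRateLimiter + 1) of the way toward the new
      # estimate, so a single outlier test run barely changes the advertised performance.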
| apache-2.0 |
alikins/ansible | lib/ansible/module_utils/network/common/config.py | 38 | 13532 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import hashlib
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.network.common.utils import to_list
DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/', 'echo']
DEFAULT_IGNORE_LINES_RE = set([
re.compile(r"Using \d+ out of \d+ bytes"),
re.compile(r"Building configuration"),
re.compile(r"Current configuration : \d+ bytes")
])
class ConfigLine(object):
def __init__(self, raw):
self.text = str(raw).strip()
self.raw = raw
self._children = list()
self._parents = list()
def __str__(self):
return self.raw
def __eq__(self, other):
return self.line == other.line
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
for item in self._children:
if item.text == key:
return item
raise KeyError(key)
@property
def line(self):
line = self.parents
line.append(self.text)
return ' '.join(line)
@property
def children(self):
return _obj_to_text(self._children)
@property
def child_objs(self):
return self._children
@property
def parents(self):
return _obj_to_text(self._parents)
@property
def path(self):
config = _obj_to_raw(self._parents)
config.append(self.raw)
return '\n'.join(config)
@property
def has_children(self):
return len(self._children) > 0
@property
def has_parents(self):
return len(self._parents) > 0
def add_child(self, obj):
if not isinstance(obj, ConfigLine):
raise AssertionError('child must be of type `ConfigLine`')
self._children.append(obj)
def ignore_line(text, tokens=None):
for item in (tokens or DEFAULT_COMMENT_TOKENS):
if text.startswith(item):
return True
for regex in DEFAULT_IGNORE_LINES_RE:
if regex.match(text):
return True
def _obj_to_text(x):
return [o.text for o in x]
def _obj_to_raw(x):
return [o.raw for o in x]
def _obj_to_block(objects, visited=None):
items = list()
for o in objects:
if o not in items:
items.append(o)
for child in o._children:
if child not in items:
items.append(child)
return _obj_to_raw(items)
def dumps(objects, output='block', comments=False):
if output == 'block':
items = _obj_to_block(objects)
elif output == 'commands':
items = _obj_to_text(objects)
else:
raise TypeError('unknown value supplied for keyword output')
if output != 'commands':
if comments:
for index, item in enumerate(items):
nextitem = index + 1
if nextitem < len(items) and not item.startswith(' ') and items[nextitem].startswith(' '):
item = '!\n%s' % item
items[index] = item
items.append('!')
items.append('end')
return '\n'.join(items)
class NetworkConfig(object):
def __init__(self, indent=1, contents=None, ignore_lines=None):
self._indent = indent
self._items = list()
self._config_text = None
if ignore_lines:
for item in ignore_lines:
if not isinstance(item, re._pattern_type):
item = re.compile(item)
DEFAULT_IGNORE_LINES_RE.add(item)
if contents:
self.load(contents)
@property
def items(self):
return self._items
@property
def config_text(self):
return self._config_text
@property
def sha1(self):
sha1 = hashlib.sha1()
sha1.update(to_bytes(str(self), errors='surrogate_or_strict'))
return sha1.digest()
def __getitem__(self, key):
for line in self:
if line.text == key:
return line
raise KeyError(key)
def __iter__(self):
return iter(self._items)
def __str__(self):
return '\n'.join([c.raw for c in self.items])
def __len__(self):
return len(self._items)
def load(self, s):
self._config_text = s
self._items = self.parse(s)
def loadfp(self, fp):
return self.load(open(fp).read())
def parse(self, lines, comment_tokens=None):
toplevel = re.compile(r'\S')
childline = re.compile(r'^\s*(.+)$')
entry_reg = re.compile(r'([{};])')
ancestors = list()
config = list()
curlevel = 0
prevlevel = 0
for linenum, line in enumerate(to_native(lines, errors='surrogate_or_strict').split('\n')):
text = entry_reg.sub('', line).strip()
cfg = ConfigLine(line)
if not text or ignore_line(text, comment_tokens):
continue
# handle top level commands
if toplevel.match(line):
ancestors = [cfg]
prevlevel = curlevel
curlevel = 0
# handle sub level commands
else:
match = childline.match(line)
line_indent = match.start(1)
prevlevel = curlevel
curlevel = int(line_indent / self._indent)
if (curlevel - 1) > prevlevel:
curlevel = prevlevel + 1
parent_level = curlevel - 1
cfg._parents = ancestors[:curlevel]
if curlevel > len(ancestors):
config.append(cfg)
continue
for i in range(curlevel, len(ancestors)):
ancestors.pop()
ancestors.append(cfg)
ancestors[parent_level].add_child(cfg)
config.append(cfg)
return config
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
if item.parents == path[:-1]:
return item
def get_block(self, path):
if not isinstance(path, list):
raise AssertionError('path argument must be a list object')
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self._expand_block(obj)
def get_block_config(self, path):
block = self.get_block(path)
return dumps(block, 'block')
def _expand_block(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj._children:
if child in S:
continue
self._expand_block(child, S)
return S
def _diff_line(self, other):
updates = list()
for item in self.items:
if item not in other:
updates.append(item)
return updates
def _diff_strict(self, other):
updates = list()
for index, line in enumerate(self.items):
try:
if str(line).strip() != str(other[index]).strip():
updates.append(line)
except (AttributeError, IndexError):
updates.append(line)
return updates
def _diff_exact(self, other):
updates = list()
if len(other) != len(self.items):
updates.extend(self.items)
else:
for ours, theirs in zip(self.items, other):
if ours != theirs:
updates.extend(self.items)
break
return updates
def difference(self, other, match='line', path=None, replace=None):
"""Perform a config diff against the another network config
:param other: instance of NetworkConfig to diff against
:param match: type of diff to perform. valid values are 'line',
'strict', 'exact'
:param path: context in the network config to filter the diff
:param replace: the method used to generate the replacement lines.
valid values are 'block', 'line'
        :returns: a list of ConfigLine objects that are different
"""
if path and match != 'line':
try:
other = other.get_block(path)
except ValueError:
other = list()
else:
other = other.items
# generate a list of ConfigLines that aren't in other
meth = getattr(self, '_diff_%s' % match)
updates = meth(other)
if replace == 'block':
parents = list()
for item in updates:
if not item.has_parents:
parents.append(item)
else:
for p in item._parents:
if p not in parents:
parents.append(p)
updates = list()
for item in parents:
updates.extend(self._expand_block(item))
visited = set()
expanded = list()
for item in updates:
for p in item._parents:
if p.line not in visited:
visited.add(p.line)
expanded.append(p)
expanded.append(item)
visited.add(item.line)
return expanded
def add(self, lines, parents=None):
ancestors = list()
offset = 0
obj = None
# global config command
if not parents:
for line in lines:
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_block(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self._indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj._parents = list(ancestors)
ancestors[-1]._children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in lines:
# check if child already exists
for child in ancestors[-1]._children:
if child.text == line:
break
else:
offset = len(parents) * self._indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item._parents = ancestors
ancestors[-1]._children.append(item)
self.items.append(item)
class CustomNetworkConfig(NetworkConfig):
def items_text(self):
return [item.text for item in self.items]
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.child_objs:
if child in S:
continue
self.expand_section(child, S)
return S
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
| gpl-3.0 |
Thhhza/XlsxWriter | xlsxwriter/test/comparison/test_chart_title01.py | 8 | 1535 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_title01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with default title."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [46165376, 54462720]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5',
'name': 'Foo'})
chart.set_title({'none': True})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
trbs/django-oauth-toolkit | oauth2_provider/ext/rest_framework/permissions.py | 11 | 1922 | import logging
from django.core.exceptions import ImproperlyConfigured
from rest_framework.permissions import BasePermission
from ...settings import oauth2_settings
log = logging.getLogger('oauth2_provider')
SAFE_HTTP_METHODS = ['GET', 'HEAD', 'OPTIONS']
class TokenHasScope(BasePermission):
"""
The request is authenticated as a user and the token used has the right scope
"""
def has_permission(self, request, view):
token = request.auth
if not token:
return False
if hasattr(token, 'scope'): # OAuth 2
required_scopes = self.get_scopes(request, view)
log.debug("Required scopes to access resource: {0}".format(required_scopes))
return token.is_valid(required_scopes)
assert False, ('TokenHasScope requires the '
'`oauth2_provider.rest_framework.OAuth2Authentication` authentication '
'class to be used.')
def get_scopes(self, request, view):
try:
return getattr(view, 'required_scopes')
except AttributeError:
raise ImproperlyConfigured(
'TokenHasScope requires the view to define the required_scopes attribute')
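# Illustrative DRF view configuration (a sketch based on the docstring above;
# the view name, scope name, and import locations are assumptions and may
# differ between versions of this package):
#   class SongList(generics.ListAPIView):
#       authentication_classes = [OAuth2Authentication]
#       permission_classes = [TokenHasScope]
#       required_scopes = ['music']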
class TokenHasReadWriteScope(TokenHasScope):
"""
The request is authenticated as a user and the token used has the right scope
"""
def get_scopes(self, request, view):
try:
required_scopes = super(TokenHasReadWriteScope, self).get_scopes(request, view)
except ImproperlyConfigured:
required_scopes = []
# TODO: code duplication!! see dispatch in ReadWriteScopedResourceMixin
if request.method.upper() in SAFE_HTTP_METHODS:
read_write_scope = oauth2_settings.READ_SCOPE
else:
read_write_scope = oauth2_settings.WRITE_SCOPE
return required_scopes + [read_write_scope]
| bsd-2-clause |
saurabh6790/OFF-RISAPP | patches/april_2013/p05_update_file_data.py | 30 | 2406 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes, webnotes.utils, os
def execute():
webnotes.reload_doc("core", "doctype", "file_data")
webnotes.reset_perms("File Data")
singles = get_single_doctypes()
for doctype in webnotes.conn.sql_list("""select parent from tabDocField where
fieldname='file_list'"""):
# the other scenario is handled in p07_update_file_data_2
if doctype in singles:
update_file_list(doctype, singles)
# export_to_files([["DocType", doctype]])
def get_single_doctypes():
return webnotes.conn.sql_list("""select name from tabDocType
where ifnull(issingle,0)=1""")
def update_file_list(doctype, singles):
if doctype in singles:
doc = webnotes.doc(doctype, doctype)
if doc.file_list:
update_for_doc(doctype, doc)
webnotes.conn.set_value(doctype, None, "file_list", None)
else:
try:
for doc in webnotes.conn.sql("""select name, file_list from `tab%s` where
ifnull(file_list, '')!=''""" % doctype, as_dict=True):
update_for_doc(doctype, doc)
webnotes.conn.commit()
webnotes.conn.sql("""alter table `tab%s` drop column `file_list`""" % doctype)
except Exception, e:
print webnotes.getTraceback()
if (e.args and e.args[0]!=1054) or not e.args:
raise
def update_for_doc(doctype, doc):
for filedata in doc.file_list.split("\n"):
if not filedata:
continue
filedata = filedata.split(",")
if len(filedata)==2:
filename, fileid = filedata[0], filedata[1]
else:
continue
exists = True
if not (filename.startswith("http://") or filename.startswith("https://")):
if not os.path.exists(webnotes.utils.get_site_path(webnotes.conf.files_path, filename)):
exists = False
if exists:
if webnotes.conn.exists("File Data", fileid):
try:
fd = webnotes.bean("File Data", fileid)
if not (fd.doc.attached_to_doctype and fd.doc.attached_to_name):
fd.doc.attached_to_doctype = doctype
fd.doc.attached_to_name = doc.name
fd.save()
else:
fd = webnotes.bean("File Data", copy=fd.doclist)
fd.doc.attached_to_doctype = doctype
fd.doc.attached_to_name = doc.name
fd.doc.name = None
fd.insert()
except webnotes.DuplicateEntryError:
pass
else:
webnotes.conn.sql("""delete from `tabFile Data` where name=%s""",
fileid)
| agpl-3.0 |
wyvernnot/learn_python_through_unittest | tests/ClassMagicTest.py | 1 | 2708 | import unittest
from unittest.mock import Mock
class ClassMagicTest(unittest.TestCase):
def test_new(self):
class A(object):
def __new__(cls, *args, **kwargs):
# An overridden __new__ must return the newly created instance
return object.__new__(cls, *args, **kwargs)
def __init__(self):
self.foo = 0
a = A()
self.assertEqual(a.foo, 0)
def test_toString(self):
class A(object):
def __str__(self):
return 'a instance'
a = A()
self.assertEqual(str(a), 'a instance')
def test_iter(self):
class Range(object):
def __init__(self, count):
self.count = count
def __iter__(self):
return self
def __next__(self):
if self.count == 0:
raise StopIteration
self.count -= 1
return self.count
r = Range(3)
m = Mock()
for a in r:
m(a)
self.assertEqual(m.call_count, 3)
def test_items(self):
class A(object):
ok = 'foo'
def __getitem__(self, item):
return item.upper()
a = A()
self.assertEqual(a["ok"], 'OK')
class B(object):
def __init__(self):
self.foo = 1
def __getattr__(self, item):
return item
b = B()
self.assertEqual(b.z, 'z')
self.assertEqual(b.foo, 1)
def test_call(self):
class A(object):
pass
class B(A):
def __call__(self, *args, **kwargs):
return 0
b = B()
self.assertTrue(callable(b))
def test_property(self):
class Exam(object):
def __init__(self, score):
self.__score = score
@property
def score(self):
return self.__score
# Why must this name be the same as 'score'? Because @score.setter
# only works when the setter reuses the property's name.
@score.setter
def score(self, val):
self.__score = val
e = Exam(99)
self.assertEqual(e.score, 99)
e.score = 8
self.assertEqual(e.score, 8)
def test_super(self):
class Animal(object):
def __init__(self, name):
self.name = name
class Cat(Animal):
def __init__(self, name, age):
super().__init__(name)
self.age = age
@property
def super(self):
return super()
c = Cat('Lulu', 3)
self.assertEqual(c.name, 'Lulu')
self.assertEqual(c.age, 3)
| mit |
kaushik94/boto | boto/ec2/reservedinstance.py | 17 | 12948 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
from boto.utils import parse_ts
class ReservedInstancesOffering(EC2Object):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None, instance_tenancy=None,
currency_code=None, offering_type=None,
recurring_charges=None, pricing_details=None):
super(ReservedInstancesOffering, self).__init__(connection)
self.id = id
self.instance_type = instance_type
self.availability_zone = availability_zone
self.duration = duration
self.fixed_price = fixed_price
self.usage_price = usage_price
self.description = description
self.instance_tenancy = instance_tenancy
self.currency_code = currency_code
self.offering_type = offering_type
self.recurring_charges = recurring_charges
self.pricing_details = pricing_details
def __repr__(self):
return 'ReservedInstanceOffering:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'recurringCharges':
self.recurring_charges = ResultSet([('item', RecurringCharge)])
return self.recurring_charges
elif name == 'pricingDetailsSet':
self.pricing_details = ResultSet([('item', PricingDetail)])
return self.pricing_details
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesOfferingId':
self.id = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'duration':
self.duration = int(value)
elif name == 'fixedPrice':
self.fixed_price = value
elif name == 'usagePrice':
self.usage_price = value
elif name == 'productDescription':
self.description = value
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'offeringType':
self.offering_type = value
elif name == 'marketplace':
self.marketplace = True if value == 'true' else False
def describe(self):
print 'ID=%s' % self.id
print '\tInstance Type=%s' % self.instance_type
print '\tZone=%s' % self.availability_zone
print '\tDuration=%s' % self.duration
print '\tFixed Price=%s' % self.fixed_price
print '\tUsage Price=%s' % self.usage_price
print '\tDescription=%s' % self.description
def purchase(self, instance_count=1, dry_run=False):
return self.connection.purchase_reserved_instance_offering(
self.id,
instance_count,
dry_run=dry_run
)
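# Illustrative purchase flow (a sketch; assumes an EC2 connection `conn`
# obtained elsewhere, e.g. via boto.ec2.connect_to_region):
#   offerings = conn.get_all_reserved_instances_offerings()
#   offerings[0].purchase(instance_count=1)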
class RecurringCharge(object):
def __init__(self, connection=None, frequency=None, amount=None):
self.frequency = frequency
self.amount = amount
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class PricingDetail(object):
def __init__(self, connection=None, price=None, count=None):
self.price = price
self.count = count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class ReservedInstance(ReservedInstancesOffering):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None,
instance_count=None, state=None):
super(ReservedInstance, self).__init__(connection, id, instance_type,
availability_zone, duration,
fixed_price, usage_price,
description)
self.instance_count = instance_count
self.state = state
self.start = None
def __repr__(self):
return 'ReservedInstance:%s' % self.id
def endElement(self, name, value, connection):
if name == 'reservedInstancesId':
self.id = value
if name == 'instanceCount':
self.instance_count = int(value)
elif name == 'state':
self.state = value
elif name == 'start':
self.start = value
else:
super(ReservedInstance, self).endElement(name, value, connection)
class ReservedInstanceListing(EC2Object):
def __init__(self, connection=None, listing_id=None, id=None,
create_date=None, update_date=None,
status=None, status_message=None, client_token=None):
self.connection = connection
self.listing_id = listing_id
self.id = id
self.create_date = create_date
self.update_date = update_date
self.status = status
self.status_message = status_message
self.client_token = client_token
def startElement(self, name, attrs, connection):
if name == 'instanceCounts':
self.instance_counts = ResultSet([('item', InstanceCount)])
return self.instance_counts
elif name == 'priceSchedules':
self.price_schedules = ResultSet([('item', PriceSchedule)])
return self.price_schedules
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesListingId':
self.listing_id = value
elif name == 'reservedInstancesId':
self.id = value
elif name == 'createDate':
self.create_date = value
elif name == 'updateDate':
self.update_date = value
elif name == 'status':
self.status = value
elif name == 'statusMessage':
self.status_message = value
else:
setattr(self, name, value)
class InstanceCount(object):
def __init__(self, connection=None, state=None, instance_count=None):
self.state = state
self.instance_count = instance_count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'state':
self.state = value
elif name == 'instanceCount':
self.instance_count = int(value)
else:
setattr(self, name, value)
class PriceSchedule(object):
def __init__(self, connection=None, term=None, price=None,
currency_code=None, active=None):
self.connection = connection
self.term = term
self.price = price
self.currency_code = currency_code
self.active = active
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'term':
self.term = int(value)
elif name == 'price':
self.price = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'active':
self.active = True if value == 'true' else False
else:
setattr(self, name, value)
class ReservedInstancesConfiguration(object):
def __init__(self, connection=None, availability_zone=None, platform=None,
instance_count=None, instance_type=None):
self.connection = connection
self.availability_zone = availability_zone
self.platform = platform
self.instance_count = instance_count
self.instance_type = instance_type
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'availabilityZone':
self.availability_zone = value
elif name == 'platform':
self.platform = value
elif name == 'instanceCount':
self.instance_count = int(value)
elif name == 'instanceType':
self.instance_type = value
else:
setattr(self, name, value)
class ModifyReservedInstancesResult(object):
def __init__(self, connection=None, modification_id=None):
self.connection = connection
self.modification_id = modification_id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesModificationId':
self.modification_id = value
else:
setattr(self, name, value)
class ModificationResult(object):
def __init__(self, connection=None, modification_id=None,
availability_zone=None, platform=None, instance_count=None,
instance_type=None):
self.connection = connection
self.modification_id = modification_id
self.availability_zone = availability_zone
self.platform = platform
self.instance_count = instance_count
self.instance_type = instance_type
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesModificationId':
self.modification_id = value
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'platform':
self.platform = value
elif name == 'instanceCount':
self.instance_count = int(value)
elif name == 'instanceType':
self.instance_type = value
else:
setattr(self, name, value)
class ReservedInstancesModification(object):
def __init__(self, connection=None, modification_id=None,
reserved_instances=None, modification_results=None,
create_date=None, update_date=None, effective_date=None,
status=None, status_message=None, client_token=None):
self.connection = connection
self.modification_id = modification_id
self.reserved_instances = reserved_instances
self.modification_results = modification_results
self.create_date = create_date
self.update_date = update_date
self.effective_date = effective_date
self.status = status
self.status_message = status_message
self.client_token = client_token
def startElement(self, name, attrs, connection):
if name == 'reservedInstancesSet':
self.reserved_instances = ResultSet([
('item', ReservedInstance)
])
return self.reserved_instances
elif name == 'modificationResultSet':
self.modification_results = ResultSet([
('item', ModificationResult)
])
return self.modification_results
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesModificationId':
self.modification_id = value
elif name == 'createDate':
self.create_date = parse_ts(value)
elif name == 'updateDate':
self.update_date = parse_ts(value)
elif name == 'effectiveDate':
self.effective_date = parse_ts(value)
elif name == 'status':
self.status = value
elif name == 'statusMessage':
self.status_message = value
elif name == 'clientToken':
self.client_token = value
else:
setattr(self, name, value)
| mit |
bqbn/addons-server | src/olympia/git/tests/test_admin.py | 1 | 2320 | from django.contrib.admin.sites import AdminSite
from pyquery import PyQuery as pq
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.amo.urlresolvers import reverse
from olympia.git.admin import GitExtractionEntryAdmin
from olympia.git.models import GitExtractionEntry
class TestGitExtractionEntryAdmin(TestCase):
def setUp(self):
super().setUp()
self.user = user_factory()
self.grant_permission(self.user, 'Admin:GitExtractionEdit')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:git_gitextractionentry_changelist')
self.admin = GitExtractionEntryAdmin(
model=GitExtractionEntry, admin_site=AdminSite()
)
def test_has_add_permission(self):
assert self.admin.has_add_permission(request=None) is False
def test_has_change_permission(self):
assert self.admin.has_change_permission(request=None) is False
def test_list_view(self):
GitExtractionEntry.objects.create(addon=addon_factory())
# 9 queries:
# - 2 transaction savepoints because of tests
# - 2 request user and groups
# - 2 COUNT(*) on extraction entries for pagination and total display
# - 1 all git extraction entries in one query
# - 1 all add-ons in one query
# - 1 all add-ons translations in one query
with self.assertNumQueries(9):
response = self.client.get(self.list_url)
assert response.status_code == 200
html = pq(response.content)
assert html('.column-id').length == 1
assert html('.actions option[value="delete_selected"]').length == 1
def test_list_view_is_restricted(self):
user = user_factory()
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.list_url)
assert response.status_code == 403
def test_formatted_addon(self):
addon = addon_factory()
entry = GitExtractionEntry.objects.create(addon=addon)
formatted_addon = self.admin.formatted_addon(entry)
assert (
reverse('admin:addons_addon_change', args=(addon.pk,))
in formatted_addon
)
assert str(addon) in formatted_addon
| bsd-3-clause |
pranavtendolkr/horizon | openstack_dashboard/dashboards/admin/metadata_defs/urls.py | 62 | 1253 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.admin.metadata_defs import views
NAMESPACES = r'^(?P<namespace_id>[^/]+)/%s$'
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.metadata_defs.views',
url(r'^$', views.AdminIndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(NAMESPACES % 'detail', views.DetailView.as_view(), name='detail'),
url(r'^(?P<id>[^/]+)/resource_types/$',
views.ManageResourceTypes.as_view(), name='resource_types'),
)
| apache-2.0 |
ghtmtt/QGIS | tests/src/python/test_layer_dependencies.py | 31 | 14442 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSnappingUtils (complement to C++-based tests)
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Hugo Mercier'
__date__ = '12/07/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import (QgsProject,
QgsVectorLayer,
QgsMapSettings,
QgsSnappingUtils,
QgsSnappingConfig,
QgsTolerance,
QgsRectangle,
QgsPointXY,
QgsFeature,
QgsGeometry,
QgsLayerDefinition,
QgsMapLayerDependency
)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QSize, QPoint
from qgis.PyQt.QtTest import QSignalSpy
import tempfile
from qgis.utils import spatialite_connect
# Convenience instances in case you may need them
start_app()
class TestLayerDependencies(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
pass
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
pass
def setUp(self):
"""Run before each test."""
# create a temp SpatiaLite db with a trigger
fo = tempfile.NamedTemporaryFile()
fn = fo.name
fo.close()
self.fn = fn
con = spatialite_connect(fn)
cur = con.cursor()
cur.execute("SELECT InitSpatialMetadata(1)")
cur.execute("create table node(id integer primary key autoincrement);")
cur.execute("select AddGeometryColumn('node', 'geom', 4326, 'POINT');")
cur.execute("create table section(id integer primary key autoincrement, node1 integer, node2 integer);")
cur.execute("select AddGeometryColumn('section', 'geom', 4326, 'LINESTRING');")
cur.execute("create trigger add_nodes after insert on section begin insert into node (geom) values (st_startpoint(NEW.geom)); insert into node (geom) values (st_endpoint(NEW.geom)); end;")
cur.execute("insert into node (geom) values (geomfromtext('point(0 0)', 4326));")
cur.execute("insert into node (geom) values (geomfromtext('point(1 0)', 4326));")
cur.execute("create table node2(id integer primary key autoincrement);")
cur.execute("select AddGeometryColumn('node2', 'geom', 4326, 'POINT');")
cur.execute("create trigger add_nodes2 after insert on node begin insert into node2 (geom) values (st_translate(NEW.geom, 0.2, 0, 0)); end;")
con.commit()
con.close()
self.pointsLayer = QgsVectorLayer("dbname='%s' table=\"node\" (geom) sql=" % fn, "points", "spatialite")
assert (self.pointsLayer.isValid())
self.linesLayer = QgsVectorLayer("dbname='%s' table=\"section\" (geom) sql=" % fn, "lines", "spatialite")
assert (self.linesLayer.isValid())
self.pointsLayer2 = QgsVectorLayer("dbname='%s' table=\"node2\" (geom) sql=" % fn, "_points2", "spatialite")
assert (self.pointsLayer2.isValid())
QgsProject.instance().addMapLayers([self.pointsLayer, self.linesLayer, self.pointsLayer2])
# save the project file
fo = tempfile.NamedTemporaryFile()
fn = fo.name
fo.close()
self.projectFile = fn
QgsProject.instance().setFileName(self.projectFile)
QgsProject.instance().write()
def tearDown(self):
"""Run after each test."""
QgsProject.instance().clear()
pass
def test_resetSnappingIndex(self):
self.pointsLayer.setDependencies([])
self.linesLayer.setDependencies([])
self.pointsLayer2.setDependencies([])
ms = QgsMapSettings()
ms.setOutputSize(QSize(100, 100))
ms.setExtent(QgsRectangle(0, 0, 1, 1))
self.assertTrue(ms.hasValidSettings())
u = QgsSnappingUtils()
u.setMapSettings(ms)
cfg = u.config()
cfg.setEnabled(True)
cfg.setMode(QgsSnappingConfig.AdvancedConfiguration)
cfg.setIndividualLayerSettings(self.pointsLayer,
QgsSnappingConfig.IndividualLayerSettings(True,
QgsSnappingConfig.VertexFlag, 20, QgsTolerance.Pixels, 0.0, 0.0))
u.setConfig(cfg)
m = u.snapToMap(QPoint(95, 100))
self.assertTrue(m.isValid())
self.assertTrue(m.hasVertex())
self.assertEqual(m.point(), QgsPointXY(1, 0))
f = QgsFeature(self.linesLayer.fields())
f.setId(1)
geom = QgsGeometry.fromWkt("LINESTRING(0 0,1 1)")
f.setGeometry(geom)
self.linesLayer.startEditing()
self.linesLayer.addFeatures([f])
self.linesLayer.commitChanges()
l1 = len([f for f in self.pointsLayer.getFeatures()])
self.assertEqual(l1, 4)
m = u.snapToMap(QPoint(95, 0))
# snapping not updated
self.pointsLayer.setDependencies([])
self.assertEqual(m.isValid(), False)
# set layer dependencies
self.pointsLayer.setDependencies([QgsMapLayerDependency(self.linesLayer.id())])
# add another line
f = QgsFeature(self.linesLayer.fields())
f.setId(2)
geom = QgsGeometry.fromWkt("LINESTRING(0 0,0.5 0.5)")
f.setGeometry(geom)
self.linesLayer.startEditing()
self.linesLayer.addFeatures([f])
self.linesLayer.commitChanges()
# check the snapped point is OK
m = u.snapToMap(QPoint(45, 50))
self.assertTrue(m.isValid())
self.assertTrue(m.hasVertex())
self.assertEqual(m.point(), QgsPointXY(0.5, 0.5))
self.pointsLayer.setDependencies([])
# test chained layer dependencies A -> B -> C
cfg.setIndividualLayerSettings(self.pointsLayer2,
QgsSnappingConfig.IndividualLayerSettings(True,
QgsSnappingConfig.VertexFlag, 20, QgsTolerance.Pixels, 0.0, 0.0))
u.setConfig(cfg)
self.pointsLayer.setDependencies([QgsMapLayerDependency(self.linesLayer.id())])
self.pointsLayer2.setDependencies([QgsMapLayerDependency(self.pointsLayer.id())])
# add another line
f = QgsFeature(self.linesLayer.fields())
f.setId(3)
geom = QgsGeometry.fromWkt("LINESTRING(0 0.2,0.5 0.8)")
f.setGeometry(geom)
self.linesLayer.startEditing()
self.linesLayer.addFeatures([f])
self.linesLayer.commitChanges()
# check the second snapped point is OK
m = u.snapToMap(QPoint(75, 100 - 80))
self.assertTrue(m.isValid())
self.assertTrue(m.hasVertex())
self.assertEqual(m.point(), QgsPointXY(0.7, 0.8))
self.pointsLayer.setDependencies([])
self.pointsLayer2.setDependencies([])
def test_circular_dependencies_with_2_layers(self):
spy_points_data_changed = QSignalSpy(self.pointsLayer.dataChanged)
spy_lines_data_changed = QSignalSpy(self.linesLayer.dataChanged)
spy_points_repaint_requested = QSignalSpy(self.pointsLayer.repaintRequested)
spy_lines_repaint_requested = QSignalSpy(self.linesLayer.repaintRequested)
# only points fire dataChanged because we change its dependencies
self.assertTrue(self.pointsLayer.setDependencies([QgsMapLayerDependency(self.linesLayer.id())]))
self.assertEqual(len(spy_points_data_changed), 1)
self.assertEqual(len(spy_lines_data_changed), 0)
# lines fire dataChanged because we change its dependencies
# points fire dataChanged because it depends on line
self.assertTrue(self.linesLayer.setDependencies([QgsMapLayerDependency(self.pointsLayer.id())]))
self.assertEqual(len(spy_points_data_changed), 2)
self.assertEqual(len(spy_lines_data_changed), 1)
f = QgsFeature(self.pointsLayer.fields())
f.setId(1)
geom = QgsGeometry.fromWkt("POINT(0 0)")
f.setGeometry(geom)
self.pointsLayer.startEditing()
# new point fire featureAdded so depending line fire dataChanged
# point depends on line, so fire dataChanged
self.pointsLayer.addFeatures([f])
self.assertEqual(len(spy_points_data_changed), 3)
self.assertEqual(len(spy_lines_data_changed), 2)
# added feature is deleted and added with its new defined id
# (it was -1 before) so it fires 2 more signal dataChanged on
# depending line (on featureAdded and on featureDeleted)
# and so 2 more signal on points because it depends on line
self.pointsLayer.commitChanges()
self.assertEqual(len(spy_points_data_changed), 5)
self.assertEqual(len(spy_lines_data_changed), 4)
# repaintRequested is called on commit changes on point
# so it is on depending line
self.assertEqual(len(spy_lines_repaint_requested), 1)
self.assertEqual(len(spy_points_repaint_requested), 1)
def test_circular_dependencies_with_1_layer(self):
# You can define a layer that depends on itself (for instance, a line
# layer that triggers modifications of connected lines when you modify
# one line)
spy_lines_data_changed = QSignalSpy(self.linesLayer.dataChanged)
spy_lines_repaint_requested = QSignalSpy(self.linesLayer.repaintRequested)
# line fire dataChanged because we change its dependencies
self.assertTrue(self.linesLayer.setDependencies([QgsMapLayerDependency(self.linesLayer.id())]))
self.assertEqual(len(spy_lines_data_changed), 1)
f = QgsFeature(self.linesLayer.fields())
f.setId(1)
geom = QgsGeometry.fromWkt("LINESTRING(0 0,1 1)")
f.setGeometry(geom)
self.linesLayer.startEditing()
# line fire featureAdded so depending line fire dataChanged once more
self.linesLayer.addFeatures([f])
self.assertEqual(len(spy_lines_data_changed), 2)
# added feature is deleted and added with its new defined id
# (it was -1 before) so it fires 2 more signal dataChanged on
# depending line (on featureAdded and on featureDeleted)
self.linesLayer.commitChanges()
self.assertEqual(len(spy_lines_data_changed), 4)
# repaintRequested is called only once on commit changes on line
self.assertEqual(len(spy_lines_repaint_requested), 1)
def test_layerDefinitionRewriteId(self):
tmpfile = os.path.join(tempfile.tempdir, "test.qlr")
ltr = QgsProject.instance().layerTreeRoot()
self.pointsLayer.setDependencies([QgsMapLayerDependency(self.linesLayer.id())])
QgsLayerDefinition.exportLayerDefinition(tmpfile, [ltr])
grp = ltr.addGroup("imported")
QgsLayerDefinition.loadLayerDefinition(tmpfile, QgsProject.instance(), grp)
newPointsLayer = None
newLinesLayer = None
for l in grp.findLayers():
if l.layerId().startswith('points'):
newPointsLayer = l.layer()
elif l.layerId().startswith('lines'):
newLinesLayer = l.layer()
self.assertIsNotNone(newPointsLayer)
self.assertIsNotNone(newLinesLayer)
self.assertTrue(newLinesLayer.id() in [dep.layerId() for dep in newPointsLayer.dependencies()])
self.pointsLayer.setDependencies([])
def test_signalConnection(self):
# remove all layers
QgsProject.instance().removeAllMapLayers()
# set dependencies and add back layers
self.pointsLayer = QgsVectorLayer("dbname='%s' table=\"node\" (geom) sql=" % self.fn, "points", "spatialite")
assert (self.pointsLayer.isValid())
self.linesLayer = QgsVectorLayer("dbname='%s' table=\"section\" (geom) sql=" % self.fn, "lines", "spatialite")
assert (self.linesLayer.isValid())
self.pointsLayer2 = QgsVectorLayer("dbname='%s' table=\"node2\" (geom) sql=" % self.fn, "_points2", "spatialite")
assert (self.pointsLayer2.isValid())
self.pointsLayer.setDependencies([QgsMapLayerDependency(self.linesLayer.id())])
self.pointsLayer2.setDependencies([QgsMapLayerDependency(self.pointsLayer.id())])
# this should update connections between layers
QgsProject.instance().addMapLayers([self.pointsLayer])
QgsProject.instance().addMapLayers([self.linesLayer])
QgsProject.instance().addMapLayers([self.pointsLayer2])
ms = QgsMapSettings()
ms.setOutputSize(QSize(100, 100))
ms.setExtent(QgsRectangle(0, 0, 1, 1))
self.assertTrue(ms.hasValidSettings())
u = QgsSnappingUtils()
u.setMapSettings(ms)
cfg = u.config()
cfg.setEnabled(True)
cfg.setMode(QgsSnappingConfig.AdvancedConfiguration)
cfg.setIndividualLayerSettings(self.pointsLayer,
QgsSnappingConfig.IndividualLayerSettings(True,
QgsSnappingConfig.VertexFlag, 20, QgsTolerance.Pixels, 0.0, 0.0))
cfg.setIndividualLayerSettings(self.pointsLayer2,
QgsSnappingConfig.IndividualLayerSettings(True,
QgsSnappingConfig.VertexFlag, 20, QgsTolerance.Pixels, 0.0, 0.0))
u.setConfig(cfg)
# add another line
f = QgsFeature(self.linesLayer.fields())
f.setId(4)
geom = QgsGeometry.fromWkt("LINESTRING(0.5 0.2,0.6 0)")
f.setGeometry(geom)
self.linesLayer.startEditing()
self.linesLayer.addFeatures([f])
self.linesLayer.commitChanges()
# check the second snapped point is OK
m = u.snapToMap(QPoint(75, 100 - 0))
self.assertTrue(m.isValid())
self.assertTrue(m.hasVertex())
self.assertEqual(m.point(), QgsPointXY(0.8, 0.0))
self.pointsLayer.setDependencies([])
self.pointsLayer2.setDependencies([])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
topxiaoke/myedx | lms/djangoapps/certificates/migrations/0011_auto__del_field_generatedcertificate_certificate_id__add_field_generat.py | 188 | 5961 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedCertificate.certificate_id'
db.delete_column('certificates_generatedcertificate', 'certificate_id')
# Adding field 'GeneratedCertificate.verify_uuid'
db.add_column('certificates_generatedcertificate', 'verify_uuid',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
# Adding field 'GeneratedCertificate.download_uuid'
db.add_column('certificates_generatedcertificate', 'download_uuid',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'GeneratedCertificate.certificate_id'
db.add_column('certificates_generatedcertificate', 'certificate_id',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
# Deleting field 'GeneratedCertificate.verify_uuid'
db.delete_column('certificates_generatedcertificate', 'verify_uuid')
# Deleting field 'GeneratedCertificate.download_uuid'
db.delete_column('certificates_generatedcertificate', 'download_uuid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 |
JuliBakagianni/CEF-ELRC | lib/python2.7/site-packages/django/conf/locale/pt/formats.py | 232 | 1532 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \de F \de Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \de F \de Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \de Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
catlinman/input-dungeon | interface.py | 1 | 5859 |
# This interface module contains additional behavior for curses to make it more suited for
# the current aim of this project. It enables quick interaction between curses and the
# actual game while still attempting to be flexible and expendable at the same time.
# Developer note: It should be good practice to allow the user to interrupt the program at
# any given point. With that said - there should be checks in place that catch signals
# such as your usual interrupt and actually handle them as expected. This is however not
# automatically the case since curses catches key presses in a raw format, suppressing their
# default behavior and as such completely removing their functionality and use.
import time
import curses
import helpers
NUMUPDATES = 30 # The number of interface updates per second.
DEBUG = True # Enables development settings to make debugging easier.
# Menu interface class. Creates a scrolling menu inside a specified window.
class Menu:
def __init__(self, win, x=0, y=0):
self.window = win # The window this menu is attached to.
self.x, self.y = x, y # Positional offset of the menu.
self.max_length = 4 # Maximum number of displayed selections.
self.height, self.width = self.window.getmaxyx() # Get the maximum width and height of the assigned window.
self.selections = [] # Stores the possible selections the user can make.
self.total_selections = 0 # Stores the number of selections in the selection array.
self.finished = False # Stores the state of the menu.
self.line_symbol = "> " # Symbol displayed at the beginning of each menu option.
self.cursor = 0 # Current hovered selection index.
self.cursor_symbol = " => "
self.shift = 0 # The amount of lines the menu is shifted by.
self.shift_symbol = "+"
# Add a selection option stored under a specified index. Selections can be overwritten by reassignment to a given index.
def set_selection(self, item, index):
# Make sure the option dictionary contains the correct keys. Else replace them with default values.
if not item.get("desc"):
item["desc"] = "Menu item"
if not item.get("out"):
item["out"] = "None"
self.selections.append(item) # Append the selection item.
self.total_selections += 1 # Update the selection count.
# Remove a selection by its index.
def remove_selection(self, index):
del self.selections[index]
self.total_selections -= 1 # Update the selection count.
# Clear the list of selections.
def remove_all(self):
self.selections = []
self.total_selections = 0 # Update the selection count.
# Displays the menu and requires input.
def interact(self):
self.window.nodelay(1) # Make getch non-blocking.
self.window.keypad(1) # Enable special keys to return keycodes.
self.draw() # Draw the current menu.
while not self.finished:
key = self.window.getch() # Get the current pressed keycode. Also, refresh the screen.
if key == curses.KEY_DOWN: # Arrow down
self.move(-1) # Move the cursor down. Also redraws the menu.
elif key == curses.KEY_UP:
self.move(1) # Move the cursor up. Also redraws the menu.
elif key == curses.KEY_ENTER or key == 10:
self.finished = True
return self.selections[self.cursor + self.shift]["out"], self.selections[self.cursor + self.shift]["desc"]
elif key == 3:
exit() # Exit the entire program if the user presses the interrupt key.
if DEBUG == True:
if key == 27:
self.finished = True # Exit the menu if the user presses the escape key.
if key != -1: # Show the keycode.
self.clearline(self.height - 1)
self.window.addstr(self.height - 1, self.width - len(str(key)) - 1, str(key))
time.sleep(1 / NUMUPDATES) # Sleep between checks
# Clear a specific line.
def clearline(self, column):
self.window.move(column, 0)
self.window.clrtoeol()
# Draw all menu options/items.
def draw(self):
for i in range(min(self.max_length, self.total_selections)):
entry = self.selections[i + self.shift]
self.clearline(i + self.y) # Clear the line beneath.
self.window.addstr(i + self.y, self.x + 1, "%s%s" % (self.line_symbol, entry["desc"]))
if self.shift < self.total_selections - min(self.total_selections, self.max_length):
self.window.addstr(self.max_length + self.y - 1, self.x, self.shift_symbol)
if self.shift > 0:
self.window.addstr(self.y, self.x, self.shift_symbol)
self.window.addstr(self.cursor + self.y, self.x + 1, "%s%s" % (self.cursor_symbol, self.selections[self.cursor + self.shift]["desc"]))
# Move the current selected line and redraw.
def move(self, value):
self.clearline(self.cursor + self.y) # Clear the previously selected line to avoid leftover characters.
if self.cursor == min(self.total_selections, self.max_length) - 1 and value < 0: # Shift the displayed selections up if possible.
self.shift = min(self.shift - value, self.total_selections - min(self.total_selections, self.max_length))
if self.cursor == 0 and value > 0: # Shift the displayed selections down if possible.
self.shift = max(self.shift - value, 0)
self.cursor = min(min(self.total_selections, self.max_length) - 1, max(0, self.cursor - value)) # Move and clamp the cursor.
self.draw() # Redraw the menu to avoid lines from staying selected.
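# Illustrative usage of the Menu class (a sketch; the window object and the
# option values are assumptions):
#   menu = Menu(win, x=2, y=2)
#   menu.set_selection({"desc": "Start game", "out": "start"}, 0)
#   menu.set_selection({"desc": "Quit", "out": "quit"}, 1)
#   result, label = menu.interact()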
class Bar:
pass
| mit |
robinro/ansible-modules-core | cloud/google/gce_pd.py | 51 | 9869 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <[email protected]>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
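# An additional illustrative task (an assumption for documentation purposes,
# not part of the module's shipped examples):
# - local_action:
#     module: gce_pd
#     name: backup-disk
#     size_gb: 20
#     disk_type: pd-ssd
#     zone: us-central1-b
#     state: present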
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
credentials_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError as e:
module.fail_json(msg=str(e.value), changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jhartford/pybo | tests/test_acquisitions.py | 1 | 1134 | """
Unit tests for different acquisition functions. This mainly tests that the
gradients of each acquisition function are computed correctly.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
import numpy.testing as nt
import scipy.optimize as spop
# local imports
import pygp
import pybo.bayesopt.policies as policies
def check_acq_gradient(policy):
# randomly generate some data.
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
y = rng.rand(10)
# create the model.
model = pygp.BasicGP(0.5, 1, [1, 1])
model.add_data(X, y)
# get the computed gradients.
index = policy(model)
xtest = rng.rand(20, 2)
_, grad = index(xtest, grad=True)
# numerically approximate the gradients
index_ = lambda x: index(x[None])
grad_ = np.array([spop.approx_fprime(x, index_, 1e-8) for x in xtest])
nt.assert_allclose(grad, grad_, rtol=1e-6, atol=1e-6)
def test_acqs():
for fname in policies.__all__:
yield check_acq_gradient, getattr(policies, fname)
| bsd-2-clause |
egor-tensin/sorting_algorithms | algorithms/impl/heapsort.py | 2 | 1858 | # Copyright (c) 2015 Egor Tensin <[email protected]>
# This file is part of the "Sorting algorithms" project.
# For details, see https://github.com/egor-tensin/sorting-algorithms.
# Distributed under the MIT License.
import sys
from ..algorithm import SortingAlgorithm
# Disclaimer: implemented in the most literate way.
def heapsort(xs):
_heapify(xs)
first, last = 0, len(xs) - 1
for end in range(last, first, -1):
xs[end], xs[first] = xs[first], xs[end]
_siftdown(xs, first, end - 1)
return xs
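# Quick sanity check (illustrative): heapsort([3, 1, 4, 1, 5]) returns
# [1, 1, 3, 4, 5]; the sort happens in place and the same list is returned.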
# In a heap stored in a zero-based array,
# left_child = node * 2 + 1
# right_child = node * 2 + 2
# parent = (node - 1) // 2
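# For example, node 3 has children 3 * 2 + 1 == 7 and 3 * 2 + 2 == 8, and its
# parent is (3 - 1) // 2 == 1.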
def _get_parent(node):
return (node - 1) // 2
def _get_left_child(node):
return node * 2 + 1
def _get_right_child(node):
return node * 2 + 2
def _heapify(xs):
last = len(xs) - 1
first_parent, last_parent = 0, _get_parent(last)
for parent in range(last_parent, first_parent - 1, -1):
_siftdown(xs, parent, last)
def _siftdown(xs, start, end):
root = start
while True:
# We swap if there is at least one child
child = _get_left_child(root)
if child > end:
break
# If there are two children, select the larger of the two
right_child = _get_right_child(root)
if right_child <= end and xs[child] < xs[right_child]:
child = right_child
if xs[root] < xs[child]:
xs[root], xs[child] = xs[child], xs[root]
root = child
else:
break
_ALGORITHMS = [
SortingAlgorithm('heapsort', 'Heapsort', heapsort),
]
def _parse_args(args=None):
if args is None:
args = sys.argv[1:]
return list(map(int, args))
def main(args=None):
xs = _parse_args(args)
print(heapsort(list(xs)))
if __name__ == '__main__':
main()
| mit |
mlcommons/training | single_stage_detector/ssd/distributed.py | 1 | 3611 | import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed as dist
from torch.nn.modules import Module
'''
This version of DistributedDataParallel is designed to be used in conjunction with the multiproc.py
launcher included with this example. It assumes that your run uses multiprocessing with 1
GPU per process, that the model is on the correct device, and that torch.cuda.set_device has been
used to set the device.
Parameters are broadcast to the other processes on initialization of DistributedDataParallel,
and gradients are allreduced at the end of the backward pass.
'''
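# A minimal usage sketch (hypothetical; assumes the launcher has already initialized
# torch.distributed, that `MyModel`, `criterion` and `optimizer` are defined, and that
# the model lives on this process's GPU):
#
#   model = DistributedDataParallel(MyModel().cuda())
#   loss = criterion(model(inputs), targets)
#   loss.backward()   # gradients are allreduced by the hooks registered below
#   optimizer.step()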
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
for p in self.module.state_dict().values():
if not torch.is_tensor(p):
continue
if dist._backend == dist.dist_backend.NCCL:
assert p.is_cuda, "NCCL backend only supports model parameters to be on GPU."
dist.broadcast(p, 0)
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = param.data.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
| apache-2.0 |
amir-qayyum-khan/edx-platform | lms/djangoapps/courseware/tests/test_lti_integration.py | 10 | 9292 | """LTI integration tests"""
from collections import OrderedDict
import json
import mock
from nose.plugins.attrib import attr
import oauthlib
import urllib
from django.conf import settings
from django.core.urlresolvers import reverse
from courseware.tests import BaseTestXmodule
from courseware.views.views import get_course_lti_endpoints
from openedx.core.lib.url_utils import quote_slashes
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDENT_VIEW
@attr(shard=1)
class TestLTI(BaseTestXmodule):
"""
    Integration test for the LTI xmodule.
    It checks the overall code path by ensuring that the context passed to the template is correct.
    As part of that, it checks OAuth signature generation by mocking the signing function
    of the `oauthlib` library.
"""
CATEGORY = "lti"
def setUp(self):
"""
Mock oauth1 signing of requests library for testing.
"""
super(TestLTI, self).setUp()
mocked_nonce = u'135685044251684026041377608307'
mocked_timestamp = u'1234567890'
mocked_signature_after_sign = u'my_signature%3D'
mocked_decoded_signature = u'my_signature='
# Note: this course_id is actually a course_key
context_id = self.item_descriptor.course_id.to_deprecated_string()
user_id = unicode(self.item_descriptor.xmodule_runtime.anonymous_student_id)
hostname = self.item_descriptor.xmodule_runtime.hostname
resource_link_id = unicode(urllib.quote('{}-{}'.format(hostname, self.item_descriptor.location.html_id())))
sourcedId = "{context}:{resource_link}:{user_id}".format(
context=urllib.quote(context_id),
resource_link=resource_link_id,
user_id=user_id
)
self.correct_headers = {
u'user_id': user_id,
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': u'Student',
u'context_id': context_id,
u'resource_link_id': resource_link_id,
u'lis_result_sourcedid': sourcedId,
u'oauth_nonce': mocked_nonce,
u'oauth_timestamp': mocked_timestamp,
u'oauth_consumer_key': u'',
u'oauth_signature_method': u'HMAC-SHA1',
u'oauth_version': u'1.0',
u'oauth_signature': mocked_decoded_signature
}
saved_sign = oauthlib.oauth1.Client.sign
self.expected_context = {
'display_name': self.item_descriptor.display_name,
'input_fields': self.correct_headers,
'element_class': self.item_descriptor.category,
'element_id': self.item_descriptor.location.html_id(),
'launch_url': 'http://www.example.com', # default value
'open_in_a_new_page': True,
'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor,
'preview_handler').rstrip('/?'),
'hide_launch': False,
'has_score': False,
'module_score': None,
'comment': u'',
'weight': 1.0,
'ask_to_send_username': self.item_descriptor.ask_to_send_username,
'ask_to_send_email': self.item_descriptor.ask_to_send_email,
'description': self.item_descriptor.description,
'button_text': self.item_descriptor.button_text,
'accept_grades_past_due': self.item_descriptor.accept_grades_past_due,
}
def mocked_sign(self, *args, **kwargs):
"""
Mocked oauth1 sign function.
"""
# self is <oauthlib.oauth1.rfc5849.Client object> here:
__, headers, __ = saved_sign(self, *args, **kwargs)
# we should replace nonce, timestamp and signed_signature in headers:
old = headers[u'Authorization']
old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')])
old_parsed[u'OAuth oauth_nonce'] = mocked_nonce
old_parsed[u'oauth_timestamp'] = mocked_timestamp
old_parsed[u'oauth_signature'] = mocked_signature_after_sign
headers[u'Authorization'] = ', '.join([k + '="' + v + '"' for k, v in old_parsed.items()])
return None, headers, None
patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign)
patcher.start()
self.addCleanup(patcher.stop)
def test_lti_constructor(self):
generated_content = self.item_descriptor.render(STUDENT_VIEW).content
expected_content = self.runtime.render_template('lti.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
def test_lti_preview_handler(self):
generated_content = self.item_descriptor.preview_handler(None, None).body
expected_content = self.runtime.render_template('lti_form.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
@attr(shard=1)
class TestLTIModuleListing(SharedModuleStoreTestCase):
"""
a test for the rest endpoint that lists LTI modules in a course
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
@classmethod
def setUpClass(cls):
super(TestLTIModuleListing, cls).setUpClass()
cls.course = CourseFactory.create(display_name=cls.COURSE_NAME, number=cls.COURSE_SLUG)
cls.chapter1 = ItemFactory.create(
parent_location=cls.course.location,
display_name="chapter1",
category='chapter')
cls.section1 = ItemFactory.create(
parent_location=cls.chapter1.location,
display_name="section1",
category='sequential')
cls.chapter2 = ItemFactory.create(
parent_location=cls.course.location,
display_name="chapter2",
category='chapter')
cls.section2 = ItemFactory.create(
parent_location=cls.chapter2.location,
display_name="section2",
category='sequential')
# creates one draft and one published lti module, in different sections
cls.lti_published = ItemFactory.create(
parent_location=cls.section1.location,
display_name="lti published",
category="lti",
location=cls.course.id.make_usage_key('lti', 'lti_published'),
)
cls.lti_draft = ItemFactory.create(
parent_location=cls.section2.location,
display_name="lti draft",
category="lti",
location=cls.course.id.make_usage_key('lti', 'lti_draft'),
publish_item=False,
)
def setUp(self):
"""Create course, 2 chapters, 2 sections"""
super(TestLTIModuleListing, self).setUp()
def expected_handler_url(self, handler):
"""convenience method to get the reversed handler urls"""
return "https://{}{}".format(settings.SITE_NAME, reverse(
'courseware.module_render.handle_xblock_callback_noauth',
args=[
self.course.id.to_deprecated_string(),
quote_slashes(unicode(self.lti_published.scope_ids.usage_id.to_deprecated_string()).encode('utf-8')),
handler
]
))
def test_lti_rest_bad_course(self):
"""Tests what happens when the lti listing rest endpoint gets a bad course_id"""
bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"]
for bad_course_id in bad_ids:
lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id)
response = self.client.get(lti_rest_endpoints_url)
self.assertEqual(404, response.status_code)
    def test_lti_rest_listing(self):
        """tests that only the published lti module appears in the endpoint response"""
request = mock.Mock()
request.method = 'GET'
response = get_course_lti_endpoints(request, course_id=self.course.id.to_deprecated_string())
self.assertEqual(200, response.status_code)
self.assertEqual('application/json', response['Content-Type'])
expected = {
"lti_1_1_result_service_xml_endpoint": self.expected_handler_url('grade_handler'),
"lti_2_0_result_service_json_endpoint":
self.expected_handler_url('lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
"display_name": self.lti_published.display_name,
}
self.assertEqual([expected], json.loads(response.content))
def test_lti_rest_non_get(self):
"""tests that the endpoint returns 404 when hit with NON-get"""
DISALLOWED_METHODS = ("POST", "PUT", "DELETE", "HEAD", "OPTIONS") # pylint: disable=invalid-name
for method in DISALLOWED_METHODS:
request = mock.Mock()
request.method = method
response = get_course_lti_endpoints(request, self.course.id.to_deprecated_string())
self.assertEqual(405, response.status_code)
| agpl-3.0 |
Thhhza/XlsxWriter | xlsxwriter/test/worksheet/test_worksheet04.py | 8 | 2180 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with row formatting set."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
worksheet.set_row(1, 30)
worksheet.set_row(3, None, None, {'hidden': 1})
worksheet.set_row(6, None, cell_format)
worksheet.set_row(9, 3)
worksheet.set_row(12, 24, None, {'hidden': 1})
worksheet.set_row(14, 0)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A2:A15"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" ht="30" customHeight="1"/>
<row r="4" hidden="1"/>
<row r="7" s="1" customFormat="1"/>
<row r="10" ht="3" customHeight="1"/>
<row r="13" ht="24" hidden="1" customHeight="1"/>
<row r="15" hidden="1"/>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause |
bobobox/ansible | lib/ansible/module_utils/dellos6.py | 21 | 8111 |
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils.shell import CliBase
from ansible.module_utils.network import Command, register_transport, to_list
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine, ignore_line, DEFAULT_COMMENT_TOKENS
def get_config(module):
contents = module.params['config']
if not contents:
contents = module.config.get_config()
module.params['config'] = contents
return Dellos6NetworkConfig(indent=0, contents=contents[0])
else:
return Dellos6NetworkConfig(indent=0, contents=contents)
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
sublevel_config = Dellos6NetworkConfig(indent=0)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj.children
for c in contents:
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
sublevel_config.add(current_config_contents, module.params['parents'])
return sublevel_config
def os6_parse(lines, indent=None, comment_tokens=None):
sublevel_cmds = [
re.compile(r'^vlan.*$'),
re.compile(r'^stack.*$'),
re.compile(r'^interface.*$'),
re.compile(r'datacenter-bridging.*$'),
re.compile(r'line (console|telnet|ssh).*$'),
re.compile(r'ip ssh !(server).*$'),
re.compile(r'ip (dhcp|vrf).*$'),
re.compile(r'(ip|mac|management|arp) access-list.*$'),
re.compile(r'ipv6 (dhcp|router).*$'),
re.compile(r'mail-server.*$'),
re.compile(r'vpc domain.*$'),
re.compile(r'router.*$'),
re.compile(r'route-map.*$'),
re.compile(r'policy-map.*$'),
re.compile(r'class-map match-all.*$'),
re.compile(r'captive-portal.*$'),
re.compile(r'admin-profile.*$'),
re.compile(r'link-dependency group.*$'),
re.compile(r'banner motd.*$'),
re.compile(r'openflow.*$'),
re.compile(r'support-assist.*$'),
re.compile(r'template.*$'),
re.compile(r'address-family.*$'),
re.compile(r'spanning-tree mst.*$'),
re.compile(r'logging.*$'),
re.compile(r'(radius-server|tacacs-server) host.*$')]
childline = re.compile(r'^exit$')
config = list()
parent = list()
children = []
parent_match = False
for line in str(lines).split('\n'):
text = str(re.sub(r'([{};])', '', line)).strip()
cfg = ConfigLine(text)
cfg.raw = line
if not text or ignore_line(text, comment_tokens):
parent = list()
children = []
continue
else:
parent_match=False
# handle sublevel parent
for pr in sublevel_cmds:
if pr.match(line):
if len(parent) != 0:
cfg.parents.extend(parent)
parent.append(cfg)
config.append(cfg)
if children:
children.insert(len(parent) - 1,[])
children[len(parent) - 2].append(cfg)
parent_match=True
continue
# handle exit
if childline.match(line):
if children:
parent[len(children) - 1].children.extend(children[len(children) - 1])
if len(children)>1:
parent[len(children) - 2].children.extend(parent[len(children) - 1].children)
cfg.parents.extend(parent)
children.pop()
parent.pop()
if not children:
children = list()
if parent:
cfg.parents.extend(parent)
parent = list()
config.append(cfg)
# handle sublevel children
elif parent_match is False and len(parent)>0 :
if not children:
cfglist=[cfg]
children.append(cfglist)
else:
children[len(parent) - 1].append(cfg)
cfg.parents.extend(parent)
config.append(cfg)
# handle global commands
elif not parent:
config.append(cfg)
return config
class Dellos6NetworkConfig(NetworkConfig):
def load(self, contents):
self._config = os6_parse(contents, self.indent, DEFAULT_COMMENT_TOKENS)
def diff_line(self, other, path=None):
diff = list()
for item in self.items:
if str(item) == "exit":
for diff_item in diff:
if item.parents == diff_item.parents:
diff.append(item)
break
elif item not in other:
diff.append(item)
return diff
class Cli(CliBase):
NET_PASSWD_RE = re.compile(r"[\r\n]?password:\s?$", re.I)
CLI_PROMPTS_RE = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
CLI_ERRORS_RE = [
re.compile(r"% ?Error"),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+")]
def connect(self, params, **kwargs):
super(Cli, self).connect(params, kickstart=False, **kwargs)
def authorize(self, params, **kwargs):
passwd = params['auth_pass']
self.run_commands(
Command('enable', prompt=self.NET_PASSWD_RE, response=passwd)
)
self.run_commands('terminal length 0')
def configure(self, commands, **kwargs):
cmds = ['configure terminal']
cmds.extend(to_list(commands))
cmds.append('end')
responses = self.execute(cmds)
responses.pop(0)
return responses
def get_config(self, **kwargs):
return self.execute(['show running-config'])
def load_config(self, commands, **kwargs):
return self.configure(commands)
def save_config(self):
self.execute(['copy running-config startup-config'])
Cli = register_transport('cli', default=True)(Cli)
| gpl-3.0 |
stxnext-csr/volontulo | apps/volontulo/models.py | 1 | 10920 | # -*- coding: utf-8 -*-
u"""
.. module:: models
"""
import logging
import os
# pylint: disable=unused-import
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import F
from django.utils import timezone
logger = logging.getLogger('volontulo.models')
class Organization(models.Model):
    u"""Model that handles organizations/institutions."""
name = models.CharField(max_length=150)
address = models.CharField(max_length=150)
description = models.TextField()
    def __str__(self):
        u"""Organization model string representation."""
return self.name
class OffersManager(models.Manager):
u"""Offers Manager."""
def get_active(self):
u"""Return active offers."""
return self.filter(
offer_status='published',
action_status__in=('ongoing', 'future'),
recruitment_status__in=('open', 'supplemental'),
).all()
def get_for_administrator(self):
u"""Return all offers for administrator to allow management."""
return self.filter(offer_status='unpublished').all()
def get_weightened(self, count=10):
u"""Return all published offers ordered by weight.
:param count: Integer
:return:
"""
return self.filter(
offer_status='published').order_by('weight')[:count]
def get_archived(self):
u"""Return archived offers."""
return self.filter(
offer_status='published',
action_status__in=('ongoing', 'finished'),
recruitment_status='closed',
).all()
class Offer(models.Model):
u"""Offer model."""
OFFER_STATUSES = (
('unpublished', u'Unpublished'),
('published', u'Published'),
('rejected', u'Rejected'),
)
RECRUITMENT_STATUSES = (
('open', u'Open'),
('supplemental', u'Supplemental'),
('closed', u'Closed'),
)
ACTION_STATUSES = (
('future', u'Future'),
('ongoing', u'Ongoing'),
('finished', u'Finished'),
)
objects = OffersManager()
organization = models.ForeignKey(Organization)
volunteers = models.ManyToManyField(User)
description = models.TextField()
requirements = models.TextField(blank=True, default='')
time_commitment = models.TextField()
benefits = models.TextField()
location = models.CharField(max_length=150)
title = models.CharField(max_length=150)
started_at = models.DateTimeField(blank=True, null=True)
finished_at = models.DateTimeField(blank=True, null=True)
time_period = models.CharField(max_length=150, default='', blank=True)
status_old = models.CharField(
max_length=30,
default='NEW',
null=True,
unique=False
)
offer_status = models.CharField(
max_length=16,
choices=OFFER_STATUSES,
default='unpublished',
)
recruitment_status = models.CharField(
max_length=16,
choices=RECRUITMENT_STATUSES,
default='open',
)
action_status = models.CharField(
max_length=16,
choices=ACTION_STATUSES,
default='ongoing',
)
votes = models.BooleanField(default=0)
recruitment_start_date = models.DateTimeField(blank=True, null=True)
recruitment_end_date = models.DateTimeField(blank=True, null=True)
reserve_recruitment = models.BooleanField(blank=True, default=True)
reserve_recruitment_start_date = models.DateTimeField(
blank=True,
null=True
)
reserve_recruitment_end_date = models.DateTimeField(
blank=True,
null=True
)
action_ongoing = models.BooleanField(default=False, blank=True)
constant_coop = models.BooleanField(default=False, blank=True)
action_start_date = models.DateTimeField(blank=True, null=True)
action_end_date = models.DateTimeField(blank=True, null=True)
volunteers_limit = models.IntegerField(default=0, null=True, blank=True)
weight = models.IntegerField(default=0, null=True, blank=True)
def __str__(self):
u"""Offer string representation."""
return self.title
def set_main_image(self, is_main):
u"""Set main image flag unsetting other offers images.
:param is_main: Boolean flag resetting offer main image
"""
if is_main:
OfferImage.objects.filter(offer=self).update(is_main=False)
return True
return False
def save_offer_image(self, gallery, userprofile, is_main=False):
u"""Handle image upload for user profile page.
:param gallery: UserProfile model instance
:param userprofile: UserProfile model instance
:param is_main: Boolean main image flag
"""
gallery.offer = self
gallery.userprofile = userprofile
gallery.is_main = self.set_main_image(is_main)
gallery.save()
return self
def create_new(self):
u"""Set status while creating new offer."""
self.offer_status = 'unpublished'
self.recruitment_status = 'open'
if self.started_at or self.finished_at:
self.action_status = self.determine_action_status()
def determine_action_status(self):
u"""Determine action status by offer dates."""
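        # Illustrative summary of the checks below:
        #   started_at < now < finished_at (or started and no finish date) -> 'ongoing'
        #   started_at > now                                               -> 'future'
        #   otherwise                                                      -> 'finished'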
if (
(
self.finished_at and
self.started_at < timezone.now() < self.finished_at
) or
(
self.started_at < timezone.now() and
not self.finished_at
)
):
return 'ongoing'
elif self.started_at > timezone.now():
return 'future'
else:
return 'finished'
def change_status(self, status):
u"""Change offer status.
:param status: string Offer status
"""
if status in ('published', 'rejected', 'unpublished'):
self.offer_status = status
self.save()
return self
def unpublish(self):
u"""Unpublish offer."""
self.offer_status = 'unpublished'
self.save()
return self
def publish(self):
u"""Publish offer."""
self.offer_status = 'published'
Offer.objects.all().update(weight=F('weight') + 1)
self.weight = 0
self.save()
return self
def reject(self):
u"""Reject offer."""
self.offer_status = 'rejected'
self.save()
return self
def close_offer(self):
u"""Change offer status to close."""
self.offer_status = 'unpublished'
self.action_status = 'finished'
self.recruitment_status = 'closed'
self.save()
return self
class UserProfile(models.Model):
u"""Model that handles users' profiles."""
user = models.OneToOneField(User)
organizations = models.ManyToManyField(
Organization,
related_name='userprofiles',
)
is_administrator = models.BooleanField(default=False, blank=True)
phone_no = models.CharField(
max_length=32,
blank=True,
default='',
null=True
)
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
def is_admin(self):
u"""Return True if current user is administrator, else return False"""
return self.is_administrator
def is_volunteer(self):
u"""Return True if current user is volunteer, else return False"""
return not (self.is_administrator and self.organizations)
def can_edit_offer(self, offer=None, offer_id=None):
u"""Checks if the user can edit an offer based on its ID"""
if offer is None:
offer = Offer.objects.get(id=offer_id)
return self.is_administrator or self.organizations.filter(
id=offer.organization_id).exists()
def get_avatar(self):
u"""Return avatar for current user."""
return UserGallery.objects.filter(
userprofile=self,
is_avatar=True
)
def clean_images(self):
u"""Clean user images."""
images = UserGallery.objects.filter(userprofile=self)
for image in images:
try:
os.remove(os.path.join(settings.MEDIA_ROOT, str(image.image)))
except OSError as ex:
logger.error(ex)
image.delete()
def __str__(self):
return self.user.email
class UserGallery(models.Model):
u"""Handling user images."""
userprofile = models.ForeignKey(UserProfile, related_name='images')
image = models.ImageField(upload_to='profile/')
is_avatar = models.BooleanField(default=False)
def __str__(self):
u"""String representation of an image."""
return str(self.image)
class OfferImage(models.Model):
u"""Handling offer image."""
userprofile = models.ForeignKey(UserProfile, related_name='offerimages')
offer = models.ForeignKey(Offer, related_name='images')
path = models.ImageField(upload_to='offers/')
is_main = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
u"""String representation of an image."""
return str(self.path)
class OrganizationGallery(models.Model):
u"""Handling organizations gallery."""
organization = models.ForeignKey(Organization, related_name='images')
published_by = models.ForeignKey(UserProfile, related_name='gallery')
path = models.ImageField(upload_to='gallery/')
is_main = models.BooleanField(default=False, blank=True)
def __str__(self):
u"""String representation of an image."""
return str(self.path)
    def remove(self):
        u"""Remove image."""
        # Calling self.remove() here would recurse forever; delegate to the
        # model's delete() instead.
        self.delete()
def set_as_main(self, organization):
u"""Save image as main.
:param organization: Organization model instance
"""
OrganizationGallery.objects.filter(organization_id=organization.id)\
.update(
is_main=False
)
self.is_main = True
self.save()
@staticmethod
def get_organizations_galleries(userprofile):
u"""Get images grouped by organizations
:param userprofile: UserProfile model instance
"""
organizations = Organization.objects.filter(
userprofiles=userprofile
).all()
return {o.name: o.images.all() for o in organizations}
class Page(models.Model):
"""Static page model."""
title = models.CharField(max_length=255)
content = models.TextField()
author = models.ForeignKey(UserProfile)
published = models.BooleanField(default=False)
modified_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
| mit |
WendellDuncan/or-tools | examples/data/nonogram_regular/nonogram_p200.py | 74 | 1810 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nonogram problem from Gecode: P200
# http://www.gecode.org/gecode-doc-latest/classNonogram.html
#
rows = 25
row_rule_len = 7
row_rules = [
[0,0,0,0,2,2,3],
[0,0,4,1,1,1,4],
[0,0,4,1,2,1,1],
[4,1,1,1,1,1,1],
[0,2,1,1,2,3,5],
[0,1,1,1,1,2,1],
[0,0,3,1,5,1,2],
[0,3,2,2,1,2,2],
[2,1,4,1,1,1,1],
[0,2,2,1,2,1,2],
[0,1,1,1,3,2,3],
[0,0,1,1,2,7,3],
[0,0,1,2,2,1,5],
[0,0,3,2,2,1,2],
[0,0,0,3,2,1,2],
[0,0,0,0,5,1,2],
[0,0,0,2,2,1,2],
[0,0,0,4,2,1,2],
[0,0,0,6,2,3,2],
[0,0,0,7,4,3,2],
[0,0,0,0,7,4,4],
[0,0,0,0,7,1,4],
[0,0,0,0,6,1,4],
[0,0,0,0,4,2,2],
[0,0,0,0,0,2,1]
]
cols = 25
col_rule_len = 6
col_rules = [
[0,0,1,1,2,2],
[0,0,0,5,5,7],
[0,0,5,2,2,9],
[0,0,3,2,3,9],
[0,1,1,3,2,7],
[0,0,0,3,1,5],
[0,7,1,1,1,3],
[1,2,1,1,2,1],
[0,0,0,4,2,4],
[0,0,1,2,2,2],
[0,0,0,4,6,2],
[0,0,1,2,2,1],
[0,0,3,3,2,1],
[0,0,0,4,1,15],
[1,1,1,3,1,1],
[2,1,1,2,2,3],
[0,0,1,4,4,1],
[0,0,1,4,3,2],
[0,0,1,1,2,2],
[0,7,2,3,1,1],
[0,2,1,1,1,5],
[0,0,0,1,2,5],
[0,0,1,1,1,3],
[0,0,0,4,2,1],
[0,0,0,0,0,3]
]
| apache-2.0 |
pennersr/django-allauth | allauth/socialaccount/providers/draugiem/tests.py | 2 | 4796 | from hashlib import md5
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.urls import reverse
from django.utils.http import urlencode
from allauth.socialaccount import providers
from allauth.socialaccount.models import SocialApp, SocialToken
from allauth.tests import Mock, TestCase, patch
from . import views
from .provider import DraugiemProvider
class DraugiemTests(TestCase):
def setUp(self):
# workaround to create a session. see:
# https://code.djangoproject.com/ticket/11475
User.objects.create_user(
"anakin", "[email protected]", "s1thrul3s"
)
self.client.login(username="anakin", password="s1thrul3s")
self.provider = providers.registry.by_id(DraugiemProvider.id)
app = SocialApp.objects.create(
provider=self.provider.id,
name=self.provider.id,
client_id="app123id",
key=self.provider.id,
secret="dummy",
)
app.sites.add(Site.objects.get_current())
self.app = app
def get_draugiem_login_response(self):
"""
Sample Draugiem.lv response
"""
return {
"apikey": "12345",
"uid": "42",
"users": {
"42": {
"age": "266",
"imgl": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
"surname": "Skywalker",
"url": "/user/42/",
"imgi": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
"nick": "Sky Guy",
"created": "09.11.1812 11:26:15",
"deleted": "false",
"imgm": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
"sex": "M",
"type": "User_Default",
"uid": "42",
"place": "London",
"emailHash": "3f198f21434gfd2f2b4rs05939shk93f3815bc6aa",
"name": "Anakin",
"adult": "1",
"birthday": "1750-09-13",
"img": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
}
},
}
def get_socialaccount(self, response, token):
"""
Returns SocialLogin based on the data from the request
"""
request = Mock()
login = self.provider.sociallogin_from_response(request, response)
login.token = token
return login
def mock_socialaccount_state(self):
"""
SocialLogin depends on Session state - a tuple of request
params and a random string
"""
session = self.client.session
session["socialaccount_state"] = (
{"process": "login", "scope": "", "auth_params": ""},
"12345",
)
session.save()
def test_login_redirect(self):
response = self.client.get(reverse(views.login))
redirect_url = reverse(views.callback)
full_redirect_url = "http://testserver" + redirect_url
secret = self.app.secret + full_redirect_url
redirect_url_hash = md5(secret.encode("utf-8")).hexdigest()
params = {
"app": self.app.client_id,
"hash": redirect_url_hash,
"redirect": full_redirect_url,
}
self.assertRedirects(
response,
"%s?%s" % (views.AUTHORIZE_URL, urlencode(params)),
fetch_redirect_response=False,
)
def test_callback_no_auth_status(self):
response = self.client.get(reverse(views.callback))
self.assertTemplateUsed(response, "socialaccount/authentication_error.html")
def test_callback_invalid_auth_status(self):
response = self.client.get(reverse(views.callback), {"dr_auth_status": "fail"})
self.assertTemplateUsed(response, "socialaccount/authentication_error.html")
def test_callback(self):
with patch(
"allauth.socialaccount.providers.draugiem.views" ".draugiem_complete_login"
) as draugiem_complete_login:
self.mock_socialaccount_state()
response_json = self.get_draugiem_login_response()
token = SocialToken(app=self.app, token=response_json["apikey"])
login = self.get_socialaccount(response_json, token)
draugiem_complete_login.return_value = login
response = self.client.get(
reverse(views.callback),
{"dr_auth_status": "ok", "dr_auth_code": "42"},
)
self.assertRedirects(
response, "/accounts/profile/", fetch_redirect_response=False
)
| mit |
40223117cda/w16cdaa | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/pool.py | 694 | 23263 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
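    # A minimal usage sketch (hypothetical; `square` stands for any picklable
    # top-level function):
    #
    #   with Pool(processes=4) as pool:
    #       results = pool.map(square, range(10))      # blocking map
    #       promise = pool.apply_async(square, (7,))   # non-blocking variant
    #       print(promise.get(timeout=1))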
Process = Process
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None):
self._setup_queues()
self._taskqueue = queue.Queue()
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
self._processes = processes
self._pool = []
self._repopulate_pool()
self._worker_handler = threading.Thread(
target=Pool._handle_workers,
args=(self, )
)
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
)
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15
)
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
debug('cleaning up worker %d' % i)
worker.join()
cleaned = True
del self._pool[i]
return cleaned
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def _maintain_pool(self):
"""Clean up any exited workers and start replacements for them.
"""
if self._join_exited_workers():
self._repopulate_pool()
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwds)`.
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
return self._map_async(func, iterable, mapstar, chunksize).get()
def starmap(self, func, iterable, chunksize=None):
'''
Like `map()` method but the elements of the `iterable` are expected to
be iterables as well and will be unpacked as arguments. Hence
`func` and (a, b) becomes func(a, b).
'''
return self._map_async(func, iterable, starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None, callback=None,
error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None,
error_callback=None):
'''
Asynchronous version of `apply()` method.
'''
if self._state != RUN:
raise ValueError("Pool not running")
result = ApplyResult(self._cache, callback, error_callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None,
error_callback=None):
'''
Asynchronous version of `map()` method.
'''
return self._map_async(func, iterable, mapstar, chunksize, callback,
error_callback)
def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((result._job, i, mapper, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _handle_workers(pool):
thread = threading.current_thread()
# Keep maintaining workers until the cache gets drained, unless the pool
# is terminated.
while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
pool._maintain_pool()
time.sleep(0.1)
# send sentinel to stop workers
pool._taskqueue.put(None)
debug('worker handler exiting')
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
thread = threading.current_thread()
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i+1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.current_thread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler._state = TERMINATE
self._terminate()
def join(self):
debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._worker_handler.join()
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler._state = TERMINATE
task_handler._state = TERMINATE
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
# We must wait for the worker handler to exit before terminating
# workers because we don't want workers to be restarted behind our back.
debug('joining worker handler')
if threading.current_thread() is not worker_handler:
worker_handler.join()
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
debug('joining task handler')
if threading.current_thread() is not task_handler:
task_handler.join()
debug('joining result handler')
if threading.current_thread() is not result_handler:
result_handler.join()
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d' % p.pid)
p.join()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
def __init__(self, cache, callback, error_callback):
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._error_callback = error_callback
cache[self._job] = self
def ready(self):
return self._event.is_set()
def successful(self):
assert self.ready()
return self._success
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._success, self._value = obj
if self._callback and self._success:
self._callback(self._value)
if self._error_callback and not self._success:
self._error_callback(self._value)
self._event.set()
del self._cache[self._job]
AsyncResult = ApplyResult # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(self, cache, callback,
error_callback=error_callback)
self._success = True
self._value = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length//chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
del self._cache[self._job]
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
del self._cache[self._job]
self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from .dummy import Process
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = queue.Queue()
self._outqueue = queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notify_all()
finally:
inqueue.not_empty.release()
| gpl-3.0 |
peterbarker/MAVProxy | MAVProxy/modules/mavproxy_antenna.py | 16 | 2064 | #!/usr/bin/env python
'''
antenna pointing module
Andrew Tridgell
June 2012
'''
import sys, os, time
from cuav.lib import cuav_util
from MAVProxy.modules.lib import mp_module
class AntennaModule(mp_module.MPModule):
def __init__(self, mpstate):
super(AntennaModule, self).__init__(mpstate, "antenna", "antenna pointing module")
self.gcs_location = None
self.last_bearing = 0
self.last_announce = 0
self.add_command('antenna', self.cmd_antenna, "antenna link control")
def cmd_antenna(self, args):
'''set gcs location'''
if len(args) != 2:
if self.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(self.gcs_location))
return
self.gcs_location = (float(args[0]), float(args[1]))
def mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
if self.gcs_location is None and self.module('wp').wploader.count() > 0:
home = self.module('wp').wploader.wp(0)
self.gcs_location = (home.x, home.y)
print("Antenna home set")
if self.gcs_location is None:
return
if m.get_type() == 'GPS_RAW' and self.gcs_location is not None:
(gcs_lat, gcs_lon) = self.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat, m.lon)
elif m.get_type() == 'GPS_RAW_INT' and self.gcs_location is not None:
(gcs_lat, gcs_lon) = self.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat / 1.0e7, m.lon / 1.0e7)
else:
return
self.console.set_status('Antenna', 'Antenna %.0f' % bearing, row=0)
if abs(bearing - self.last_bearing) > 5 and (time.time() - self.last_announce) > 15:
self.last_bearing = bearing
self.last_announce = time.time()
self.say("Antenna %u" % int(bearing + 0.5))
def init(mpstate):
'''initialise module'''
return AntennaModule(mpstate)
| gpl-3.0 |
garnaat/boto | boto/sns/__init__.py | 131 | 2117 | # Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# this is here for backward compatibility
# originally, the SNSConnection class was defined here
from boto.sns.connection import SNSConnection
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the SNS service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
return get_regions('sns', connection_cls=SNSConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.sns.connection.SNSConnection`.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.sns.connection.SNSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
| mit |
jbedorf/tensorflow | tensorflow/contrib/gan/python/eval/python/summaries_impl.py | 5 | 11802 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TF-GAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.eval.python import eval_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import util as loss_util
from tensorflow.python.summary import summary
__all__ = [
'add_gan_model_image_summaries',
'add_image_comparison_summaries',
'add_gan_model_summaries',
'add_regularization_loss_summaries',
'add_cyclegan_image_summaries',
'add_stargan_image_summaries'
]
def _assert_is_image(data):
data.shape.assert_has_rank(4)
data.shape[1:].assert_is_fully_defined()
def add_gan_model_image_summaries(gan_model, grid_size=4, model_summaries=True):
"""Adds image summaries for real and fake images.
Args:
gan_model: A GANModel tuple.
grid_size: The size of an image grid.
model_summaries: Also add summaries of the model.
Raises:
ValueError: If real and generated data aren't images.
"""
if isinstance(gan_model, namedtuples.CycleGANModel):
raise ValueError(
'`add_gan_model_image_summaries` does not take CycleGANModels. Please '
'use `add_cyclegan_image_summaries` instead.')
_assert_is_image(gan_model.real_data)
_assert_is_image(gan_model.generated_data)
num_images = grid_size ** 2
real_image_shape = gan_model.real_data.shape.as_list()[1:3]
generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
real_channels = gan_model.real_data.shape.as_list()[3]
generated_channels = gan_model.generated_data.shape.as_list()[3]
summary.image(
'real_data',
eval_utils.image_grid(
gan_model.real_data[:num_images],
grid_shape=(grid_size, grid_size),
image_shape=real_image_shape,
num_channels=real_channels),
max_outputs=1)
summary.image(
'generated_data',
eval_utils.image_grid(
gan_model.generated_data[:num_images],
grid_shape=(grid_size, grid_size),
image_shape=generated_image_shape,
num_channels=generated_channels),
max_outputs=1)
if model_summaries:
add_gan_model_summaries(gan_model)
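# Illustrative sketch (not from the original file): a typical call site,
# assuming `gan_model` is a tfgan GANModel namedtuple whose `real_data` and
# `generated_data` are 4-D image batches such as [batch_size, 32, 32, 3].
#
#     add_gan_model_image_summaries(gan_model, grid_size=4, model_summaries=True)
#     # The 'real_data' and 'generated_data' image grids then appear in
#     # TensorBoard once the summaries are written during training.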
def add_cyclegan_image_summaries(cyclegan_model):
"""Adds image summaries for CycleGAN.
There are two summaries, one for each generator. The first image is the
generator input, the second is the generator output, and the third is G(F(x)).
Args:
cyclegan_model: A CycleGANModel tuple.
Raises:
ValueError: If `cyclegan_model` isn't a CycleGANModel.
ValueError: If generated data, generator inputs, and reconstructions aren't
images.
ValueError: If the generator input, generated data, and reconstructions
aren't all the same size.
"""
if not isinstance(cyclegan_model, namedtuples.CycleGANModel):
raise ValueError('`cyclegan_model` was not a CycleGANModel. Instead, was '
'%s' % type(cyclegan_model))
_assert_is_image(cyclegan_model.model_x2y.generator_inputs)
_assert_is_image(cyclegan_model.model_x2y.generated_data)
_assert_is_image(cyclegan_model.reconstructed_x)
_assert_is_image(cyclegan_model.model_y2x.generator_inputs)
_assert_is_image(cyclegan_model.model_y2x.generated_data)
_assert_is_image(cyclegan_model.reconstructed_y)
def _add_comparison_summary(gan_model, reconstructions):
image_list = (array_ops.unstack(gan_model.generator_inputs[:1]) +
array_ops.unstack(gan_model.generated_data[:1]) +
array_ops.unstack(reconstructions[:1]))
summary.image(
'image_comparison', eval_utils.image_reshaper(
image_list, num_cols=len(image_list)), max_outputs=1)
with ops.name_scope('x2y_image_comparison_summaries'):
_add_comparison_summary(
cyclegan_model.model_x2y, cyclegan_model.reconstructed_x)
with ops.name_scope('y2x_image_comparison_summaries'):
_add_comparison_summary(
cyclegan_model.model_y2x, cyclegan_model.reconstructed_y)
def add_image_comparison_summaries(gan_model, num_comparisons=2,
display_diffs=False):
"""Adds image summaries to compare triplets of images.
The first image is the generator input, the second is the generator output,
and the third is the real data. This style of comparison is useful for
image translation problems, where the generator input is a corrupted image,
the generator output is the reconstruction, and the real data is the target.
Args:
gan_model: A GANModel tuple.
num_comparisons: The number of image triplets to display.
display_diffs: Also display the difference between generated and target.
Raises:
ValueError: If real data, generated data, and generator inputs aren't
images.
ValueError: If the generator input, real, and generated data aren't all the
same size.
"""
_assert_is_image(gan_model.generator_inputs)
_assert_is_image(gan_model.generated_data)
_assert_is_image(gan_model.real_data)
gan_model.generated_data.shape.assert_is_compatible_with(
gan_model.generator_inputs.shape)
gan_model.real_data.shape.assert_is_compatible_with(
gan_model.generated_data.shape)
image_list = []
image_list.extend(
array_ops.unstack(gan_model.generator_inputs[:num_comparisons]))
image_list.extend(
array_ops.unstack(gan_model.generated_data[:num_comparisons]))
image_list.extend(array_ops.unstack(gan_model.real_data[:num_comparisons]))
if display_diffs:
generated_list = array_ops.unstack(
gan_model.generated_data[:num_comparisons])
real_list = array_ops.unstack(gan_model.real_data[:num_comparisons])
diffs = [
math_ops.abs(math_ops.cast(generated, dtypes.float32) -
math_ops.cast(real, dtypes.float32))
for generated, real in zip(generated_list, real_list)
]
image_list.extend(diffs)
# Reshape image and display.
summary.image(
'image_comparison',
eval_utils.image_reshaper(image_list, num_cols=num_comparisons),
max_outputs=1)
def add_stargan_image_summaries(stargan_model,
num_images=2,
display_diffs=False):
"""Adds image summaries to see StarGAN image results.
If display_diffs is True, each image result has `2` rows and `num_domains + 1`
columns.
The first row looks like:
[original_image, transformed_to_domain_0, transformed_to_domain_1, ...]
The second row looks like:
[no_modification_baseline, transformed_to_domain_0-original_image, ...]
If display_diffs is False, only the first row is shown.
IMPORTANT:
  Since the model does not originally transform the image to every domain,
  we transform them on the fly within this function in parallel.
Args:
stargan_model: A StarGANModel tuple.
num_images: The number of examples/images to be transformed and shown.
display_diffs: Also display the difference between generated and target.
Raises:
ValueError: If input_data is not images.
ValueError: If input_data_domain_label is not rank 2.
ValueError: If dimension 2 of input_data_domain_label is not fully defined.
"""
_assert_is_image(stargan_model.input_data)
stargan_model.input_data_domain_label.shape.assert_has_rank(2)
stargan_model.input_data_domain_label.shape[1:].assert_is_fully_defined()
num_domains = stargan_model.input_data_domain_label.get_shape().as_list()[-1]
def _build_image(image):
"""Helper function to create a result for each image on the fly."""
# Expand the first dimension as batch_size = 1.
images = array_ops.expand_dims(image, axis=0)
# Tile the image num_domains times, so we can get all transformed together.
images = array_ops.tile(images, [num_domains, 1, 1, 1])
# Create the targets to 0, 1, 2, ..., num_domains-1.
targets = array_ops.one_hot(list(range(num_domains)), num_domains)
with variable_scope.variable_scope(
stargan_model.generator_scope, reuse=True):
# Add the original image.
output_images_list = [image]
# Generate the image and add to the list.
gen_images = stargan_model.generator_fn(images, targets)
gen_images_list = array_ops.split(gen_images, num_domains)
gen_images_list = [
array_ops.squeeze(img, axis=0) for img in gen_images_list
]
output_images_list.extend(gen_images_list)
# Display diffs.
if display_diffs:
diff_images = gen_images - images
diff_images_list = array_ops.split(diff_images, num_domains)
diff_images_list = [
array_ops.squeeze(img, axis=0) for img in diff_images_list
]
output_images_list.append(array_ops.zeros_like(image))
output_images_list.extend(diff_images_list)
# Create the final image.
final_image = eval_utils.image_reshaper(
output_images_list, num_cols=num_domains + 1)
# Reduce the first rank.
return array_ops.squeeze(final_image, axis=0)
summary.image(
'stargan_image_generation',
map_fn.map_fn(
_build_image,
stargan_model.input_data[:num_images],
parallel_iterations=num_images,
back_prop=False,
swap_memory=True),
max_outputs=num_images)
def add_gan_model_summaries(gan_model):
"""Adds typical GANModel summaries.
Args:
gan_model: A GANModel tuple.
"""
if isinstance(gan_model, namedtuples.CycleGANModel):
with ops.name_scope('cyclegan_x2y_summaries'):
add_gan_model_summaries(gan_model.model_x2y)
with ops.name_scope('cyclegan_y2x_summaries'):
add_gan_model_summaries(gan_model.model_y2x)
return
with ops.name_scope('generator_variables'):
for var in gan_model.generator_variables:
summary.histogram(var.name, var)
with ops.name_scope('discriminator_variables'):
for var in gan_model.discriminator_variables:
summary.histogram(var.name, var)
def add_regularization_loss_summaries(gan_model):
"""Adds summaries for a regularization losses..
Args:
gan_model: A GANModel tuple.
"""
if isinstance(gan_model, namedtuples.CycleGANModel):
with ops.name_scope('cyclegan_x2y_regularization_loss_summaries'):
add_regularization_loss_summaries(gan_model.model_x2y)
with ops.name_scope('cyclegan_y2x_regularization_loss_summaries'):
add_regularization_loss_summaries(gan_model.model_y2x)
return
if gan_model.generator_scope:
summary.scalar(
'generator_regularization_loss',
loss_util.get_regularization_loss(gan_model.generator_scope.name))
if gan_model.discriminator_scope:
summary.scalar(
'discriminator_regularization_loss',
loss_util.get_regularization_loss(gan_model.discriminator_scope.name))
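# Illustrative sketch (not from the original file): the scalar and histogram
# summaries above are usually added together once the GANModel and its losses
# have been built, assuming regularization losses were created under the
# generator and discriminator variable scopes.
#
#     add_gan_model_summaries(gan_model)
#     add_regularization_loss_summaries(gan_model)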
| apache-2.0 |
jcoady9/youtube-dl | youtube_dl/extractor/beatportpro.py | 142 | 3423 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none
class BeatportProIE(InfoExtractor):
_VALID_URL = r'https?://pro\.beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://pro.beatport.com/track/synesthesia-original-mix/5379371',
'md5': 'b3c34d8639a2f6a7f734382358478887',
'info_dict': {
'id': '5379371',
'display_id': 'synesthesia-original-mix',
'ext': 'mp4',
'title': 'Froxic - Synesthesia (Original Mix)',
},
}, {
'url': 'https://pro.beatport.com/track/love-and-war-original-mix/3756896',
'md5': 'e44c3025dfa38c6577fbaeb43da43514',
'info_dict': {
'id': '3756896',
'display_id': 'love-and-war-original-mix',
'ext': 'mp3',
'title': 'Wolfgang Gartner - Love & War (Original Mix)',
},
}, {
'url': 'https://pro.beatport.com/track/birds-original-mix/4991738',
'md5': 'a1fd8e8046de3950fd039304c186c05f',
'info_dict': {
'id': '4991738',
'display_id': 'birds-original-mix',
'ext': 'mp4',
'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)",
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
track_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
playables = self._parse_json(
self._search_regex(
r'window\.Playables\s*=\s*({.+?});', webpage,
'playables info', flags=re.DOTALL),
track_id)
track = next(t for t in playables['tracks'] if t['id'] == int(track_id))
title = ', '.join((a['name'] for a in track['artists'])) + ' - ' + track['name']
if track['mix']:
title += ' (' + track['mix'] + ')'
formats = []
for ext, info in track['preview'].items():
if not info['url']:
continue
fmt = {
'url': info['url'],
'ext': ext,
'format_id': ext,
'vcodec': 'none',
}
if ext == 'mp3':
fmt['preference'] = 0
fmt['acodec'] = 'mp3'
fmt['abr'] = 96
fmt['asr'] = 44100
elif ext == 'mp4':
fmt['preference'] = 1
fmt['acodec'] = 'aac'
fmt['abr'] = 96
fmt['asr'] = 44100
formats.append(fmt)
self._sort_formats(formats)
images = []
for name, info in track['images'].items():
image_url = info.get('url')
if name == 'dynamic' or not image_url:
continue
image = {
'id': name,
'url': image_url,
'height': int_or_none(info.get('height')),
'width': int_or_none(info.get('width')),
}
images.append(image)
return {
'id': compat_str(track.get('id')) or track_id,
'display_id': track.get('slug') or display_id,
'title': title,
'formats': formats,
'thumbnails': images,
}
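# Usage sketch (illustrative, not part of the original file): extractors like
# this one are not called directly; youtube-dl dispatches to them by URL, e.g.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'https://pro.beatport.com/track/synesthesia-original-mix/5379371',
#             download=False)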
| unlicense |
nhuntwalker/astroML | examples/algorithms/plot_bayesian_blocks.py | 3 | 2706 | """
Bayesian Blocks for Histograms
------------------------------
.. currentmodule:: astroML
Bayesian Blocks is a dynamic histogramming method which optimizes one of
several possible fitness functions to determine an optimal binning for
data, where the bins are not necessarily uniform width. The astroML
implementation is based on [1]_. For more discussion of this technique,
see the blog post at [2]_.
The code below uses a fitness function suitable for event data with possible
repeats. More fitness functions are available: see :mod:`density_estimation`
References
~~~~~~~~~~
.. [1] Scargle, J `et al.` (2012)
http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
.. [2] http://jakevdp.github.com/blog/2012/09/12/dynamic-programming-in-python/
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
from astroML.plotting import hist
# draw a set of variables
np.random.seed(0)
t = np.concatenate([stats.cauchy(-5, 1.8).rvs(500),
stats.cauchy(-4, 0.8).rvs(2000),
stats.cauchy(-1, 0.3).rvs(500),
stats.cauchy(2, 0.8).rvs(1000),
stats.cauchy(4, 1.5).rvs(500)])
# truncate values to a reasonable range
t = t[(t > -15) & (t < 15)]
#------------------------------------------------------------
# First figure: show normal histogram binning
fig = plt.figure(figsize=(10, 4))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.15)
ax1 = fig.add_subplot(121)
ax1.hist(t, bins=15, histtype='stepfilled', alpha=0.2, normed=True)
ax1.set_xlabel('t')
ax1.set_ylabel('P(t)')
ax2 = fig.add_subplot(122)
ax2.hist(t, bins=200, histtype='stepfilled', alpha=0.2, normed=True)
ax2.set_xlabel('t')
ax2.set_ylabel('P(t)')
#------------------------------------------------------------
# Second & Third figure: Knuth bins & Bayesian Blocks
fig = plt.figure(figsize=(10, 4))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.15)
for bins, title, subplot in zip(['knuth', 'blocks'],
["Knuth's rule", 'Bayesian blocks'],
[121, 122]):
ax = fig.add_subplot(subplot)
# plot a standard histogram in the background, with alpha transparency
hist(t, bins=200, histtype='stepfilled',
alpha=0.2, normed=True, label='standard histogram')
# plot an adaptive-width histogram on top
hist(t, bins=bins, ax=ax, color='black',
histtype='step', normed=True, label=title)
ax.legend(prop=dict(size=12))
ax.set_xlabel('t')
ax.set_ylabel('P(t)')
plt.show()
| bsd-2-clause |
jalavik/invenio-records | invenio_records/providers/recid.py | 4 | 1834 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Define PID provider for recids."""
from invenio_ext.sqlalchemy import db
from invenio_pidstore.provider import PidProvider
from sqlalchemy.exc import SQLAlchemyError
from ..models import Record
class RecordID(PidProvider):
"""Provider for recids."""
pid_type = 'recid'
def create_new_pid(self, pid_value):
"""Create a new row inside the ``Record`` table.
If ``pid_value`` is not ``None`` will be use as ``id`` to create this
new row.
"""
if pid_value is not None:
record = Record(id=int(pid_value))
else:
record = Record()
with db.session.begin_nested():
db.session.add(record)
return str(record.id)
def reserve(self, pid, *args, **kwargs):
"""Reserve recid."""
pid.log("RESERVE", "Successfully reserved recid")
return True
@classmethod
def is_provider_for_pid(cls, pid_str):
"""A recid is valid is it is ``None`` or ``Integer``."""
return pid_str is None or pid_str.isdigit()
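# Usage sketch (illustrative, not part of the original module): the classmethod
# below can be exercised directly, while create_new_pid/reserve are normally
# driven by invenio_pidstore within a database transaction.
#
#     assert RecordID.is_provider_for_pid(None)
#     assert RecordID.is_provider_for_pid('42')
#     assert not RecordID.is_provider_for_pid('not-a-recid')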
| gpl-2.0 |
cycotech/WAR-app | env/lib/python3.5/site-packages/django/conf/locale/fr/formats.py | 504 | 1454 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss [fr_CH), '25.10.2006', '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss [fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
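# Illustration (not part of the locale file): the *_INPUT_FORMATS entries are
# plain strptime patterns, so the first date format above parses a
# French-style date string directly.
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('25/10/2006', DATE_INPUT_FORMATS[0])
#     datetime.datetime(2006, 10, 25, 0, 0)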
| mit |
linjoahow/W16_test1 | static/Brython3.1.3-20150514-095342/Lib/VFS_import.py | 738 | 3059 | import os
from browser import doc
#_scripts=doc.createElement('script')
#_scripts.src="/src/py_VFS.js"
#_scripts.type="text/javascript"
#doc.get(tag='head')[0].appendChild(_scripts)
VFS=dict(JSObject(__BRYTHON__.py_VFS))
class VFSModuleFinder:
def __init__(self, path_entry):
print("in VFSModuleFinder")
if path_entry.startswith('/libs') or path_entry.startswith('/Lib'):
self.path_entry=path_entry
else:
raise ImportError()
def __str__(self):
return '<%s for "%s">' % (self.__class__.__name__, self.path_entry)
def find_module(self, fullname, path=None):
path = path or self.path_entry
#print('looking for "%s" in %s ...' % (fullname, path))
for _ext in ['js', 'pyj', 'py']:
_filepath=os.path.join(self.path_entry, '%s.%s' % (fullname, _ext))
if _filepath in VFS:
print("module found at %s:%s" % (_filepath, fullname))
return VFSModuleLoader(_filepath, fullname)
print('module %s not found' % fullname)
raise ImportError()
return None
class VFSModuleLoader:
"""Load source for modules"""
def __init__(self, filepath, name):
self._filepath=filepath
self._name=name
def get_source(self):
if self._filepath in VFS:
return JSObject(readFromVFS(self._filepath))
        raise ImportError('could not find source for %s' % self._name)
def is_package(self):
return '.' in self._name
def load_module(self):
if self._name in sys.modules:
#print('reusing existing module from previous import of "%s"' % fullname)
mod = sys.modules[self._name]
return mod
_src=self.get_source()
if self._filepath.endswith('.js'):
mod=JSObject(import_js_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.py'):
mod=JSObject(import_py_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.pyj'):
mod=JSObject(import_pyj_module(_src, self._filepath, self._name))
else:
raise ImportError('Invalid Module: %s' % self._filepath)
# Set a few properties required by PEP 302
mod.__file__ = self._filepath
mod.__name__ = self._name
mod.__path__ = os.path.abspath(self._filepath)
mod.__loader__ = self
mod.__package__ = '.'.join(self._name.split('.')[:-1])
if self.is_package():
print('adding path for package')
# Set __path__ for packages
# so we can find the sub-modules.
mod.__path__ = [ self.path_entry ]
else:
print('imported as regular module')
print('creating a new module object for "%s"' % self._name)
sys.modules.setdefault(self._name, mod)
JSObject(__BRYTHON__.imported)[self._name]=mod
return mod
JSObject(__BRYTHON__.path_hooks.insert(0, VFSModuleFinder))
| gpl-3.0 |
antonve/s4-project-mooc | lms/djangoapps/instructor/access.py | 83 | 2634 | """
Access control operations for use by instructor APIs.
Does not include any access control; be sure to check access before calling.
TODO: sync instructor and staff flags
e.g. should these be possible?
{instructor: true, staff: false}
{instructor: true, staff: true}
"""
import logging
from django_comment_common.models import Role
from student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseCcxCoachRole,
CourseStaffRole,
)
log = logging.getLogger(__name__)
ROLES = {
'beta': CourseBetaTesterRole,
'instructor': CourseInstructorRole,
'staff': CourseStaffRole,
'ccx_coach': CourseCcxCoachRole,
}
def list_with_level(course, level):
"""
List users who have 'level' access.
`level` is in ['instructor', 'staff', 'beta'] for standard courses.
There could be other levels specific to the course.
If there is no Group for that course-level, returns an empty list
"""
return ROLES[level](course.id).users_with_role()
def allow_access(course, user, level):
"""
Allow user access to course modification.
`level` is one of ['instructor', 'staff', 'beta']
"""
_change_access(course, user, level, 'allow')
def revoke_access(course, user, level):
"""
Revoke access from user to course modification.
`level` is one of ['instructor', 'staff', 'beta']
"""
_change_access(course, user, level, 'revoke')
def _change_access(course, user, level, action):
"""
Change access of user.
`level` is one of ['instructor', 'staff', 'beta']
action is one of ['allow', 'revoke']
NOTE: will create a group if it does not yet exist.
"""
try:
role = ROLES[level](course.id)
except KeyError:
raise ValueError("unrecognized level '{}'".format(level))
if action == 'allow':
role.add_users(user)
elif action == 'revoke':
role.remove_users(user)
else:
raise ValueError("unrecognized action '{}'".format(action))
def update_forum_role(course_id, user, rolename, action):
"""
Change forum access of user.
`rolename` is one of [FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA]
`action` is one of ['allow', 'revoke']
if `action` is bad, raises ValueError
if `rolename` does not exist, raises Role.DoesNotExist
"""
role = Role.objects.get(course_id=course_id, name=rolename)
if action == 'allow':
role.users.add(user)
elif action == 'revoke':
role.users.remove(user)
else:
raise ValueError("unrecognized action '{}'".format(action))
| agpl-3.0 |
guewen/odoo | openerp/report/render/rml2pdf/__init__.py | 381 | 1101 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from trml2pdf import parseString, parseNode
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gechr/ansible-modules-extras | packaging/os/slackpkg.py | 131 | 6382 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kim Nørgaard
# Written by Kim Nørgaard <[email protected]>
# Based on pkgng module written by bleader <[email protected]>
# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: slackpkg
short_description: Package manager for Slackware >= 12.2
description:
- Manage binary packages for Slackware using 'slackpkg' which
is available in versions after 12.2.
version_added: "2.0"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
      - state of the package, you can use "installed" as an alias for C(present) and "removed" as one for C(absent).
choices: [ 'present', 'absent', 'latest' ]
required: false
default: present
update_cache:
description:
- update the package database first
required: false
default: false
choices: [ true, false ]
author: Kim Nørgaard (@KimNorgaard)
requirements: [ "Slackware >= 12.2" ]
'''
EXAMPLES = '''
# Install package foo
- slackpkg: name=foo state=present
# Remove packages foo and bar
- slackpkg: name=foo,bar state=absent
# Make sure that it is the most updated package
- slackpkg: name=foo state=latest
'''
def query_package(module, slackpkg_path, name):
import glob
import platform
machine = platform.machine()
packages = glob.glob("/var/log/packages/%s-*-[%s|noarch]*" % (name,
machine))
if len(packages) > 0:
return True
return False
def remove_packages(module, slackpkg_path, packages):
remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
remove %s" % (slackpkg_path,
package))
if not module.check_mode and query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
install %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def upgrade_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
upgrade %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def update_cache(module, slackpkg_path):
rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
if rc != 0:
module.fail_json(msg="Could not update package cache")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']),
name=dict(aliases=["pkg"], required=True, type='list'),
update_cache=dict(default=False, aliases=["update-cache"],
type='bool'),
),
supports_check_mode=True)
slackpkg_path = module.get_bin_path('slackpkg', True)
p = module.params
pkgs = p['name']
if p["update_cache"]:
update_cache(module, slackpkg_path)
if p['state'] == 'latest':
upgrade_packages(module, slackpkg_path, pkgs)
elif p['state'] in ['present', 'installed']:
install_packages(module, slackpkg_path, pkgs)
elif p["state"] in ['removed', 'absent']:
remove_packages(module, slackpkg_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/utilities/logic/async_status.py | 22 | 3006 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: async_status
short_description: Obtain status of asynchronous task
description:
- This module gets the status of an asynchronous task.
- This module is also supported for Windows targets.
version_added: "0.5"
options:
jid:
description:
- Job or task identifier
required: true
mode:
description:
- if C(status), obtain the status; if C(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job I(jid).
choices: [ "status", "cleanup" ]
default: "status"
notes:
- See also U(https://docs.ansible.com/playbooks_async.html)
- This module is also supported for Windows targets.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def main():
module = AnsibleModule(argument_spec=dict(
jid=dict(required=True),
mode=dict(default='status', choices=['status', 'cleanup']),
))
mode = module.params['mode']
jid = module.params['jid']
async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
# setup logging directory
logdir = os.path.expanduser(async_dir)
log_path = os.path.join(logdir, jid)
if not os.path.exists(log_path):
module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
if mode == 'cleanup':
os.unlink(log_path)
module.exit_json(ansible_job_id=jid, erased=log_path)
# NOT in cleanup mode, assume regular status mode
# no remote kill mode currently exists, but probably should
# consider log_path + ".pid" file and also unlink that above
data = None
try:
data = open(log_path).read()
data = json.loads(data)
except Exception:
if not data:
# file not written yet? That means it is running
module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
else:
module.fail_json(ansible_job_id=jid, results_file=log_path,
msg="Could not parse job output: %s" % data, started=1, finished=1)
if 'started' not in data:
data['finished'] = 1
data['ansible_job_id'] = jid
elif 'finished' not in data:
data['finished'] = 0
# Fix error: TypeError: exit_json() keywords must be strings
data = dict([(str(k), v) for k, v in iteritems(data)])
module.exit_json(**data)
if __name__ == '__main__':
main()
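# Usage sketch (illustrative, not part of the original module): a typical
# playbook task polls a previously started async job by its registered jid.
#
#     - name: check on the async task
#       async_status:
#         jid: "{{ long_task.ansible_job_id }}"
#       register: job_result
#       until: job_result.finished
#       retries: 30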
| gpl-3.0 |
bplancher/odoo | addons/account_tax_exigible/models/account_move.py | 12 | 1726 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import fields, models, api
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
tax_exigible = fields.Boolean(string='Appears in VAT report', default=True,
help="Technical field used to mark a tax line as exigible in the vat report or not (only exigible journal items are displayed). By default all new journal items are directly exigible, but with the module account_tax_cash_basis, some will become exigible only when the payment is recorded.")
@api.model
def create(self, vals, apply_taxes=True):
taxes = False
if vals.get('tax_line_id'):
taxes = [{'use_cash_basis': self.env['account.tax'].browse(vals['tax_line_id']).use_cash_basis}]
if vals.get('tax_ids'):
taxes = self.env['account.move.line'].resolve_2many_commands('tax_ids', vals['tax_ids'])
if taxes and any([tax['use_cash_basis'] for tax in taxes]) and not vals.get('tax_exigible'):
vals['tax_exigible'] = False
return super(AccountMoveLine, self).create(vals, apply_taxes=apply_taxes)
class AccountPartialReconcileCashBasis(models.Model):
_inherit = 'account.partial.reconcile'
def _check_tax_exigible(self, line):
return line.tax_exigible
def _get_tax_cash_basis_lines(self, value_before_reconciliation):
lines, move_date = super(AccountPartialReconcileCashBasis, self)._get_tax_cash_basis_lines(value_before_reconciliation)
for i in range(len(lines)):
vals = lines[i][2]
vals['tax_exigible'] = True
lines[i] = (0, 0, vals)
return lines, move_date
| agpl-3.0 |
TylerKirby/cltk | cltk/stem/akkadian/cv_pattern.py | 2 | 2145 | """
Return a CV patterned string based on the word.
"""
__author__ = ['M. Willis Monroe <[email protected]>']
__license__ = 'MIT License. See LICENSE.'
from cltk.stem.akkadian.syllabifier import AKKADIAN
class CVPattern(object):
"""Return a patterned string representing the consonants
and vowels of the input word."""
def __init__(self):
self.akkadian = AKKADIAN
def get_cv_pattern(self, word, pprint=False):
"""
input = iparras
pattern = [('V', 1, 'i'), ('C', 1, 'p'), ('V', 2, 'a'), ('C', 2, 'r'),
('C', 2, 'r'), ('V', 2, 'a'), ('C', 3, 's')]
pprint = V₁C₁V₂C₂C₂V₂C₃
"""
subscripts = {
1: '₁',
2: '₂',
3: '₃',
4: '₄',
5: '₅',
6: '₆',
7: '₇',
8: '₈',
9: '₉',
0: '₀'
}
pattern = []
c_count = 1
v_count = 1
count = 0
for char in word:
if char in self.akkadian['consonants']:
cv = 'C'
else:
cv = 'V'
# remove length:
if char in self.akkadian['macron_vowels']:
char = self.akkadian['short_vowels'][self.akkadian['macron_vowels'].index(char)]
elif char in self.akkadian['circumflex_vowels']:
char = self.akkadian['short_vowels'][self.akkadian['circumflex_vowels'].index(char)]
if char not in [x[2] for x in pattern]:
if cv == 'C':
count = c_count
c_count += 1
elif cv == 'V':
count = v_count
v_count += 1
pattern.append((cv, count, char))
elif char in [x[2] for x in pattern]:
pattern.append((cv, next(x[1] for x in pattern if x[2] == char), char))
if pprint:
output = ''
for item in pattern:
output += (item[0] + subscripts[item[1]])
return output
return pattern
| mit |
ky822/scikit-learn | sklearn/utils/estimator_checks.py | 9 | 51912 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
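# Usage sketch (illustrative, not part of the original module): the whole suite
# can be pointed at an existing scikit-learn estimator class, e.g.
#
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> check_estimator(LogisticRegression)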
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with a single-element y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # one-vs-one decision_function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
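# Why catching (AttributeError, ValueError) above also covers NotFittedError:
# that exception is declared roughly as in this hedged sketch of the real
# class (located in sklearn.utils.validation in this version):
#
#     class NotFittedError(ValueError, AttributeError):
#         """Raised when an estimator is used before fitting."""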
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
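# Shape illustration for the column-vector case exercised above (sketch; the
# values follow from y = np.arange(10) % 3):
#     y.shape                == (10,)    # 1-D target, the expected layout
#     y[:, np.newaxis].shape == (10, 1)  # 2-D column vector -> warning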
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
    # like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # checks that regressors raise a DeprecationWarning if they still expose
    # decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
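# Worked example of the manual reweighting above (a sketch, not part of the
# original test): with y = [1, 1, 1, -1, -1] we have n_samples = 5 and
# n_classes = 2, so
#     class_weight[+1] = 5 / (3 * 2) = 5/6   (majority class, smaller weight)
#     class_weight[-1] = 5 / (2 * 2) = 5/4   (minority class, larger weight)
# which is what class_weight='balanced' computes internally, hence the
# coefficient comparison.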
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
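# Minimal sketch of a constructor that satisfies the check above (a
# hypothetical estimator, not part of scikit-learn): __init__ does nothing
# but store its arguments under the same names, and every default is a
# simple, comparable value.
#
#     class MyEstimator(BaseEstimator):
#         def __init__(self, alpha=1.0, fit_intercept=True):
#             self.alpha = alpha
#             self.fit_intercept = fit_intercept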
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration.
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
Rav3nPL/doubloons-0.10 | qa/rpc-tests/mempool_coinbase_spends.py | 143 | 3850 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
        self.sync_all()
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, the coinbases of nodes[0] blocks
        # 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].setgenerate(True, 4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].setgenerate(True, 1)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
self.nodes[0].setgenerate(True, 1)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| mit |
kmggh/python-guess-number | guess_num_player.py | 1 | 1325 | #!/usr/bin/env python
# Fri 2013-05-03 23:49:55 -0400
# Copyright (c) 2013 by Ken Guyton. All Rights Reserved.
"""A game for guessing a number. Used as a Python class example.
A computer player plays the game.
"""
__author__ = 'Ken Guyton'
import argparse
import guess_num2
import player
MAX_DEFAULT = 100
MIN_DEFAULT = 0
MAP = {-1: 'low', 1: 'high'}
def parse_args():
"""Parse the command line args for main."""
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--max', help='Max guess amount', type=int,
default=100)
parser.add_argument('-b', '--binary', action='store_true',
help='Use the binary player')
parser.add_argument('-r', '--random', action='store_true',
help='Use the random player')
return parser.parse_args()
def main():
args = parse_args()
game = guess_num2.Game(max_val=args.max)
if args.binary:
the_player = player.BinaryPlayer(game)
elif args.random:
the_player = player.RandomPlayer(game)
else:
the_player = player.BinaryPlayer(game)
print 'Guessing from {0} to {1}: '.format(game.min, game.max)
for guess, direction in the_player.play():
print game.evaluate(guess, direction)
print 'You guessed {0} times.'.format(game.count)
if __name__ == '__main__':
main()
| artistic-2.0 |
rpip/mpower-python | setup.py | 2 | 2405 | from setuptools import setup, Command
from unittest import TextTestRunner, TestLoader
from glob import glob
from os.path import splitext, basename, join as pjoin
try:
from os.path import walk
except ImportError:
from os import walk
import os
class TestCommand(Command):
user_options = []
def initialize_options(self):
self._dir = os.getcwd()
def finalize_options(self):
pass
def run(self):
'''
Finds all the tests modules in tests/, and runs them.
'''
testfiles = []
for t in glob(pjoin(self._dir, 'tests', '*.py')):
if not t.endswith('__init__.py'):
testfiles.append('.'.join(
['tests', splitext(basename(t))[0]])
)
import sys
ROOT = os.path.dirname(os.getcwd())
MPOWER_LIBS = os.path.join(ROOT, "mpower")
sys.path.append(MPOWER_LIBS)
tests = TestLoader().loadTestsFromNames(testfiles)
t = TextTestRunner(verbosity=1)
t.run(tests)
class CleanCommand(Command):
"""Recursively Delete all compile python modules"""
user_options = []
def initialize_options(self):
self._clean_me = []
for root, dirs, files in os.walk('.'):
for f in files:
if f.endswith('.pyc'):
self._clean_me.append(pjoin(root, f))
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except:
pass
def readme(filename='README.rst'):
    with open(filename) as f:
        text = f.read()
    return text
setup(
name='mpower',
version=__import__('mpower').__version__,
author='Mawuli Adzaku',
author_email='[email protected]',
packages=['mpower'],
cmdclass={'test': TestCommand, 'clean': CleanCommand},
scripts=[],
url='https://github.com/mawuli/mpower-python',
license='LICENSE.txt',
keywords="mpower mobile money payments",
description='MPower Payments Python client library',
long_description=readme('README.rst'),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
install_requires=['requests >=2.0'],
)
| mit |
diofant/diofant | diofant/core/power.py | 1 | 47841 | import math
from mpmath.libmp import sqrtrem as mpmath_sqrtrem
from ..logic import true
from ..utilities import sift
from .add import Add
from .cache import cacheit
from .compatibility import as_int
from .evalf import PrecisionExhausted
from .evaluate import global_evaluate
from .expr import Expr
from .function import (_coeff_isneg, expand_complex, expand_mul,
expand_multinomial)
from .logic import fuzzy_or
from .mul import Mul, _keep_coeff
from .numbers import E, I, Integer, Rational, nan, oo, pi, zoo
from .singleton import S
from .symbol import Dummy, symbols
from .sympify import sympify
def integer_nthroot(y, n):
"""
Return a tuple containing x = floor(y**(1/n))
and a boolean indicating whether the result is exact (that is,
whether x**n == y).
>>> integer_nthroot(16, 2)
(4, True)
>>> integer_nthroot(26, 2)
(5, False)
"""
y, n = int(y), int(n)
if y < 0:
raise ValueError('y must be nonnegative')
if n < 1:
raise ValueError('n must be positive')
if y in (0, 1):
return y, True
if n == 1:
return y, True
if n == 2:
x, rem = mpmath_sqrtrem(y)
return int(x), not rem
if n > y:
return 1, False
# Get initial estimate for Newton's method. Care must be taken to
# avoid overflow
try:
guess = int(y**(1./n) + 0.5)
except OverflowError:
exp = math.log(y, 2)/n
if exp > 53:
shift = int(exp - 53)
guess = int(2.0**(exp - shift) + 1) << shift
else:
guess = int(2.0**exp)
if guess > 2**50:
# Newton iteration
xprev, x = -1, guess
while 1:
t = x**(n - 1)
xprev, x = x, ((n - 1)*x + y//t)//n
if abs(x - xprev) < 2:
break
else:
x = guess
# Compensate
t = x**n
while t < y:
x += 1
t = x**n
while t > y:
x -= 1
t = x**n
return x, t == y
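# Worked illustration of the exactness flag (values repeat the doctest above,
# no new behaviour is assumed):
#     integer_nthroot(16, 2) -> (4, True)   because 4**2 == 16
#     integer_nthroot(26, 2) -> (5, False)  because 5**2 == 25 < 26 < 6**2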
class Pow(Expr):
"""
Defines the expression x**y as "x raised to a power y".
For complex numbers `x` and `y`, ``Pow`` gives the principal
value of `exp(y*log(x))`.
Singleton definitions involving (0, 1, -1, oo, -oo, I, -I):
+--------------+---------+-----------------------------------------------+
| expr | value | reason |
+==============+=========+===============================================+
| z**0 | 1 | Although arguments over 0**0 exist, see [2]. |
+--------------+---------+-----------------------------------------------+
| z**1 | z | |
+--------------+---------+-----------------------------------------------+
| (-oo)**(-1) | 0 | |
+--------------+---------+-----------------------------------------------+
| (-1)**-1 | -1 | |
+--------------+---------+-----------------------------------------------+
| 0**-1 | zoo | This is not strictly true, as 0**-1 may be |
| | | undefined, but is convenient in some contexts |
| | | where the base is assumed to be positive. |
+--------------+---------+-----------------------------------------------+
| 1**-1 | 1 | |
+--------------+---------+-----------------------------------------------+
| oo**-1 | 0 | |
+--------------+---------+-----------------------------------------------+
| 0**oo | 0 | Because for all complex numbers z near |
| | | 0, z**oo -> 0. |
+--------------+---------+-----------------------------------------------+
| 0**-oo | zoo | This is not strictly true, as 0**oo may be |
| | | oscillating between positive and negative |
| | | values or rotating in the complex plane. |
| | | It is convenient, however, when the base |
| | | is positive. |
+--------------+---------+-----------------------------------------------+
| 1**oo | nan | Because there are various cases where |
| 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), |
| | | but lim(x(t)**y(t), t) != 1. See [3]. |
+--------------+---------+-----------------------------------------------+
| z**zoo | nan | No limit for z**t for t -> zoo. |
+--------------+---------+-----------------------------------------------+
| (-1)**oo | nan | Because of oscillations in the limit. |
| (-1)**(-oo) | | |
+--------------+---------+-----------------------------------------------+
| oo**oo | oo | |
+--------------+---------+-----------------------------------------------+
| oo**-oo | 0 | |
+--------------+---------+-----------------------------------------------+
| (-oo)**oo | nan | |
| (-oo)**-oo | | |
+--------------+---------+-----------------------------------------------+
| oo**I | nan | oo**e could probably be best thought of as |
| (-oo)**I | | the limit of x**e for real x as x tends to |
| | | oo. If e is I, then the limit does not exist |
| | | and nan is used to indicate that. |
+--------------+---------+-----------------------------------------------+
| oo**(1+I) | zoo | If the real part of e is positive, then the |
| (-oo)**(1+I) | | limit of abs(x**e) is oo. So the limit value |
| | | is zoo. |
+--------------+---------+-----------------------------------------------+
| oo**(-1+I) | 0 | If the real part of e is negative, then the |
| -oo**(-1+I) | | limit is 0. |
+--------------+---------+-----------------------------------------------+
    Because symbolic computations are more flexible than floating point
calculations and we prefer to never return an incorrect answer,
we choose not to conform to all IEEE 754 conventions. This helps
us avoid extra test-case code in the calculation of limits.
See Also
========
diofant.core.numbers.Infinity
diofant.core.numbers.NaN
References
==========
* https://en.wikipedia.org/wiki/Exponentiation
* https://en.wikipedia.org/wiki/Zero_to_the_power_of_zero
* https://en.wikipedia.org/wiki/Indeterminate_forms
"""
is_Pow = True
@cacheit
def __new__(cls, b, e, evaluate=None):
if evaluate is None:
evaluate = global_evaluate[0]
from ..functions.elementary.exponential import exp_polar
b = sympify(b, strict=True)
e = sympify(e, strict=True)
if evaluate:
if nan in (b, e):
return nan
elif e is S.Zero:
return Integer(1)
elif e is S.One:
return b
elif e is zoo:
return nan
elif e.is_integer and _coeff_isneg(b):
if e.is_even:
b = -b
elif e.is_odd:
return -Pow(-b, e)
if b is S.One:
if abs(e).is_infinite:
return nan
return Integer(1)
else:
# recognize base as E
if not e.is_Atom and b is not E and not isinstance(b, exp_polar):
from ..functions import im, log, sign
from ..simplify import denom, numer
from .exprtools import factor_terms
c, ex = factor_terms(e, sign=False).as_coeff_Mul()
den = denom(ex)
if isinstance(den, log) and den.args[0] == b:
return E**(c*numer(ex))
elif den.is_Add:
s = sign(im(b))
if s.is_Number and s and den == \
log(-factor_terms(b, sign=False)) + s*I*pi:
return E**(c*numer(ex))
obj = b._eval_power(e)
if obj is not None:
return obj
return Expr.__new__(cls, b, e)
def _eval_is_commutative(self):
return self.base.is_commutative and self.exp.is_commutative
@property
def base(self):
"""Returns base of the power expression."""
return self.args[0]
@property
def exp(self):
"""Returns exponent of the power expression."""
return self.args[1]
@classmethod
def class_key(cls):
"""Nice order of classes."""
return 4, 2, cls.__name__
def _eval_power(self, other):
from ..functions import Abs, arg, exp, floor, im, log, re, sign
b, e = self.as_base_exp()
if b is nan:
return (b**e)**other # let __new__ handle it
s = None
if other.is_integer:
s = 1
elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)...
s = 1
elif e.is_extended_real is not None:
# helper functions ===========================
def _half(e):
"""Return True if the exponent has a literal 2 as the
denominator, else None.
"""
if getattr(e, 'denominator', None) == 2:
return True
n, d = e.as_numer_denom()
if n.is_integer and d == 2:
return True
def _n2(e):
"""Return ``e`` evaluated to a Number with 2 significant
digits, else None.
"""
try:
rv = e.evalf(2)
if rv.is_Number:
return rv
except PrecisionExhausted: # pragma: no cover
pass
# ===================================================
if e.is_extended_real:
# we need _half(other) with constant floor or
# floor(Rational(1, 2) - e*arg(b)/2/pi) == 0
# handle -1 as special case
if (e == -1):
# floor arg. is 1/2 + arg(b)/2/pi
if _half(other):
if b.is_negative is True:
return (-1)**other*Pow(-b, e*other)
if b.is_extended_real is False:
return Pow(b.conjugate()/Abs(b)**2, other)
elif e.is_even:
if b.is_extended_real:
b = abs(b)
if b.is_imaginary:
b = abs(im(b))*I
if (abs(e) < 1) == true or (e == 1):
s = 1 # floor = 0
elif b.is_nonnegative:
s = 1 # floor = 0
elif re(b).is_nonnegative and (abs(e) < 2) == true:
s = 1 # floor = 0
elif im(b).is_nonzero and (abs(e) == 2):
s = 1 # floor = 0
elif b.is_imaginary and (abs(e) == 2):
s = 1 # floor = 0
elif _half(other):
s = exp(2*pi*I*other*floor(
Rational(1, 2) - e*arg(b)/(2*pi)))
if s.is_extended_real and _n2(sign(s) - s) == 0:
s = sign(s)
else:
s = None
else:
# e.is_extended_real is False requires:
# _half(other) with constant floor or
# floor(Rational(1, 2) - im(e*log(b))/2/pi) == 0
s = exp(2*I*pi*other*floor(Rational(1, 2) - im(e*log(b))/2/pi))
# be careful to test that s is -1 or 1 b/c sign(I) == I:
# so check that s is real
if s.is_extended_real and _n2(sign(s) - s) == 0:
s = sign(s)
else:
s = None
if s is not None:
return s*Pow(b, e*other)
def _eval_is_positive(self):
b, e = self.base, self.exp
if b.is_nonnegative and b == e:
return True
elif b.is_positive and (e.is_real or e.is_positive):
return True
elif b.is_negative and e.is_integer and (b.is_finite or e.is_nonnegative):
return e.is_even
elif b.is_nonpositive and e.is_odd and (b.is_finite or e.is_nonnegative):
return False
elif b in {I, -I} and e.is_imaginary:
return True
def _eval_is_nonnegative(self):
b, e = self.base, self.exp
if b.is_imaginary and e.is_nonnegative:
m = e % 4
if m.is_integer:
return m.is_zero
def _eval_is_negative(self):
b, e = self.base, self.exp
if b.is_negative:
if e.is_odd and (b.is_finite or e.is_positive):
return True
if e.is_even:
return False
elif b.is_positive:
if e.is_extended_real:
return False
elif b.is_nonnegative:
if e.is_nonnegative:
return False
elif b.is_nonpositive:
if e.is_even:
return False
elif b.is_extended_real:
if e.is_even:
return False
def _eval_is_zero(self):
b, e = self.base, self.exp
if b.is_zero:
if e.is_positive:
return True
elif e.is_nonpositive:
return False
elif b.is_nonzero:
if e.is_finite:
return False
elif e.is_infinite:
if (1 - abs(b)).is_positive:
return e.is_positive
elif (1 - abs(b)).is_negative:
return e.is_negative
def _eval_is_integer(self):
b, e = self.base, self.exp
if b.is_rational:
if b.is_integer is False and e.is_positive:
return False # rat**nonneg
if b.is_integer and e.is_integer:
if b is S.NegativeOne:
return True
if e.is_nonnegative or e.is_positive:
return True
if b.is_integer and e.is_negative and (e.is_finite or e.is_integer):
if (b - 1).is_nonzero and (b + 1).is_nonzero:
return False
if b.is_Number and e.is_Number:
check = self.func(*self.args)
if check.is_Integer:
return True
def _eval_is_extended_real(self):
from ..functions import arg, log
from .mul import Mul
b, e = self.base, self.exp
if b is E:
if e.is_extended_real:
return True
elif e.is_imaginary:
return (2*I*e/pi).is_even
if b.is_extended_real is None:
if b.func == self.func and b.base is E and b.exp.is_imaginary:
return e.is_imaginary
if e.is_extended_real is None:
return
if b.is_extended_real and e.is_extended_real:
if b.is_positive:
return True
elif b.is_nonnegative:
if e.is_nonnegative:
return True
else:
if e.is_integer:
if b.is_nonzero or e.is_nonnegative:
return True
elif b.is_negative:
if e.is_rational and e.is_noninteger:
return False
if b.is_nonzero and e.is_negative:
return (b**-e).is_extended_real
if b.is_imaginary:
if e.is_integer:
if e.is_even:
if b.is_nonzero or e.is_nonnegative:
return True
elif e.is_odd:
return False
elif e.is_imaginary and log(b).is_imaginary:
return True
elif e.is_Add:
c, a = e.as_coeff_Add()
if c and c.is_Integer:
return Mul(b**c, b**a, evaluate=False).is_extended_real
elif b in (-I, I) and (e/2).is_noninteger:
return False
return
if b.is_extended_real and e.is_imaginary:
if b is S.NegativeOne:
return True
c = e.coeff(I)
if c in (1, -1):
if b == 2:
return False
if b.is_extended_real is False: # we already know it's not imag
i = arg(b)*e/pi
return i.is_integer
def _eval_is_complex(self):
from ..functions import log
b, e = self.base, self.exp
if b.is_complex:
exp = log(b)*e
return fuzzy_or([exp.is_complex, exp.is_negative])
def _eval_is_imaginary(self):
from ..functions import arg, log
b, e = self.base, self.exp
if b.is_imaginary:
if e.is_integer:
return e.is_odd
if e.is_imaginary and e.is_nonzero:
if log(b).is_imaginary:
return False
if b.is_real and e.is_real:
if b.is_positive:
return False
else:
if e.is_integer:
return False
else:
if (2*e).is_integer:
return b.is_negative
if b.is_real is False: # we already know it's not imag
return (2*arg(b)*e/pi).is_odd
def _eval_is_odd(self):
b, e = self.base, self.exp
if e.is_integer:
if e.is_positive:
return b.is_odd
elif e.is_nonnegative and b.is_odd:
return True
elif b is S.NegativeOne:
return True
def _eval_is_finite(self):
b, e = self.base, self.exp
if e.is_negative:
if b.is_zero:
return False
if b.is_finite and e.is_finite:
if e.is_nonnegative or b.is_nonzero:
return True
def _eval_is_polar(self):
b, e = self.base, self.exp
if b.is_polar and e.is_commutative:
return True
def _eval_subs(self, old, new):
from ..functions import log
from .symbol import Symbol
def _check(ct1, ct2, old):
"""Return bool, pow where, if bool is True, then the exponent of
Pow `old` will combine with `pow` so the substitution is valid,
otherwise bool will be False,
cti are the coefficient and terms of an exponent of self or old
In this _eval_subs routine a change like (b**(2*x)).subs({b**x: y})
will give y**2 since (b**x)**2 == b**(2*x); if that equality does
not hold then the substitution should not occur so `bool` will be
False.
"""
coeff1, terms1 = ct1
coeff2, terms2 = ct2
if terms1 == terms2:
pow = coeff1/coeff2
try:
pow = as_int(pow)
combines = True
except ValueError:
combines = Pow._eval_power(
Pow(*old.as_base_exp(), evaluate=False),
pow)
if isinstance(combines, Pow):
combines = combines.base is old.base
else:
combines = False
return combines, pow
return False, None
if old == self.base:
return new**self.exp._subs(old, new)
if old.func is self.func and self.exp == old.exp:
l = log(self.base, old.base)
if l.is_Number:
return Pow(new, l)
if isinstance(old, self.func) and self.base == old.base:
if self.exp.is_Add is False:
ct2 = old.exp.as_independent(Symbol, as_Add=False)
ct1 = (self.exp/ct2[1], ct2[1])
ok, pow = _check(ct1, ct2, old)
if ok:
# issue sympy/sympy#5180: (x**(6*y)).subs({x**(3*y):z})->z**2
return self.func(new, pow)
else: # b**(6*x+a).subs({b**(3*x): y}) -> y**2 * b**a
# exp(exp(x) + exp(x**2)).subs({exp(exp(x)): w}) -> w * exp(exp(x**2))
oarg = old.exp
new_l = []
o_al = []
ct2 = oarg.as_coeff_mul()
for a in self.exp.args:
newa = a._subs(old, new)
ct1 = newa.as_coeff_mul()
ok, pow = _check(ct1, ct2, old)
if ok:
new_l.append(new**pow)
continue
o_al.append(newa)
if new_l:
new_l.append(Pow(self.base, Add(*o_al), evaluate=False))
return Mul(*new_l)
if old.is_Pow and old.base is E and self.exp.is_extended_real and self.base.is_positive:
ct1 = old.exp.as_independent(Symbol, as_Add=False)
ct2 = (self.exp*log(self.base)).as_independent(
Symbol, as_Add=False)
ok, pow = _check(ct1, ct2, old)
if ok:
return self.func(new, pow) # (2**x).subs({exp(x*log(2)): z}) -> z
def as_base_exp(self):
"""Return base and exp of self.
If base is 1/Integer, then return Integer, -exp. If this extra
processing is not needed, the base and exp properties will
give the raw arguments
Examples
========
>>> p = Pow(Rational(1, 2), 2, evaluate=False)
>>> p.as_base_exp()
(2, -2)
>>> p.args
(1/2, 2)
"""
b, e = self.base, self.exp
if b.is_Rational and b.numerator == 1 and b.denominator != 1:
return Integer(b.denominator), -e
return b, e
def _eval_adjoint(self):
from ..functions.elementary.complexes import adjoint
i, p = self.exp.is_integer, self.base.is_positive
if i:
return adjoint(self.base)**self.exp
if p:
return self.base**adjoint(self.exp)
def _eval_conjugate(self):
if self.is_extended_real:
return self
from ..functions.elementary.complexes import conjugate as c
i, p = self.exp.is_integer, self.base.is_positive
if i:
return c(self.base)**self.exp
if p:
return self.base**c(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
assert expanded != self
return c(expanded)
def _eval_transpose(self):
from ..functions.elementary.complexes import transpose
i, p = self.exp.is_integer, self.base.is_complex
if p:
return self.base**self.exp
if i:
return transpose(self.base)**self.exp
def _eval_expand_power_exp(self, **hints):
"""a**(n+m) -> a**n*a**m."""
b = self.base
e = self.exp
if e.is_Add and e.is_commutative:
expr = []
for x in e.args:
expr.append(self.func(self.base, x))
return Mul(*expr)
return self.func(b, e)
def _eval_expand_power_base(self, **hints):
"""(a*b)**n -> a**n * b**n."""
force = hints.get('force', False)
b = self.base
e = self.exp
if not b.is_Mul:
return self
cargs, nc = b.args_cnc(split_1=False)
# expand each term - this is top-level-only
# expansion but we have to watch out for things
# that don't have an _eval_expand method
if nc:
nc = [i._eval_expand_power_base(**hints)
if hasattr(i, '_eval_expand_power_base') else i
for i in nc]
if e.is_Integer:
if e.is_positive:
rv = Mul(*nc*e)
else:
rv = 1/Mul(*nc*-e)
if cargs:
rv *= Mul(*cargs)**e
return rv
if not cargs:
return self.func(Mul(*nc), e, evaluate=False)
nc = [Mul(*nc)]
# sift the commutative bases
def pred(x):
if x is I:
return I
polar = x.is_polar
if polar:
return True
if polar is None:
return fuzzy_or([x.is_nonnegative, (1/x).is_nonnegative])
sifted = sift(cargs, pred)
nonneg = sifted[True]
other = sifted[None]
neg = sifted[False]
imag = sifted[I]
if imag:
i = len(imag) % 4
if i == 0:
pass
elif i == 1:
other.append(I)
elif i == 2:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(Integer(-1))
else:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(Integer(-1))
other.append(I)
del imag
# bring out the bases that can be separated from the base
if force or e.is_integer:
# treat all commutatives the same and put nc in other
cargs = nonneg + neg + other
other = nc
else:
# this is just like what is happening automatically, except
# that now we are doing it for an arbitrary exponent for which
# no automatic expansion is done
assert not e.is_Integer
# handle negatives by making them all positive and putting
# the residual -1 in other
if len(neg) > 1:
o = Integer(1)
if not other and neg[0].is_Number:
o *= neg.pop(0)
if len(neg) % 2:
o = -o
for n in neg:
nonneg.append(-n)
if o is not S.One:
other.append(o)
elif neg and other:
if neg[0].is_Number and neg[0] is not S.NegativeOne:
other.append(Integer(-1))
nonneg.append(-neg[0])
else:
other.extend(neg)
else:
other.extend(neg)
del neg
cargs = nonneg
other += nc
rv = Integer(1)
if cargs:
rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs])
if other:
rv *= self.func(Mul(*other), e, evaluate=False)
return rv
def _eval_expand_multinomial(self, **hints):
"""(a+b+..) ** n -> a**n + n*a**(n-1)*b + .., n is nonzero integer."""
base, exp = self.base, self.exp
result = self
if exp.is_Rational and exp.numerator > 0 and base.is_Add:
if not exp.is_Integer:
n = Integer(exp.numerator // exp.denominator)
if not n:
return result
else:
radical, result = self.func(base, exp - n), []
expanded_base_n = self.func(base, n)
if expanded_base_n.is_Pow:
expanded_base_n = \
expanded_base_n._eval_expand_multinomial()
for term in Add.make_args(expanded_base_n):
result.append(term*radical)
return Add(*result)
n = int(exp)
if base.is_commutative:
order_terms, other_terms = [], []
for b in base.args:
if b.is_Order:
order_terms.append(b)
else:
other_terms.append(b)
if order_terms:
# (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
f = Add(*other_terms)
o = Add(*order_terms)
if n == 2:
return expand_multinomial(f**n, deep=False) + n*f*o
else:
g = expand_multinomial(f**(n - 1), deep=False)
return expand_mul(f*g, deep=False) + n*g*o
if base.is_number:
# Efficiently expand expressions of the form (a + b*I)**n
# where 'a' and 'b' are real numbers and 'n' is integer.
a, b = base.as_real_imag()
if a.is_Rational and b.is_Rational:
if not a.is_Integer:
if not b.is_Integer:
k = self.func(a.denominator * b.denominator, n)
a, b = a.numerator*b.denominator, a.denominator*b.numerator
else:
k = self.func(a.denominator, n)
a, b = a.numerator, a.denominator*b
elif not b.is_Integer:
k = self.func(b.denominator, n)
a, b = a*b.denominator, b.numerator
else:
k = 1
a, b, c, d = int(a), int(b), 1, 0
while n:
if n & 1:
c, d = a*c - b*d, b*c + a*d
n -= 1
a, b = a*a - b*b, 2*a*b
n //= 2
if k == 1:
return c + I*d
else:
return Integer(c)/k + I*d/k
p = other_terms
# (x+y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
# in this particular example:
# p = [x,y]; n = 3
# so now it's easy to get the correct result -- we get the
# coefficients first:
from ..ntheory import multinomial_coefficients
from ..polys import Poly
expansion_dict = multinomial_coefficients(len(p), n)
# in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
# and now construct the expression.
return Poly(expansion_dict, *p).as_expr()
else:
if n == 2:
return Add(*[f*g for f in base.args for g in base.args])
else:
multi = (base**(n - 1))._eval_expand_multinomial()
assert multi.is_Add
return Add(*[f*g for f in base.args for g in multi.args])
elif (exp.is_Rational and exp.numerator < 0 and base.is_Add and
abs(exp.numerator) > exp.denominator):
return 1 / self.func(base, -exp)._eval_expand_multinomial()
elif exp.is_Add and base.is_Number:
# a + b a b
# n --> n n , where n, a, b are Numbers
coeff, tail = Integer(1), Integer(0)
for term in exp.args:
if term.is_Number:
coeff *= self.func(base, term)
else:
tail += term
return coeff * self.func(base, tail)
else:
return result
def as_real_imag(self, deep=True, **hints):
"""Returns real and imaginary parts of self
See Also
========
diofant.core.expr.Expr.as_real_imag
"""
from ..functions import arg, cos, sin
if self.exp.is_Integer:
exp = self.exp
re, im = self.base.as_real_imag(deep=deep)
if not im:
return self, Integer(0)
a, b = symbols('a b', cls=Dummy)
if exp >= 0:
if re.is_Number and im.is_Number:
# We can be more efficient in this case
expr = expand_multinomial(self.base**exp)
return expr.as_real_imag()
expr = ((a + b)**exp).as_poly() # a = re, b = im; expr = (a + b*I)**exp
else:
mag = re**2 + im**2
re, im = re/mag, -im/mag
if re.is_Number and im.is_Number:
# We can be more efficient in this case
expr = expand_multinomial((re + im*I)**-exp)
return expr.as_real_imag()
expr = ((a + b)**-exp).as_poly()
# Terms with even b powers will be real
r = [i for i in expr.terms() if not i[0][1] % 2]
re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
# Terms with odd b powers will be imaginary
r = [i for i in expr.terms() if i[0][1] % 4 == 1]
im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
r = [i for i in expr.terms() if i[0][1] % 4 == 3]
im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
return (re_part.subs({a: re, b: I*im}),
im_part1.subs({a: re, b: im}) + im_part3.subs({a: re, b: -im}))
elif self.exp.is_Rational:
re, im = self.base.as_real_imag(deep=deep)
if im.is_zero and self.exp is S.Half:
if re.is_nonnegative:
return self, Integer(0)
if re.is_nonpositive:
return Integer(0), (-self.base)**self.exp
# XXX: This is not totally correct since for x**(p/q) with
# x being imaginary there are actually q roots, but
# only a single one is returned from here.
r = self.func(self.func(re, 2) + self.func(im, 2), Rational(1, 2))
t = arg(re + I*im)
rp, tp = self.func(r, self.exp), t*self.exp
return rp*cos(tp), rp*sin(tp)
elif self.base is E:
from ..functions import exp
re, im = self.exp.as_real_imag()
if deep:
re = re.expand(deep, **hints)
im = im.expand(deep, **hints)
c, s = cos(im), sin(im)
return exp(re)*c, exp(re)*s
else:
from ..functions import im, re
if deep:
hints['complex'] = False
expanded = self.expand(deep, **hints)
if hints.get('ignore') != expanded:
return re(expanded), im(expanded)
else:
return re(self), im(self)
def _eval_derivative(self, s):
from ..functions import log
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
return self.func(base, exp)
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return bool(self.base._eval_is_polynomial(syms) and
self.exp.is_Integer and (self.exp >= 0))
else:
return True
def _eval_is_rational(self):
p = self.func(*self.as_base_exp()) # in case it's unevaluated
if not p.is_Pow:
return p.is_rational
b, e = p.base, p.exp
if e.is_Rational and b.is_Rational:
# we didn't check that e is not an Integer
# because Rational**Integer autosimplifies
return False
if e.is_integer:
if b.is_rational:
if b.is_nonzero or e.is_nonnegative:
return True
if b == e: # always rational, even for 0**0
return True
elif b.is_irrational:
if e.is_zero:
return True
if b is E:
if e.is_rational and e.is_nonzero:
return False
def _eval_is_algebraic(self):
b, e = self.base, self.exp
if b.is_zero or (b - 1).is_zero:
return True
elif b is E:
s = self.doit()
if s.func == self.func:
if e.is_nonzero:
if e.is_algebraic:
return False
elif (e/pi).is_rational:
return False
elif (e/(I*pi)).is_rational:
return True
else:
return s.is_algebraic
elif e.is_rational and e.is_nonzero:
if b.is_nonzero or e.is_nonnegative:
return b.is_algebraic
elif b.is_algebraic and e.is_algebraic:
if (b.is_nonzero and (b - 1).is_nonzero) or b.is_irrational:
return e.is_rational
def _eval_is_rational_function(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_rational_function(syms) and \
self.exp.is_Integer
else:
return True
def _eval_is_algebraic_expr(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_algebraic_expr(syms) and \
self.exp.is_Rational
else:
return True
def _eval_as_numer_denom(self):
"""Expression -> a/b -> a, b.
See Also
========
diofant.core.expr.Expr.as_numer_denom
"""
if not self.is_commutative:
return self, Integer(1)
base, exp = self.as_base_exp()
if base is S.One:
return self, Integer(1)
n, d = base.as_numer_denom()
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
int_exp = exp.is_integer
# the denominator cannot be separated from the numerator if
# its sign is unknown unless the exponent is an integer, e.g.
# sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the
# denominator is negative the numerator and denominator can
# be negated and the denominator (now positive) separated.
if not (d.is_extended_real or int_exp):
n = base
d = Integer(1)
dnonpos = d.is_nonpositive
if dnonpos:
n, d = -n, -d
elif dnonpos is None and not int_exp:
n = base
d = Integer(1)
if neg_exp:
n, d = d, n
exp = -exp
if d is S.One:
return self.func(n, exp), Integer(1)
if n is S.One:
return Integer(1), self.func(d, exp)
return self.func(n, exp), self.func(d, exp)
def _matches(self, expr, repl_dict={}):
"""Helper method for match().
See Also
========
diofant.core.basic.Basic.matches
"""
expr = sympify(expr, strict=True)
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = self.exp._matches(Integer(0), d)
if d is not None:
return d
# make sure the expression to be matched is an Expr
if not isinstance(expr, Expr):
return
b, e = expr.as_base_exp()
# special case number
sb, se = self.as_base_exp()
if sb.is_Symbol and se.is_Integer and expr:
if e.is_rational:
return sb._matches(b**(e/se), repl_dict)
return sb._matches(expr**(1/se), repl_dict)
d = repl_dict.copy()
d = self.base._matches(b, d)
if d is None:
return
d = self.exp.xreplace(d)._matches(e, d)
if d is None:
return Expr._matches(self, expr, repl_dict)
return d
def _eval_nseries(self, x, n, logx):
from ..functions import arg, exp, floor, log
from ..series import Order, limit
from ..simplify import powsimp
if self.base is E:
e_series = self.exp.nseries(x, n=n, logx=logx)
if e_series.is_Order:
return 1 + e_series
e0 = limit(e_series.removeO(), x, 0)
if e0 in (-oo, oo):
return self
t = e_series - e0
exp_series = term = exp(e0)
# series of exp(e0 + t) in t
for i in range(1, n):
term *= t/i
term = term.nseries(x, n=n, logx=logx)
exp_series += term
exp_series += Order(t**n, x)
return powsimp(exp_series, deep=True, combine='exp')
elif self.exp.has(x):
return exp(self.exp*log(self.base)).nseries(x, n=n, logx=logx)
else:
b_series = self.base.nseries(x, n=n, logx=logx)
while b_series.is_Order:
n += 1
b_series = self.base.nseries(x, n=n, logx=logx)
b0 = b_series.as_leading_term(x)
t = expand_mul((b_series/b0 - 1).cancel())
if t.is_Add:
t = t.func(*[i for i in t.args if i.limit(x, 0).is_finite])
c, e = b0.as_coeff_exponent(x)
if self.exp is oo:
if e != 0:
sig = -e
else:
sig = abs(c) - 1 if c != 1 else t.removeO()
if sig.is_positive:
return oo
elif sig.is_negative:
return Integer(0)
else:
raise NotImplementedError
pow_series = term = Integer(1)
# series of (1 + t)**e in t
for i in range(1, n):
term *= (self.exp - i + 1)*t/i
term = term.nseries(x, n=n, logx=logx)
pow_series += term
factor = b0**self.exp
if t != 0 and not (self.exp.is_Integer and self.exp >= 0 and n > self.exp):
pow_series += Order(t**n, x)
# branch handling
if c.is_negative:
l = floor(arg(t.removeO()*c)/(2*pi)).limit(x, 0)
assert l.is_finite
factor *= exp(2*pi*I*self.exp*l)
pow_series = expand_mul(factor*pow_series)
return powsimp(pow_series, deep=True, combine='exp')
def _eval_as_leading_term(self, x):
from ..functions import exp, log
from ..series import Order
if not self.exp.has(x):
return self.func(self.base.as_leading_term(x), self.exp)
elif self.base is E:
if self.exp.is_Mul:
k, arg = self.exp.as_independent(x)
else:
k, arg = Integer(1), self.exp
if arg.is_Add:
return Mul(*[exp(k*f).as_leading_term(x) for f in arg.args])
arg = self.exp.as_leading_term(x)
if Order(1, x).contains(arg):
return Integer(1)
return exp(arg)
else:
return exp(self.exp*log(self.base)).as_leading_term(x)
def _eval_rewrite_as_sin(self, base, exp):
from ..functions import sin
if self.base is E:
return sin(I*self.exp + pi/2) - I*sin(I*self.exp)
def _eval_rewrite_as_cos(self, base, exp):
from ..functions import cos
if self.base is E:
return cos(I*self.exp) + I*cos(I*self.exp + pi/2)
def _eval_rewrite_as_tanh(self, base, exp):
from ..functions import tanh
if self.base is E:
return (1 + tanh(self.exp/2))/(1 - tanh(self.exp/2))
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> sqrt(4 + 4*sqrt(2)).as_content_primitive()
(2, sqrt(1 + sqrt(2)))
>>> sqrt(3 + 3*sqrt(2)).as_content_primitive()
(1, sqrt(3)*sqrt(1 + sqrt(2)))
>>> ((2*x + 2)**2).as_content_primitive()
(4, (x + 1)**2)
>>> (4**((1 + y)/2)).as_content_primitive()
(2, 4**(y/2))
>>> (3**((1 + y)/2)).as_content_primitive()
(1, 3**((y + 1)/2))
>>> (3**((5 + y)/2)).as_content_primitive()
(9, 3**((y + 1)/2))
>>> eq = 3**(2 + 2*x)
>>> powsimp(eq) == eq
True
>>> eq.as_content_primitive()
(9, 3**(2*x))
>>> powsimp(Mul(*_))
3**(2*x + 2)
>>> eq = (2 + 2*x)**y
>>> s = expand_power_base(eq)
>>> s.is_Mul, s
(False, (2*x + 2)**y)
>>> eq.as_content_primitive()
(1, (2*(x + 1))**y)
>>> s = expand_power_base(_[1])
>>> s.is_Mul, s
(True, 2**y*(x + 1)**y)
See Also
========
diofant.core.expr.Expr.as_content_primitive
"""
b, e = self.as_base_exp()
b = _keep_coeff(*b.as_content_primitive(radical=radical))
ce, pe = e.as_content_primitive(radical=radical)
if b.is_Rational:
# e
# = ce*pe
# = ce*(h + t)
# = ce*h + ce*t
# => self
# = b**(ce*h)*b**(ce*t)
# = b**(cehp/cehq)*b**(ce*t)
# = b**(iceh+r/cehq)*b**(ce*t)
# = b**iceh*b**(r/cehq)*b**(ce*t)
# = b**iceh*b**(ce*t + r/cehq)
h, t = pe.as_coeff_Add()
if h.is_Rational:
ceh = ce*h
c = self.func(b, ceh)
r = Integer(0)
if not c.is_Rational:
iceh, r = divmod(ceh.numerator, ceh.denominator)
c = self.func(b, iceh)
return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.denominator))
e = _keep_coeff(ce, pe)
# b**e = (h*t)**e = h**e*t**e = c*m*t**e
if e.is_Rational and b.is_Mul:
h, t = b.as_content_primitive(radical=radical) # h is positive
c, m = self.func(h, e).as_coeff_Mul() # so c is positive
m, me = m.as_base_exp()
if m is S.One or me == e: # probably always true
# return the following, not return c, m*Pow(t, e)
# which would change Pow into Mul; we let diofant
# decide what to do by using the unevaluated Mul, e.g
# should it stay as sqrt(2 + 2*sqrt(5)) or become
# sqrt(2)*sqrt(1 + sqrt(5))
return c, self.func(_keep_coeff(m, t), e)
return Integer(1), self.func(b, e)
| bsd-3-clause |
openaid-IATI/deprecated-version-OIPA-v2 | iati/data/migrations/0013_auto__add_field_country_dac_region_code__add_field_country_dac_region_.py | 1 | 19581 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Country.dac_region_code'
db.add_column('data_country', 'dac_region_code',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Country.dac_region_name'
db.add_column('data_country', 'dac_region_name',
self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Country.dac_region_code'
db.delete_column('data_country', 'dac_region_code')
# Deleting field 'Country.dac_region_name'
db.delete_column('data_country', 'dac_region_name')
models = {
'data.activitystatistics': {
'Meta': {'object_name': 'ActivityStatistics'},
'iati_identifier': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.IATIActivity']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_budget': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2', 'blank': 'True'})
},
'data.activitystatustype': {
'Meta': {'object_name': 'ActivityStatusType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']", 'null': 'True', 'blank': 'True'})
},
'data.aidtype': {
'Meta': {'object_name': 'AidType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.budget': {
'Meta': {'object_name': 'Budget'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period_end': ('django.db.models.fields.DateField', [], {}),
'period_start': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
'data.collaborationtype': {
'Meta': {'object_name': 'CollaborationType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55', 'primary_key': 'True'})
},
'data.country': {
'Meta': {'object_name': 'Country'},
'dac_region_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dac_region_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'})
},
'data.countrystatistics': {
'Meta': {'object_name': 'CountryStatistics'},
'country': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Country']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_activities': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'data.currencytype': {
'Meta': {'object_name': 'CurrencyType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']", 'null': 'True', 'blank': 'True'})
},
'data.financetype': {
'Meta': {'object_name': 'FinanceType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.flowtype': {
'Meta': {'object_name': 'FlowType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.iatiactivity': {
'Meta': {'object_name': 'IATIActivity'},
'activity_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.ActivityStatusType']", 'null': 'True', 'blank': 'True'}),
'collaboration_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CollaborationType']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'default_aid_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.AidType']", 'null': 'True', 'blank': 'True'}),
'default_finance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FinanceType']", 'null': 'True', 'blank': 'True'}),
'default_flow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FlowType']", 'null': 'True', 'blank': 'True'}),
'default_tied_status_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.TiedAidStatusType']", 'null': 'True', 'blank': 'True'}),
'end_actual': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_planned': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'iati_identifier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reporting_organisation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Organisation']"}),
'start_actual': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_planned': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'data.iatiactivitybudget': {
'Meta': {'object_name': 'IATIActivityBudget', '_ormbases': ['data.Budget']},
'budget_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Budget']", 'unique': 'True', 'primary_key': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"})
},
'data.iatiactivitycontact': {
'Meta': {'object_name': 'IATIActivityContact'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailing_address': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'person_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'data.iatiactivitycountry': {
'Meta': {'object_name': 'IATIActivityCountry'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.iatiactivitydescription': {
'Meta': {'object_name': 'IATIActivityDescription'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
'data.iatiactivitydocument': {
'Meta': {'object_name': 'IATIActivityDocument'},
'format': ('django.db.models.fields.CharField', [], {'max_length': '55', 'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'data.iatiactivitypolicymarker': {
'Meta': {'object_name': 'IATIActivityPolicyMarker'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'significance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.SignificanceType']", 'null': 'True', 'blank': 'True'}),
'vocabulary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.VocabularyType']", 'null': 'True', 'blank': 'True'})
},
'data.iatiactivityregion': {
'Meta': {'object_name': 'IATIActivityRegion'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Region']"})
},
'data.iatiactivitysector': {
'Meta': {'object_name': 'IATIActivitySector'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sectors'", 'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Sector']"})
},
'data.iatiactivitytitle': {
'Meta': {'object_name': 'IATIActivityTitle'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'data.iatiactivitywebsite': {
'Meta': {'object_name': 'IATIActivityWebsite'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'data.iatitransaction': {
'Meta': {'object_name': 'IATITransaction', '_ormbases': ['data.Transaction']},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
'data.language': {
'Meta': {'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'data.organisation': {
'Meta': {'object_name': 'Organisation'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'org_name_lang': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '25', 'primary_key': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.organisationstatistics': {
'Meta': {'object_name': 'OrganisationStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organisation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Organisation']", 'unique': 'True'}),
'total_activities': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'data.otheridentifier': {
'Meta': {'object_name': 'OtherIdentifier'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner_ref': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'data.participatingorganisation': {
'Meta': {'object_name': 'ParticipatingOrganisation'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'org_name_lang': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.planneddisbursement': {
'Meta': {'object_name': 'PlannedDisbursement'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']"}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period_end': ('django.db.models.fields.DateField', [], {}),
'period_start': ('django.db.models.fields.DateField', [], {})
},
'data.region': {
'Meta': {'object_name': 'Region'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.sector': {
'Meta': {'object_name': 'Sector'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vocabulary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.VocabularyType']", 'null': 'True', 'blank': 'True'})
},
'data.significancetype': {
'Meta': {'object_name': 'SignificanceType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.tiedaidstatustype': {
'Meta': {'object_name': 'TiedAidStatusType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.transaction': {
'Meta': {'object_name': 'Transaction'},
'aid_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.AidType']", 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']", 'null': 'True', 'blank': 'True'}),
'disbursement_channel': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'finance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FinanceType']", 'null': 'True', 'blank': 'True'}),
'flow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FlowType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider_org': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provider_org'", 'to': "orm['data.Organisation']"}),
'receiver_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver_org'", 'null': 'True', 'to': "orm['data.Organisation']"}),
'tied_aid_status_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'transaction_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'transaction_type': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value_date': ('django.db.models.fields.DateField', [], {})
},
'data.vocabularytype': {
'Meta': {'object_name': 'VocabularyType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'primary_key': 'True'})
}
}
complete_apps = ['data'] | agpl-3.0 |
axsauze/eventsfinder | django/db/backends/postgresql_psycopg2/creation.py | 107 | 4139 | import psycopg2.extensions
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
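    # Illustrative note (not part of the original file): each entry above is a plain
    # format string that gets interpolated against the field's attributes, so a
    # CharField(max_length=100) maps to a column type roughly like:
    #
    #     'varchar(%(max_length)s)' % {'max_length': 100}   # -> 'varchar(100)'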
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
output = []
if f.db_index or f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
if not f.unique:
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
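                # For a varchar column "title" on table "myapp_book" (hypothetical names),
                # the extra statement produced below looks roughly like:
                #   CREATE INDEX "myapp_book_title_like" ON "myapp_book" ("title" varchar_pattern_ops);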
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
return output
def set_autocommit(self):
self._prepare_for_test_db_ddl()
def _prepare_for_test_db_ddl(self):
"""Rollback and close the active transaction."""
self.connection.connection.rollback()
self.connection.connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
| bsd-3-clause |
Exterminus/harpia | harpia/bpGUI/matchTem.py | 2 | 8161 | # -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges ([email protected]), Clovis Peruchi Scotti ([email protected]),
# Guilherme Augusto Rutzen ([email protected]), Mathias Erdtmann ([email protected]) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti ([email protected]), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
# ----------------------------------------------------------------------
import gtk
from harpia.GladeWindow import GladeWindow
from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR
# i18n
import os
from harpia.utils.XMLUtils import XMLParser
import gettext
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
# ----------------------------------------------------------------------
class Properties(GladeWindow, S2iCommonProperties):
# ----------------------------------------------------------------------
def __init__(self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir + 'glade/matchTem.ui'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'method',
'scaleFactor',
'BackgroundColor',
'BorderColor',
'HelpView'
]
handlers = [
'on_cancel_clicked',
'on_prop_confirm_clicked',
'on_BackColorButton_clicked',
'on_BorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
# load properties values
self.block_properties = self.m_oPropertiesXML.getTag("properties").getTag("block").getChildTags("property")
for Property in self.block_properties:
if Property.name == "scaleFactor":
self.widgets['scaleFactor'].set_value(float(Property.value));
if Property.name == "method":
if Property.value == "CV_TM_CCOEFF_NORMED":
self.widgets['method'].set_active(int(0))
if Property.value == "CV_TM_CCOEFF":
self.widgets['method'].set_active(int(1))
if Property.value == "CV_TM_CCORR_NORMED":
self.widgets['method'].set_active(int(2))
if Property.value == "CV_TM_CCORR":
self.widgets['method'].set_active(int(3))
if Property.value == "CV_TM_SQDIFF_NORMED":
self.widgets['method'].set_active(int(4))
if Property.value == "CV_TM_SQDIFF":
self.widgets['method'].set_active(int(5))
self.configure()
# load help text
# t_oS2iHelp = XMLParser(self.m_sDataDir + "help/checkCir" + _("_en.help"))
# t_oTextBuffer = gtk.TextBuffer()
# t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent())))
# self.widgets['HelpView'].set_buffer(t_oTextBuffer)
# ----------------------------------------------------------------------
def getHelp(self):
return "operação de filtragem destinada a suavizar uma imagem."
# ----------------------------------------------------------------------
def __del__(self):
pass
# ----------------------------------------------------------------------
def on_prop_confirm_clicked(self, *args):
for Property in self.block_properties:
if Property.name == "scaleFactor":
Property.value = unicode(self.widgets['scaleFactor'].get_value())
if Property.name == "method":
Active = self.widgets['method'].get_active()
if int(Active) == 0:
Property.value = unicode("CV_TM_CCOEFF_NORMED")
if int(Active) == 1:
Property.value = unicode("CV_TM_CCOEFF")
if int(Active) == 2:
Property.value = unicode("CV_TM_CCORR_NORMED")
if int(Active) == 3:
Property.value = unicode("CV_TM_CCORR")
if int(Active) == 4:
Property.value = unicode("CV_TM_SQDIFF_NORMED")
if int(Active) == 5:
Property.value = unicode("CV_TM_SQDIFF")
self.m_oS2iBlockProperties.SetPropertiesXML(self.m_oPropertiesXML)
self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor)
self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor)
self.widgets['Properties'].destroy()
# ----------------------------------------------------------------------
# propProperties = Properties()()
# propProperties.show( center=0 )
# ------------------------------------------------------------------------------
# Code generation
# ------------------------------------------------------------------------------
def generate(blockTemplate):
for propIter in blockTemplate.properties:
if propIter[0] == 'method':
interMethod = propIter[1]
if propIter[0] == "scaleFactor":
scaleFactor = propIter[1]
blockTemplate.imagesIO = \
'IplImage * block$$_img_i1 = NULL;\n' + \
'IplImage * block$$_img_i2 = NULL;\n' + \
'IplImage * block$$_img_t1 = NULL;\n' + \
'IplImage * block$$_img_o1 = NULL;\n'
blockTemplate.functionCall = '\nif(block$$_img_i1 && block$$_img_i2)\n' + \
'{\n' + \
' block$$_img_t1 = cvCreateImage(cvSize(block$$_img_i1->width - block$$_img_i2->width +1,block$$_img_i1->height - block$$_img_i2->height +1),32,1);\n' + \
' block$$_img_o1 = cvCreateImage(cvSize(block$$_img_i1->width - block$$_img_i2->width +1,block$$_img_i1->height - block$$_img_i2->height +1),8,1);\n' + \
' cvMatchTemplate(block$$_img_i1 , block$$_img_i2, block$$_img_t1, ' + interMethod + ');\n' + \
' cvConvertScale(block$$_img_t1,block$$_img_o1, ' + str(
10 ** -(int(float(scaleFactor)))) + ',0);\n' + \
'}\n'
blockTemplate.dealloc = 'cvReleaseImage(&block$$_img_o1);\n' + \
'cvReleaseImage(&block$$_img_t1);\n' + \
'cvReleaseImage(&block$$_img_i2);\n' + \
'cvReleaseImage(&block$$_img_i1);\n'
# ------------------------------------------------------------------------------
# Block Setup
# ------------------------------------------------------------------------------
def getBlock():
return {"Label": _("Match Template"),
"Path": {"Python": "matchTem",
"Glade": "glade/matchTem.ui",
"Xml": "xml/matchTem.xml"},
"Icon": "images/matchTem.png",
"Color": "180:180:10:150",
"InTypes": {0: "HRP_IMAGE", 1: "HRP_IMAGE"},
"OutTypes": {0: "HRP_IMAGE"},
"Description": _("Output shows the matching relation between image (input1) and template (input2)"),
"TreeGroup": _("Feature Detection")
}
| gpl-2.0 |
pschella/scipy | scipy/_lib/tests/test__util.py | 25 | 2076 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
from scipy._lib._util import _aligned_zeros, check_random_state
def test__aligned_zeros():
niter = 10
def check(shape, dtype, order, align):
err_msg = repr((shape, dtype, order, align))
x = _aligned_zeros(shape, dtype, order, align=align)
if align is None:
align = np.dtype(dtype).alignment
assert_equal(x.__array_interface__['data'][0] % align, 0)
if hasattr(shape, '__len__'):
assert_equal(x.shape, shape, err_msg)
else:
assert_equal(x.shape, (shape,), err_msg)
assert_equal(x.dtype, dtype)
if order == "C":
assert_(x.flags.c_contiguous, err_msg)
elif order == "F":
if x.size > 0:
# Size-0 arrays get invalid flags on Numpy 1.5
assert_(x.flags.f_contiguous, err_msg)
elif order is None:
assert_(x.flags.c_contiguous, err_msg)
else:
raise ValueError()
# try various alignments
for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
for n in [0, 1, 3, 11]:
for order in ["C", "F", None]:
for dtype in [np.uint8, np.float64]:
for shape in [n, (1, 2, 3, n)]:
for j in range(niter):
check(shape, dtype, order, align)
def test_check_random_state():
# If seed is None, return the RandomState singleton used by np.random.
# If seed is an int, return a new RandomState instance seeded with seed.
# If seed is already a RandomState instance, return it.
# Otherwise raise ValueError.
rsi = check_random_state(1)
assert_equal(type(rsi), np.random.RandomState)
rsi = check_random_state(rsi)
assert_equal(type(rsi), np.random.RandomState)
rsi = check_random_state(None)
assert_equal(type(rsi), np.random.RandomState)
assert_raises(ValueError, check_random_state, 'a')
| bsd-3-clause |
seanli9jan/tensorflow | tensorflow/contrib/memory_stats/__init__.py | 53 | 1224 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics.
@@BytesInUse
@@BytesLimit
@@MaxBytesInUse
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesInUse
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesLimit
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import MaxBytesInUse
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
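# Illustrative usage sketch (not part of the original module), assuming a TF 1.x
# runtime and that the ops are placed on a GPU device:
#
#     import tensorflow as tf
#     from tensorflow.contrib.memory_stats import BytesInUse, BytesLimit, MaxBytesInUse
#
#     with tf.Session() as sess:
#         print(sess.run(BytesInUse()))     # bytes currently allocated on the device
#         print(sess.run(BytesLimit()))     # upper bound available to the allocator
#         print(sess.run(MaxBytesInUse()))  # peak allocation observed so far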
| apache-2.0 |