repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
renaelectronics/linuxcnc | src/emc/usr_intf/axis/scripts/teach-in.py | 5 | 2641 | #!/usr/bin/env python
"""Usage:
python teach.py nmlfile outputfile
If outputfile is not specified, writes to standard output.
You must ". scripts/rip-environment" before running this script, if you use
run-in-place.
"""
# Copyright 2007 Jeff Epler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import linuxcnc
import Tkinter
import sys
linenumber = 1
if len(sys.argv) > 1:
linuxcnc.nmlfile = sys.argv[1]
if len(sys.argv) > 2:
outfile = sys.argv[2]
sys.stdout = open(outfile, 'w')
s = linuxcnc.stat()
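# Bit i of s.axis_mask marks axis "XYZABCUVW"[i] as configured; get_cart()
# below only reports positions for those axes.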
def get_cart():
s.poll()
position = ""
for i,a in enumerate("XYZABCUVW"):
if s.axis_mask & (1<<i):
position = position + "%-8.4f " % (s.position[i])
return position[:-1] # remove the final space char
def get_joint():
s.poll()
position = " ".join(["%-8.4f"] * s.joints)
return position % s.joint_actual_position[:s.joints]
def log():
global linenumber
if world.get():
p = get_cart()
else:
p = get_joint()
label1.configure(text='Learned: %s' % p)
print linenumber, p, s.flood, s.mist, s.lube, s.spindle_enabled
linenumber += 1
def show():
s.poll()
if world.get():
p = get_cart()
else:
p = get_joint()
label2.configure(text='Position: %s' % p)
app.after(100, show)
app = Tkinter.Tk(); app.wm_title('LinuxCNC Teach-In')
world = Tkinter.IntVar(app)
button = Tkinter.Button(app, command=log, text='Learn', font=("helvetica", 14))
button.pack(side='left')
label2 = Tkinter.Label(app, width=60, font='fixed', anchor="w")
label2.pack(side='top')
label1 = Tkinter.Label(app, width=60, font='fixed', text="Learned: (nothing yet)", anchor="w")
label1.pack(side='top')
r1 = Tkinter.Radiobutton(app, text="Joint", variable=world, value=0)
r1.pack(side='left')
r2 = Tkinter.Radiobutton(app, text="World", variable=world, value=1)
r2.pack(side='left')
show()
app.mainloop()
| gpl-2.0 |
harayz/raspberry_pwn | src/pentest/fimap/singleScan.py | 8 | 5441 | #
# This file is part of fimap.
#
# Copyright(c) 2009-2010 Iman Karim([email protected]).
# http://fimap.googlecode.com
#
# This file may be licensed under the terms of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from baseClass import baseClass
from targetScanner import targetScanner
import sys, time
__author__="Iman Karim([email protected])"
__date__ ="$03.09.2009 01:29:37$"
class singleScan(baseClass):
def _load(self):
self.URL = None
self.quite = False
def setURL(self, URL):
self.URL = URL
def setQuite(self, b):
self.quite = b
def scan(self):
try:
self.localLog("SingleScan is testing URL: '%s'" %self.URL)
t = targetScanner(self.config)
t.MonkeyTechnique = self.config["p_monkeymode"]
idx = 0
if (t.prepareTarget(self.URL)):
res = t.testTargetVuln()
if (len(res) == 0):
self.localLog("Target URL isn't affected by any file inclusion bug :(")
else:
for i in res:
report = i[0]
files = i[1]
idx = idx +1
boxarr = []
header = "[%d] Possible File Inclusion"%(idx)
if (report.getLanguage() != None):
header = "[%d] Possible %s-File Inclusion"%(idx, report.getLanguage())
boxarr.append(" [URL] %s"%report.getURL())
if (report.getPostData() != None and report.getPostData() != ""): boxarr.append(" [POST] %s"%report.getPostData())
if (report.isPost):
boxarr.append(" [POSTPARM] %s"%report.getVulnKey())
else:
boxarr.append(" [PARAM] %s"%report.getVulnKey())
if (report.isBlindDiscovered()):
boxarr.append(" [PATH] Not received (Blindmode)")
else:
boxarr.append(" [PATH] %s"%report.getServerPath())
if (report.isUnix()):
boxarr.append(" [OS] Unix")
else:
boxarr.append(" [OS] Windows")
boxarr.append(" [TYPE] %s"%report.getType())
if (not report.isBlindDiscovered()):
if (report.isNullbytePossible() == None):
boxarr.append(" [NULLBYTE] No Need. It's clean.")
else:
if (report.isNullbytePossible()):
boxarr.append(" [NULLBYTE] Works. :)")
else:
boxarr.append(" [NULLBYTE] Doesn't work. :(")
else:
if (report.isNullbytePossible()):
boxarr.append(" [NULLBYTE] Is needed.")
else:
boxarr.append(" [NULLBYTE] Not tested.")
boxarr.append(" [READABLE FILES]")
if (len(files) == 0):
boxarr.append(" No Readable files found :(")
else:
fidx = 0
for file in files:
payload = "%s%s%s"%(report.getPrefix(), file, report.getSurfix())
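# e.g. (illustrative values only) prefix "../../../" + "etc/passwd" + suffix
# "%00" yields the payload "../../../etc/passwd%00" for the vulnerable parameter.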
if (file != payload):
if report.isWindows() and file[1]==":":
file = file[3:]
txt = " [%d] %s -> %s"%(fidx, file, payload)
#if (fidx == 0): txt = txt.strip()
boxarr.append(txt)
else:
txt = " [%d] %s"%(fidx, file)
#if (fidx == 0): txt = txt.strip()
boxarr.append(txt)
fidx = fidx +1
self.drawBox(header, boxarr)
except KeyboardInterrupt:
if (self.quite): # We are in google mode.
print "\nCancelled current target..."
print "Press CTRL+C again in the next second to terminate fimap."
try:
time.sleep(1)
except KeyboardInterrupt:
raise
else: # We are in single mode. Simply raise the exception.
raise
def localLog(self, txt):
if (not self.quite):
print txt | gpl-3.0 |
merckhung/bokken | ui/treeviews.py | 3 | 11285 | # treeviews.py
# -*- coding: utf-8 -*-
#
# Copyright 2011 Hugo Teso <[email protected]>
# Copyright 2014 David Martínez Moreno <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import gtk
from lib.common import datafile_path
class TreeViews(gtk.TreeView):
'''Main TextView elements'''
def __init__(self, core, textviews):
self.store = gtk.ListStore(gtk.gdk.Pixbuf, str, str, str, str)
super(TreeViews,self).__init__(self.store)
self.uicore = core
self.textviews = textviews
self.set_rules_hint(True)
self.set_has_tooltip(True)
# Connect right click popup search menu
self.popup_handler = self.connect('button-press-event', self.popup_menu)
self.popup_handler = self.connect('row-activated', self.popup_menu)
def create_functions_columns(self):
rendererText = gtk.CellRendererText()
rendererText.tooltip_handle = self.connect('motion-notify-event', self.fcn_tooltip)
rendererPix = gtk.CellRendererPixbuf()
self.fcn_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('function.png'))
self.bb_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('block.png'))
column = gtk.TreeViewColumn("Function")
column.set_spacing(5)
column.pack_start(rendererPix, False)
column.pack_start(rendererText, True)
column.set_attributes(rendererText, text=1)
column.set_attributes(rendererPix, pixbuf=0)
column.set_sort_column_id(1)
self.store.set_sort_column_id(1,gtk.SORT_ASCENDING)
self.append_column(column)
self.set_model(self.store)
def create_relocs_columns(self):
self.data_sec_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('data-sec.png'))
rendererPix = gtk.CellRendererPixbuf()
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name")
column.set_spacing(5)
column.pack_start(rendererPix, False)
column.pack_start(rendererText, True)
column.set_attributes(rendererText, text=1)
column.set_attributes(rendererPix, pixbuf=0)
column.set_sort_column_id(0)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Virtual Address", rendererText, text=2)
self.store.set_sort_column_id(2,gtk.SORT_ASCENDING)
column.set_sort_column_id(2)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Size", rendererText, text=3)
column.set_sort_column_id(3)
self.append_column(column)
def create_exports_columns(self):
self.exp_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('export.png'))
rendererPix = gtk.CellRendererPixbuf()
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Offset")
column.set_spacing(5)
column.pack_start(rendererPix, False)
column.pack_start(rendererText, True)
column.set_attributes(rendererText, text=1)
column.set_attributes(rendererPix, pixbuf=0)
self.store.set_sort_column_id(1,gtk.SORT_ASCENDING)
column.set_sort_column_id(1)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name", rendererText, text=2)
column.set_sort_column_id(2)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Ordinal", rendererText, text=3)
column.set_sort_column_id(3)
self.append_column(column)
self.set_model(self.store)
def remove_columns(self):
columns = self.get_columns()
for column in columns:
self.remove_column(column)
def create_tree(self, imps):
# Create the column
imports = gtk.TreeViewColumn()
imports.set_title("Imports")
imports.set_spacing(5)
self.treestore = gtk.TreeStore(gtk.gdk.Pixbuf, str)
self.imp_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('import.png'))
rendererPix = gtk.CellRendererPixbuf()
rendererText = gtk.CellRendererText()
imports.pack_start(rendererPix, False)
imports.pack_start(rendererText, True)
imports.set_attributes(rendererText, text=1)
imports.set_attributes(rendererPix, pixbuf=0)
# Iterate imports and add to the tree
for element in imps.keys():
it = self.treestore.append(None, [self.fcn_pix, element])
for imp in imps[element]:
self.treestore.append(it, [self.imp_pix, imp[0] + '\t' + imp[1]])
# Add column to tree
self.append_column(imports)
self.set_model(self.treestore)
self.expand_all()
def search_and_graph(self, widget, link_name):
self.textviews.search(widget, link_name)
if self.dograph:
self.textviews.update_graph(widget, link_name)
def fcn_tooltip(self, widget, event):
x = int(event.x)
y = int(event.y)
tup = widget.get_path_at_pos(x, y)
if "Function" == tup[1].get_title():
model = widget.get_model()
tree_iter = model.get_iter(tup[0])
fcn = model.get_value(tree_iter, 1)
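# 'pdi 15 @ <fcn>' asks radare2 to disassemble 15 instructions at the
# function's address; the raw listing becomes the tooltip text below.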
value = self.uicore.send_cmd_str('pdi 15 @ ' + fcn)
widget.set_tooltip_markup("<span font_family=\"monospace\">" + value.rstrip() + "</span>")
else:
widget.set_tooltip_markup("")
def popup_menu(self, tv, event, row=None):
'''Controls the behavior of the treeviews on the left:
Left-click or Enter/Space: Goes to the corresponding graph/address/etc.
Right-click: Shows a menu.
@param tv: The treeview.
@parameter event: The GTK event (gtk.gdk.Event) in case this is a mouse
click. Otherwise it's the activated row index in format (n,).
@parameter row: A gtk.TreeViewColumn object in case it's a keypress,
otherwise None.
The function works by abstracting the event type and then defining
primary_action (True if left-click or Enter on a row, False if
right-click).
'''
self.dograph = False
# Let's get the row clicked whether it was by mouse or keyboard.
if row:
# Keyboard.
path = event
primary_action = True
else:
# Mouse.
# I do this to return fast (and to avoid leaking memory in 'e io.va' for now).
if (event.button != 1) and (event.button !=3):
return False
elif event.button == 1:
# Left-click.
primary_action = True
else:
primary_action = False
coordinates = tv.get_path_at_pos(int(event.x), int(event.y))
# coordinates is None if the click is outside the rows but inside
# the widget.
if not coordinates:
return False
(path, column, x, y) = coordinates
# FIXME: We should do this on the uicore, possibly in every operation.
if self.uicore.use_va:
self.uicore.core.cmd0('e io.va=0')
else:
self.uicore.core.cmd0('e io.va=1')
# Main loop, deciding whether to take an action or display a pop-up.
if primary_action:
# It's a left click or Enter on a row.
# Is it over a plugin name?
# Get the information about the row.
if len(path) == 1:
link_name = self.store[path][1]
# Special for exports
if '0x' in link_name:
link_name = self.store[path][2]
else:
link_name = self.treestore[path][1]
# Detect if search string is from URL or PE/ELF
link_name = link_name.split("\t")
# Elf/PE (function)
if len( link_name ) == 1:
if '0x' in link_name[0]:
link_name = link_name[0]
elif 'reloc.' in link_name[0]:
link_name = link_name[0]
else:
# Just get graph for functions
if not 'loc.' in link_name[0] and link_name[0][0] != '.':
self.dograph = True
# Adjust section name to search inside r2 flags
link_name = "0x%08x" % self.uicore.core.num.get(link_name[0])
# Elf/PE (import/export)
elif len( link_name ) == 2 and link_name[1] != '':
link_name = "0x%08x" % int(link_name[0], 16)
self.search_and_graph(self, link_name)
self.dograph = False
else:
# It's a right click!
_time = event.time
# Is it over a plugin name?
# Get the information about the click.
if len(path) == 1:
link_name = self.store[path][1]
else:
link_name = self.treestore[path][1]
# Detect if search string is from URL or PE/ELF
link_name = link_name.split("\t")
# Elf/PE (function)
if len( link_name ) == 1:
if '0x' in link_name[0]:
link_name = link_name[0]
elif 'reloc.' in link_name[0]:
link_name = link_name[0]
else:
# Just get graph for functions
if not 'loc.' in link_name[0] and link_name[0][0] != '.':
self.dograph = True
# Adjust section name to search inside r2 flags
link_name = "0x%08x" % self.uicore.core.num.get(link_name[0])
# Elf/PE (import/export)
elif len( link_name ) == 2 and link_name[1] != '':
link_name = "0x%08x" % int(link_name[0], 16)
# Ok, now I show the popup menu !
# Create the popup menu
gm = gtk.Menu()
# And the items
e = gtk.MenuItem("Go to")
e.connect('activate', self.search_and_graph, link_name)
gm.append( e )
if self.dograph:
e = gtk.MenuItem("Show graph")
e.connect('activate', self.textviews.update_graph, link_name)
gm.append( e )
gm.show_all()
gm.popup( None, None, None, event.button, _time)
| gpl-2.0 |
malkoto1/just_cook | SQLAlchemy-1.0.4/lib/sqlalchemy/dialects/mysql/zxjdbc.py | 59 | 3942 | # mysql/zxjdbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
:connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.
"""
import re
from ... import types as sqltypes, util
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import BIT, MySQLDialect, MySQLExecutionContext
class _ZxJDBCBit(BIT):
def result_processor(self, dialect, coltype):
"""Converts boolean or byte arrays from MySQL Connector/J to longs."""
def process(value):
if value is None:
return value
if isinstance(value, bool):
return int(value)
v = 0
for i in value:
v = v << 8 | (i & 0xff)
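# e.g. a byte array [0x01, 0x02] accumulates to (1 << 8) | 2 == 258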
value = v
return value
return process
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
jdbc_db_name = 'mysql'
jdbc_driver_name = 'com.mysql.jdbc.Driver'
execution_ctx_cls = MySQLExecutionContext_zxjdbc
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
sqltypes.Time: sqltypes.Time,
BIT: _ZxJDBCBit
}
)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. "
"Assuming latin1.")
return 'latin1'
def _driver_kwargs(self):
"""return kw arg dict to be sent to connect()."""
return dict(characterEncoding='UTF-8', yearIsDateType='false')
def _extract_error_code(self, exception):
# e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
# [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
c = m.group(1)
if c:
return int(c)
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.dbversion):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
dialect = MySQLDialect_zxjdbc
| gpl-2.0 |
espadrine/opera | chromium/src/third_party/python_26/Tools/scripts/linktree.py | 101 | 2425 | #! /usr/bin/env python
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: linktree oldtree newtree [linkto]
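#
# For example (hypothetical paths), "linktree src src-links" creates:
#   src-links/.LINK -> ../src
#   src-links/main.py -> .LINK/main.py
#   src-links/pkg/mod.py -> ../.LINK/pkg/mod.py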
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
def main():
if not 3 <= len(sys.argv) <= 4:
print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
return 2
oldtree, newtree = sys.argv[1], sys.argv[2]
if len(sys.argv) > 3:
link = sys.argv[3]
link_may_fail = 1
else:
link = LINK
link_may_fail = 0
if not os.path.isdir(oldtree):
print oldtree + ': not a directory'
return 1
try:
os.mkdir(newtree, 0777)
except os.error, msg:
print newtree + ': cannot mkdir:', msg
return 1
linkname = os.path.join(newtree, link)
try:
os.symlink(os.path.join(os.pardir, oldtree), linkname)
except os.error, msg:
if not link_may_fail:
print linkname + ': cannot symlink:', msg
return 1
else:
print linkname + ': warning: cannot symlink:', msg
linknames(oldtree, newtree, link)
return 0
def linknames(old, new, link):
if debug: print 'linknames', (old, new, link)
try:
names = os.listdir(old)
except os.error, msg:
print old + ': warning: cannot listdir:', msg
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print oldname, newname, linkname
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0777)
ok = 1
except os.error, msg:
print newname + \
': warning: cannot mkdir:', msg
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
pombreda/seascope | src/backend/plugins/gtags/GtagsProject.py | 2 | 3525 | #!/usr/bin/python
# Copyright (c) 2010 Anil Kumar
# All rights reserved.
#
# License: BSD
import os, string, re
from ..PluginBase import PluginFeatureBase, ProjectBase, ConfigBase, QueryBase
from ..PluginBase import PluginProcess
from ..CtagsCache import CtagsThread
class GtagsFeature(PluginFeatureBase):
def __init__(self):
PluginFeatureBase.__init__(self)
self.feat_desc = [
['REF', '-r'],
['DEF', ''],
#['<--', '2'],
['-->', '-r'],
#['TXT', '4'],
['GREP','-g'],
['FIL', '-P'],
['INC', '-g'],
['QDEF', ''],
['CTREE','12'],
['CLGRAPH', '13'],
['CLGRAPHD', '14'],
['FFGRAPH', '14'],
['UPD', '25'],
]
self.ctree_query_args = [
['-->', '--> F', 'Calling tree' ],
#['<--', 'F -->', 'Called tree' ],
['REF', '==> F', 'Advanced calling tree' ],
]
def query_dlg_cb(self, req, cmd_str, in_opt):
if req != '' and in_opt['substring']:
req = '.*' + req + '.*'
opt = None
if in_opt['ignorecase']:
opt = '-i'
res = (cmd_str, req, opt)
return res
class ConfigGtags(ConfigBase):
def __init__(self):
ConfigBase.__init__(self, 'gtags')
class ProjectGtags(ProjectBase):
def __init__(self):
ProjectBase.__init__(self)
@staticmethod
def _prj_new_or_open(conf):
prj = ProjectGtags()
prj.feat = GtagsFeature()
prj.conf = conf
prj.qry = QueryGtags(prj.conf, prj.feat)
return (prj)
@staticmethod
def prj_new(proj_args):
d = proj_args[0]
prj = ProjectGtags.prj_open(d)
return None
@staticmethod
def prj_open(proj_path):
conf = ConfigGtags()
conf.proj_open(proj_path)
prj = ProjectGtags._prj_new_or_open(conf)
return (prj)
class GtProcess(PluginProcess):
def __init__(self, wdir, rq):
PluginProcess.__init__(self, wdir, rq)
self.name = 'gtags process'
def parse_result(self, text, sig):
text = re.split('\r?\n', text)
if self.cmd_str == 'FIL':
res = [ ['', line.split(' ')[0], '', '' ] for line in text if line != '' ]
return res
res = []
for line in text:
if line == '':
continue
line = line.split(' ', 3)
line = ['<unknown>', line[0], line[2], line[3]]
res.append(line)
CtagsThread(sig).apply_fix(self.cmd_str, res, ['<unknown>'])
return None
class QueryGtags(QueryBase):
def __init__(self, conf, feat):
QueryBase.__init__(self)
self.conf = conf
self.feat = feat
def query(self, rquery):
if (not self.conf):
#or not self.conf.is_ready()):
print "pm_query not is_ready"
return None
cmd_str = rquery['cmd']
req = rquery['req']
opt = rquery['opt']
if opt == None or opt == '':
opt = []
else:
opt = opt.split()
cmd_opt = self.feat.cmd_str2id[cmd_str]
pargs = [ 'global', '-a', '--result=cscope', '-x' ] + opt
if cmd_opt != '':
pargs += [ cmd_opt ]
pargs += [ '--', req ]
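# e.g. (sketch, with no extra options) a REF query for 'main' runs:
#   global -a --result=cscope -x -r -- main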
qsig = GtProcess(self.conf.c_dir, [cmd_str, req]).run_query_process(pargs, req, rquery)
return qsig
def rebuild(self):
if (not self.conf.is_ready()):
print "pm_query not is_ready"
return None
if (os.path.exists(os.path.join(self.conf.c_dir, 'GTAGS'))):
pargs = [ 'global', '-u' ]
else:
pargs = [ 'gtags', '-i' ]
qsig = GtProcess(self.conf.c_dir, None).run_rebuild_process(pargs)
return qsig
def query_fl(self):
if not os.path.exists(os.path.join(self.conf.c_dir, 'GTAGS')):
return []
pargs = [ 'global', '-P', '-a' ]
qsig = GtProcess(self.conf.c_dir, None).run_query_fl(pargs)
return qsig
def gt_is_open(self):
return self.conf != None
def gt_is_ready(self):
return self.conf.is_ready()
| bsd-3-clause |
RPI-OPENEDX/edx-platform | common/test/acceptance/pages/lms/problem.py | 23 | 5236 | """
Problem Page.
"""
from bok_choy.page_object import PageObject
class ProblemPage(PageObject):
"""
View of problem page.
"""
url = None
CSS_PROBLEM_HEADER = '.problem-header'
def is_browser_on_page(self):
return self.q(css='.xblock-student_view').present
@property
def problem_name(self):
"""
Return the current problem name.
"""
return self.q(css='.problem-header').text[0]
@property
def problem_text(self):
"""
Return the text of the question of the problem.
"""
return self.q(css="div.problem p").text
@property
def message_text(self):
"""
Return the "message" text of the question of the problem.
"""
return self.q(css="div.problem span.message").text[0]
@property
def hint_text(self):
"""
Return the "hint" text of the problem from its div.
"""
return self.q(css="div.problem div.problem-hint").text[0]
def verify_mathjax_rendered_in_problem(self):
"""
Check that MathJax have been rendered in problem hint
"""
def mathjax_present():
""" Returns True if MathJax css is present in the problem body """
mathjax_container = self.q(css="div.problem p .MathJax .math")
return mathjax_container.visible and mathjax_container.present
self.wait_for(
mathjax_present,
description="MathJax rendered in problem body"
)
def verify_mathjax_rendered_in_hint(self):
"""
Check that MathJax have been rendered in problem hint
"""
def mathjax_present():
""" Returns True if MathJax css is present in the problem body """
mathjax_container = self.q(css="div.problem div.problem-hint .MathJax .math")
return mathjax_container.visible and mathjax_container.present
self.wait_for(
mathjax_present,
description="MathJax rendered in hint"
)
def fill_answer(self, text, input_num=None):
"""
Fill in the answer to the problem.
args:
text: String to fill the input with.
kwargs:
input_num: If provided, fills only the input_numth field. Else, all
input fields will be filled.
"""
fields = self.q(css='div.problem div.capa_inputtype.textline input')
fields = fields.nth(input_num) if input_num is not None else fields
fields.fill(text)
def fill_answer_numerical(self, text):
"""
Fill in the answer to a numerical problem.
"""
self.q(css='div.problem section.inputtype input').fill(text)
self.wait_for_ajax()
def click_check(self):
"""
Click the Check button!
"""
self.q(css='div.problem button.check').click()
self.wait_for_ajax()
def wait_for_status_icon(self):
"""
wait for status icon
"""
self.wait_for_element_visibility('div.problem section.inputtype div .status', 'wait for status icon')
def click_hint(self):
"""
Click the Hint button.
"""
self.q(css='div.problem button.hint-button').click()
self.wait_for_ajax()
def click_choice(self, choice_value):
"""
Click the choice input(radio, checkbox or option) where value matches `choice_value` in choice group.
"""
self.q(css='div.problem .choicegroup input[value="' + choice_value + '"]').click()
self.wait_for_ajax()
def is_correct(self):
"""
Is there a "correct" status showing?
"""
return self.q(css="div.problem div.capa_inputtype.textline div.correct span.status").is_present()
def simpleprob_is_correct(self):
"""
Is there a "correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.correct span.status").is_present()
def simpleprob_is_partially_correct(self):
"""
Is there a "partially correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.partially-correct span.status").is_present()
def simpleprob_is_incorrect(self):
"""
Is there an "incorrect" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.incorrect span.status").is_present()
def click_clarification(self, index=0):
"""
Click on an inline icon that can be included in problem text using an HTML <clarification> element:
Problem <clarification>clarification text hidden by an icon in rendering</clarification> Text
"""
self.q(css='div.problem .clarification:nth-child({index}) i[data-tooltip]'.format(index=index + 1)).click()
@property
def visible_tooltip_text(self):
"""
Get the text seen in any tooltip currently visible on the page.
"""
self.wait_for_element_visibility('body > .tooltip', 'A tooltip is visible.')
return self.q(css='body > .tooltip').text[0]
| agpl-3.0 |
rudischilder/MAV_TU_Delft_gr10 | sw/ground_segment/python/settings_app/settingsframe.py | 29 | 6685 | #Boa:Frame:PlotFrame
from __future__ import division
import wx
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../..')))
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from settings_tool import IvySettingsInterface
def create(parent, ac_ids):
return SettingsFrame(parent, ac_ids)
SLIDER_ID_OFFSET = 250000
BUTTON_ID_OFFSET = 2 * 250000
SLIDER_FACTOR = 100
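# wx sliders only move in integer steps, so setting values are stored scaled
# by SLIDER_FACTOR to keep two decimal places of resolution (see
# SettingCtrl.GetSettingValue below).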
# Wraps TextCtrl to provide added functionality
class TextCtrlSetting(wx.TextCtrl):
update_callback = None
def __init__(self, parent, setting):
self.setting = setting
wx.TextCtrl.__init__(self, parent=parent, name=setting.shortname, id=setting.index)
self.Bind(wx.EVT_TEXT, self.onEvtText, self)
def RegisterUpdateCallback(self, cb):
self.update_callback = cb
def onEvtText(self, event):
index = int(self.GetId())
try:
value = float(self.GetValue())
self.update_callback(index, value)
except:
return
# helper function to toggle edit box boldness (bold = user-set, normal=downlink-received)
def setBold(self, bold):
font = self.GetFont()
if (bold):
font.SetWeight(wx.FONTWEIGHT_BOLD)
else:
font.SetWeight(wx.FONTWEIGHT_NORMAL)
self.SetFont(font)
def SetSettingValue(self, value):
if (self.setting.step < 1):
self.SetValue("%.2f" % float(value))
else:
self.SetValue("%i" % int(float(value)))
# Wraps slider
class SettingCtrl(wx.Slider):
update_callback = None
def __init__(self, parent, setting):
self.setting = setting
max_v = int(setting.max_value) * SLIDER_FACTOR
min_v = int(setting.min_value) * SLIDER_FACTOR
if (min_v >= max_v):
max_v = max_v + 1
wx.Slider.__init__(self, parent=parent, minValue=min_v, maxValue=max_v, style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS, size=(200, 30), id=setting.index + SLIDER_ID_OFFSET)
self.SetLineSize(setting.step * SLIDER_FACTOR)
self.Bind(wx.EVT_MOUSEWHEEL, self.sliderWheel, self)
self.Bind(wx.EVT_SLIDER, self.OnEvtSlider, self)
def RegisterUpdateCallback(self, cb):
self.update_callback = cb
def OnEvtSlider(self, event):
slider = event.GetEventObject()
self.update_callback(slider.GetSettingIndex(), slider.GetSettingValue())
# Called on mouse wheel events (default handler seems backwards?)
def sliderWheel(self, event):
slider = event.GetEventObject()
if (event.GetWheelRotation() > 0):
slider.SetValue(slider.GetValue() + slider.GetLineSize())
else:
slider.SetValue(slider.GetValue() - slider.GetLineSize())
self.update_callback(slider.GetSettingIndex(), slider.GetSettingValue())
def GetSettingIndex(self):
index = int(self.GetId())
if index >= SLIDER_ID_OFFSET:
index = index - SLIDER_ID_OFFSET
return index
def SetSettingValue(self, value):
self.SetValue(int(float(value)) * SLIDER_FACTOR)
def GetSettingValue(self):
if (self.setting.step < 1):
return float(self.GetValue()) / SLIDER_FACTOR
else:
return int(self.GetValue()) // SLIDER_FACTOR
class SettingsFrame(wx.Frame):
edits = []
sliders = []
def __init__(self, parent, ac_ids):
self.settings = IvySettingsInterface(ac_ids)
title = "Settings %s (%s)" % (ac_ids, self.settings.GetACName())
wx.Frame.__init__(self, name=u'SettingsFrame', parent=parent, title=title, size=(480, 320))
self.book = wx.Notebook(self)
self.updates = []
self.Bind( wx.EVT_CLOSE, self.OnClose)
for setting_group in self.settings.groups:
page = wx.Panel(self.book)
vert_box = wx.BoxSizer(orient=wx.VERTICAL)
for setting in setting_group.member_list:
horz_box = wx.BoxSizer(orient=wx.HORIZONTAL)
text = wx.StaticText(page, label=setting.shortname, size=(100,30))
# Edit
edit = TextCtrlSetting(page, setting)
edit.RegisterUpdateCallback(self.editUpdate)
self.edits.append(edit)
# Slider
slider = SettingCtrl(page, setting)
slider.RegisterUpdateCallback(self.updateEditFromSlider)
self.sliders.append(slider)
# Button
button = wx.Button(page, id=setting.index + BUTTON_ID_OFFSET, label="Apply")
self.Bind(wx.EVT_BUTTON, self.onButton)
horz_box.AddWindow(text)
horz_box.AddWindow(edit)
horz_box.AddWindow(slider)
horz_box.AddWindow(button)
vert_box.AddWindow(horz_box)
page.SetSizer(vert_box)
self.book.AddPage(page, setting_group.name)
self.settings.RegisterCallback(self.onUpdate)
# Copy slider value into associated edit box
def updateEditFromSlider(self, index, value):
self.edits[index].ChangeValue(str(value))
self.edits[index].setBold(True)
# Called on edit box update
def editUpdate(self, index, value):
self.sliders[index].SetSettingValue(value)
self.edits[index].setBold(True)
# Called on button push
def onButton(self, event):
button = event.GetEventObject()
index = int(button.GetId())
if index >= BUTTON_ID_OFFSET:
index = index - BUTTON_ID_OFFSET
self.settings.lookup[index].value = self.sliders[index].GetSettingValue()
self.settings.SendSetting(index)
# Called for remote settings updates
def onUpdate(self, index, value, fromRemote):
# Schedule the call for later via wx (run after events)
# to prevent crashy crashy
wx.CallAfter(self.update_value, index, value, fromRemote)
# Called to update GUI with new values
def update_value(self, index, value, fromRemote):
editCtrl = self.edits[index]
if fromRemote and editCtrl.FindFocus() == editCtrl:
# don't process remote updates if the control is focused
return
editCtrl.SetSettingValue(value)
editCtrl.setBold(not fromRemote)
self.sliders[index].SetSettingValue(value)
def OnClose(self, event):
# need to forward close to canvas so that ivy is shut down, otherwise ivy hangs the shutdown
self.settings.OnClose()
self.Destroy()
| gpl-2.0 |
temasek/android_external_chromium_org_third_party_WebKit | Source/devtools/scripts/generate_devtools_html.py | 9 | 3173 | #!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from os import path
import os
import sys
def generate_include_tag(resource_path):
if (resource_path.endswith('.js')):
return ' <script type="text/javascript" src="%s"></script>\n' % resource_path
elif (resource_path.endswith('.css')):
return ' <link rel="stylesheet" type="text/css" href="%s">\n' % resource_path
else:
assert resource_path
def write_app_input_html(app_input_file, app_output_file, application_name, debug):
for line in app_input_file:
if not debug:
if '<script ' in line or '<link ' in line:
continue
if '</head>' in line:
app_output_file.write(generate_include_tag("%s.css" % application_name))
app_output_file.write(generate_include_tag("%s.js" % application_name))
app_output_file.write(line)
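# In release mode (debug false) every per-file <script>/<link> tag is dropped
# and a single combined "<app>.css" / "<app>.js" pair is injected right
# before </head>.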
def main(argv):
if len(argv) < 4:
print('usage: %s app_input_html generated_app_html debug' % argv[0])
return 1
# The first argument is ignored. We put 'web.gyp' in the inputs list
# for this script, so every time the list of script gets changed, our html
# file is rebuilt.
app_input_html_name = argv[1]
app_output_html_name = argv[2]
debug = argv[3] != '0'
application_name = path.splitext(path.basename(app_input_html_name))[0]
with open(app_input_html_name, 'r') as app_input_html:
with open(app_output_html_name, 'w') as app_output_html:
write_app_input_html(app_input_html, app_output_html, application_name, debug)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
sriki18/scipy | scipy/sparse/linalg/tests/test_interface.py | 38 | 12724 | """Test functions for the sparse.linalg.interface module
"""
from __future__ import division, print_function, absolute_import
from functools import partial
from itertools import product
import operator
import nose
from numpy.testing import TestCase, assert_, assert_equal, \
assert_raises
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import interface
# Only test matmul operator (A @ B) when available (Python 3.5+)
TEST_MATMUL = hasattr(operator, 'matmul')
class TestLinearOperator(TestCase):
def setUp(self):
self.A = np.array([[1,2,3],
[4,5,6]])
self.B = np.array([[1,2],
[3,4],
[5,6]])
self.C = np.array([[1,2],
[3,4]])
def test_matvec(self):
def get_matvecs(A):
return [{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
'rmatvec': lambda x: np.dot(A.T.conj(),
x).reshape(A.shape[1])
},
{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x),
'rmatvec': lambda x: np.dot(A.T.conj(), x),
'matmat': lambda x: np.dot(A, x)
}]
for matvecs in get_matvecs(self.A):
A = interface.LinearOperator(**matvecs)
assert_(A.args == ())
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal((2*A)*[1,1,1], [12,30])
assert_equal((2*A).rmatvec([1,1]), [10, 14, 18])
assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])
assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
assert_equal((2*A).matmat([[1],[1],[1]]), [[12],[30]])
assert_equal((A*2)*[1,1,1], [12,30])
assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
assert_equal((2j*A)*[1,1,1], [12j,30j])
assert_equal((A+A)*[1,1,1], [12, 30])
assert_equal((A+A).rmatvec([1,1]), [10, 14, 18])
assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])
assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
assert_equal((-A)*[1,1,1], [-6,-15])
assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
assert_equal((A-A)*[1,1,1], [0,0])
assert_equal((A-A)*[[1],[1],[1]], [[0],[0]])
z = A+A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
z = 2*A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)
assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(2*A, interface._ScaledLinearOperator))
assert_(isinstance(2j*A, interface._ScaledLinearOperator))
assert_(isinstance(A+A, interface._SumLinearOperator))
assert_(isinstance(-A, interface._ScaledLinearOperator))
assert_(isinstance(A-A, interface._SumLinearOperator))
assert_((2j*A).dtype == np.complex_)
assert_raises(ValueError, A.matvec, np.array([1,2]))
assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))
assert_raises(ValueError, lambda: A*A)
assert_raises(ValueError, lambda: A**2)
for matvecsA, matvecsB in product(get_matvecs(self.A),
get_matvecs(self.B)):
A = interface.LinearOperator(**matvecsA)
B = interface.LinearOperator(**matvecsB)
assert_equal((A*B)*[1,1], [50,113])
assert_equal((A*B)*[[1],[1]], [[50],[113]])
assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])
assert_equal((A*B).rmatvec([1,1]), [71,92])
assert_equal((A*B).H.matvec([1,1]), [71,92])
assert_(isinstance(A*B, interface._ProductLinearOperator))
assert_raises(ValueError, lambda: A+B)
assert_raises(ValueError, lambda: A**2)
z = A*B
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)
for matvecsC in get_matvecs(self.C):
C = interface.LinearOperator(**matvecsC)
assert_equal((C**2)*[1,1], [17,37])
assert_equal((C**2).rmatvec([1,1]), [22,32])
assert_equal((C**2).H.matvec([1,1]), [22,32])
assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])
assert_(isinstance(C**2, interface._PowerLinearOperator))
def test_matmul(self):
if not TEST_MATMUL:
raise nose.SkipTest("matmul is only tested in Python 3.5+")
D = {'shape': self.A.shape,
'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
'rmatvec': lambda x: np.dot(self.A.T.conj(),
x).reshape(self.A.shape[1]),
'matmat': lambda x: np.dot(self.A, x)}
A = interface.LinearOperator(**D)
B = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = B[0]
assert_equal(operator.matmul(A, b), A * b)
assert_equal(operator.matmul(A, B), A * B)
assert_raises(ValueError, operator.matmul, A, 2)
assert_raises(ValueError, operator.matmul, 2, A)
class TestAsLinearOperator(TestCase):
def setUp(self):
self.cases = []
def make_cases(dtype):
self.cases.append(np.matrix([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(np.array([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype))
# Test default implementations of _adjoint and _rmatvec, which
# refer to each other.
def mv(x, dtype):
y = np.array([1 * x[0] + 2 * x[1] + 3 * x[2],
4 * x[0] + 5 * x[1] + 6 * x[2]], dtype=dtype)
if len(x.shape) == 2:
y = y.reshape(-1, 1)
return y
def rmv(x, dtype):
return np.array([1 * x[0] + 4 * x[1],
2 * x[0] + 5 * x[1],
3 * x[0] + 6 * x[1]], dtype=dtype)
class BaseMatlike(interface.LinearOperator):
def __init__(self, dtype):
self.dtype = np.dtype(dtype)
self.shape = (2,3)
def _matvec(self, x):
return mv(x, self.dtype)
class HasRmatvec(BaseMatlike):
def _rmatvec(self,x):
return rmv(x, self.dtype)
class HasAdjoint(BaseMatlike):
def _adjoint(self):
shape = self.shape[1], self.shape[0]
matvec = partial(rmv, dtype=self.dtype)
rmatvec = partial(mv, dtype=self.dtype)
return interface.LinearOperator(matvec=matvec,
rmatvec=rmatvec,
dtype=self.dtype,
shape=shape)
self.cases.append(HasRmatvec(dtype))
self.cases.append(HasAdjoint(dtype))
make_cases('int32')
make_cases('float32')
make_cases('float64')
def test_basic(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.rmatvec(np.array([1,2])), [9,12,15])
assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(A.H.matvec(np.array([1,2])), [9,12,15])
assert_equal(A.H.matvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(
A.matmat(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]])
if hasattr(M,'dtype'):
assert_equal(A.dtype, M.dtype)
def test_dot(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(
A.dot(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
def test_repr():
A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
repr_A = repr(A)
assert_('unspecified dtype' not in repr_A, repr_A)
def test_identity():
ident = interface.IdentityOperator((3, 3))
assert_equal(ident * [1, 2, 3], [1, 2, 3])
assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))
assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])
def test_attributes():
A = interface.aslinearoperator(np.arange(16).reshape(4, 4))
def always_four_ones(x):
x = np.asarray(x)
assert_(x.shape == (3,) or x.shape == (3, 1))
return np.ones(4)
B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)
for op in [A, B, A * B, A.H, A + A, B + B, A ** 4]:
assert_(hasattr(op, "dtype"))
assert_(hasattr(op, "shape"))
assert_(hasattr(op, "_matvec"))
def matvec(x):
""" Needed for test_pickle as local functions are not pickleable """
return np.zeros(3)
def test_pickle():
import pickle
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
A = interface.LinearOperator((3, 3), matvec)
s = pickle.dumps(A, protocol=protocol)
B = pickle.loads(s)
for k in A.__dict__:
assert_equal(getattr(A, k), getattr(B, k))
def test_inheritance():
class Empty(interface.LinearOperator):
pass
assert_raises(TypeError, Empty)
class Identity(interface.LinearOperator):
def __init__(self, n):
super(Identity, self).__init__(dtype=None, shape=(n, n))
def _matvec(self, x):
return x
id3 = Identity(3)
assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])
class MatmatOnly(interface.LinearOperator):
def __init__(self, A):
super(MatmatOnly, self).__init__(A.dtype, A.shape)
self.A = A
def _matmat(self, x):
return self.A.dot(x)
mm = MatmatOnly(np.random.randn(5, 3))
assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))
def test_dtypes_of_operator_sum():
# gh-6078
mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)
mat_real = np.random.rand(2,2)
complex_operator = interface.aslinearoperator(mat_complex)
real_operator = interface.aslinearoperator(mat_real)
sum_complex = complex_operator + complex_operator
sum_real = real_operator + real_operator
assert_equal(sum_real.dtype, np.float64)
assert_equal(sum_complex.dtype, np.complex128)
| bsd-3-clause |
Evervolv/android_kernel_samsung_msm8660 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
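# autodict (from perf's Util helpers) auto-creates missing nested levels, so
# syscalls[comm][pid][id][ret] can be updated directly; the TypeError branch
# below initializes a leaf counter on its first failed syscall.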
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Tools/framer/framer/member.py | 50 | 1933 | from framer import template
from framer.util import cstring, unindent
T_SHORT = "T_SHORT"
T_INT = "T_INT"
T_LONG = "T_LONG"
T_FLOAT = "T_FLOAT"
T_DOUBLE = "T_DOUBLE"
T_STRING = "T_STRING"
T_OBJECT = "T_OBJECT"
T_CHAR = "T_CHAR"
T_BYTE = "T_BYTE"
T_UBYTE = "T_UBYTE"
T_UINT = "T_UINT"
T_ULONG = "T_ULONG"
T_STRING_INPLACE = "T_STRING_INPLACE"
T_OBJECT_EX = "T_OBJECT_EX"
RO = READONLY = "READONLY"
READ_RESTRICTED = "READ_RESTRICTED"
WRITE_RESTRICTED = "WRITE_RESTRICTED"
RESTRICT = "RESTRICTED"
c2t = {"int" : T_INT,
"unsigned int" : T_UINT,
"long" : T_LONG,
"unsigned long" : T_LONG,
"float" : T_FLOAT,
"double" : T_DOUBLE,
"char *" : T_CHAR,
"PyObject *" : T_OBJECT,
}
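# e.g. a slot declared as C "double" resolves to the T_DOUBLE member type;
# member.get_type() falls back to this table when no type= was given.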
class member(object):
def __init__(self, cname=None, type=None, flags=None, doc=None):
self.type = type
self.flags = flags
self.cname = cname
self.doc = doc
self.name = None
self.struct = None
def register(self, name, struct):
self.name = name
self.struct = struct
self.initvars()
def initvars(self):
v = self.vars = {}
v["PythonName"] = self.name
if self.cname is not None:
v["CName"] = self.cname
else:
v["CName"] = self.name
v["Flags"] = self.flags or "0"
v["Type"] = self.get_type()
if self.doc is not None:
v["Docstring"] = cstring(unindent(self.doc))
v["StructName"] = self.struct.name
def get_type(self):
"""Deduce type code from struct specification if not defined"""
if self.type is not None:
return self.type
ctype = self.struct.get_type(self.name)
return c2t[ctype]
def dump(self, f):
if self.doc is None:
print >> f, template.memberdef_def % self.vars
else:
print >> f, template.memberdef_def_doc % self.vars
| mit |
Ernesto99/odoo | addons/mrp_operations/report/mrp_code_barcode.py | 381 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chusiang/git-repo | subcmds/abandon.py | 48 | 2034 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import git
from progress import Progress
class Abandon(Command):
common = True
helpSummary = "Permanently abandon a development branch"
helpUsage = """
%prog <branchname> [<project>...]
This subcommand permanently abandons a development branch by
deleting it (and all its history) from your local repository.
It is equivalent to "git branch -D <branchname>".
"""
def Execute(self, opt, args):
if not args:
self.Usage()
nb = args[0]
if not git.check_ref_format('heads/%s' % nb):
print >>sys.stderr, "error: '%s' is not a valid name" % nb
sys.exit(1)
nb = args[0]
err = []
success = []
all_projects = self.GetProjects(args[1:])
pm = Progress('Abandon %s' % nb, len(all_projects))
for project in all_projects:
pm.update()
status = project.AbandonBranch(nb)
if status is not None:
if status:
success.append(project)
else:
err.append(project)
pm.end()
if err:
for p in err:
print >>sys.stderr,\
"error: %s/: cannot abandon %s" \
% (p.relpath, nb)
sys.exit(1)
elif not success:
print >>sys.stderr, 'error: no project has branch %s' % nb
sys.exit(1)
else:
print >>sys.stderr, 'Abandoned in %d project(s):\n %s' % (
len(success), '\n '.join(p.relpath for p in success))
| apache-2.0 |
vedujoshi/tempest | tempest/scenario/test_minimum_basic.py | 1 | 7041 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.common import custom_matchers
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class TestMinimumBasicScenario(manager.ScenarioTest):
"""This is a basic minimum scenario test.
This test covers, in one pass:
* multiple components
* operation as a regular user
* calls with and without optional parameters
* verification of command outputs
Steps:
1. Create image
2. Create keypair
3. Boot instance with keypair and get list of instances
4. Create volume and show list of volumes
5. Attach volume to instance and get list of volumes
6. Add IP to instance
7. Create and add security group to instance
8. Check SSH connection to instance
9. Reboot instance
10. Check SSH connection to instance after reboot
"""
def nova_show(self, server):
got_server = (self.servers_client.show_server(server['id'])
['server'])
excluded_keys = ['OS-EXT-AZ:availability_zone']
# Exclude these keys because of LP:#1486475
excluded_keys.extend(['OS-EXT-STS:power_state', 'updated'])
self.assertThat(
server, custom_matchers.MatchesDictExceptForKeys(
got_server, excluded_keys=excluded_keys))
def cinder_show(self, volume):
got_volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual(volume, got_volume)
def nova_reboot(self, server):
self.servers_client.reboot_server(server['id'], type='SOFT')
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
def check_disks(self):
# NOTE(andreaf) The device name may be different on different guest OS
disks = self.linux_client.get_disks()
self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
def create_and_add_security_group_to_server(self, server):
secgroup = self._create_security_group()
self.servers_client.add_security_group(server['id'],
name=secgroup['name'])
self.addCleanup(self.servers_client.remove_security_group,
server['id'], name=secgroup['name'])
def wait_for_secgroup_add():
body = (self.servers_client.show_server(server['id'])
['server'])
return {'name': secgroup['name']} in body['security_groups']
if not test_utils.call_until_true(wait_for_secgroup_add,
CONF.compute.build_timeout,
CONF.compute.build_interval):
msg = ('Timed out waiting for adding security group %s to server '
'%s' % (secgroup['id'], server['id']))
raise exceptions.TimeoutException(msg)
def _get_floating_ip_in_server_addresses(self, floating_ip, server):
for addresses in server['addresses'].values():
for address in addresses:
if (address['OS-EXT-IPS:type'] == 'floating' and
address['addr'] == floating_ip['ip']):
return address
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.network_feature_enabled.floating_ips,
'Floating ips are not available')
@test.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
image = self.glance_image_create()
keypair = self.create_keypair()
server = self.create_server(image_id=image, key_name=keypair['name'])
servers = self.servers_client.list_servers()['servers']
self.assertIn(server['id'], [x['id'] for x in servers])
self.nova_show(server)
volume = self.create_volume()
volumes = self.volumes_client.list_volumes()['volumes']
self.assertIn(volume['id'], [x['id'] for x in volumes])
self.cinder_show(volume)
volume = self.nova_volume_attach(server, volume)
self.addCleanup(self.nova_volume_detach, server, volume)
self.cinder_show(volume)
floating_ip = self.create_floating_ip(server)
# fetch the server again to make sure the addresses were refreshed
# after associating the floating IP
server = self.servers_client.show_server(server['id'])['server']
address = self._get_floating_ip_in_server_addresses(
floating_ip, server)
self.assertIsNotNone(
address,
"Failed to find floating IP '%s' in server addresses: %s" %
(floating_ip['ip'], server['addresses']))
self.create_and_add_security_group_to_server(server)
# check that we can SSH to the server before reboot
self.linux_client = self.get_remote_client(
floating_ip['ip'], private_key=keypair['private_key'],
server=server)
self.nova_reboot(server)
# check that we can SSH to the server after reboot
# (both connections are part of the scenario)
self.linux_client = self.get_remote_client(
floating_ip['ip'], private_key=keypair['private_key'],
server=server)
self.check_disks()
# delete the floating IP, this should refresh the server addresses
self.compute_floating_ips_client.delete_floating_ip(floating_ip['id'])
def is_floating_ip_detached_from_server():
server_info = self.servers_client.show_server(
server['id'])['server']
address = self._get_floating_ip_in_server_addresses(
floating_ip, server_info)
return (not address)
if not test_utils.call_until_true(
is_floating_ip_detached_from_server,
CONF.compute.build_timeout,
CONF.compute.build_interval):
msg = ("Floating IP '%s' should not be in server addresses: %s" %
(floating_ip['ip'], server['addresses']))
raise exceptions.TimeoutException(msg)
| apache-2.0 |
abhikumar22/MYBLOG | blg/Lib/site-packages/pip/utils/__init__.py | 323 | 27187 | from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
    # if the file is currently read-only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
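# Illustrative values (not part of the original module; assumes "build" and
# "build.bak" already exist on disk):
#   backup_dir('build')         -> 'build.bak2'
#   backup_dir('build', '.old') -> 'build.old'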
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
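# Illustrative values (not part of the original module), following the
# branches above:
#   format_size(500)     -> '500bytes'
#   format_size(2048)    -> '2.0kB'
#   format_size(20000)   -> '20kB'   (whole kB once above 10 kB)
#   format_size(3500000) -> '3.5MB'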
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
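# A minimal usage sketch (not part of the original module): hash a file
# incrementally with read_chunks() so the whole file never sits in memory.
# The filename below is hypothetical.
def _read_chunks_example(filename='example-archive.tar.gz'):
    import hashlib
    digest = hashlib.sha256()
    with open(filename, 'rb') as fp:
        for chunk in read_chunks(fp):
            digest.update(chunk)
    return digest.hexdigest()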
def split_leading_dir(path):
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
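# Illustrative values (not part of the original module); note that
# split_leading_dir() returns a list when it actually splits:
#   split_leading_dir('pkg/module/file.py')      -> ['pkg', 'module/file.py']
#   has_leading_dir(['pkg/a.py', 'pkg/b.py'])    -> True
#   has_leading_dir(['pkg/a.py', 'other/b.py'])  -> False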
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
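# Illustrative values (not part of the original module): '.tar' compounds
# are treated as a single extension, unlike os.path.splitext:
#   splitext('archive.tar.gz') -> ('archive', '.tar.gz')
#   splitext('archive.zip')    -> ('archive', '.zip')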
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
    If ``user_only`` is True, only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
            # does the member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def call_subprocess(cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_desc=None,
extra_environ=None, spinner=None):
# This function's handling of subprocess output is confusing and I
# previously broke it terribly, so as penance I will write a long comment
# explaining things.
#
# The obvious thing that affects output is the show_stdout=
# kwarg. show_stdout=True means, let the subprocess write directly to our
# stdout. Even though it is nominally the default, it is almost never used
# inside pip (and should not be used in new code without a very good
# reason); as of 2016-02-22 it is only used in a few places inside the VCS
# wrapper code. Ideally we should get rid of it entirely, because it
# creates a lot of complexity here for a rarely used feature.
#
# Most places in pip set show_stdout=False. What this means is:
# - We connect the child stdout to a pipe, which we read.
# - By default, we hide the output but show a spinner -- unless the
# subprocess exits with an error, in which case we show the output.
# - If the --verbose option was passed (= loglevel is DEBUG), then we show
# the output unconditionally. (But in this case we don't want to show
# the output a second time if it turns out that there was an error.)
#
# stderr is always merged with stdout (even if show_stdout=True).
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.debug("Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
if stdout is not None:
all_output = []
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if logger.getEffectiveLevel() <= std_logging.DEBUG:
# Show the line immediately
logger.debug(line)
else:
# Update the spinner
if spinner is not None:
spinner.spin()
proc.wait()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode:
if on_returncode == 'raise':
if (logger.getEffectiveLevel() > std_logging.DEBUG and
not show_stdout):
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return ''.join(all_output)
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
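# A minimal usage sketch (not part of the original module): the wrapped
# method runs once per instance; the result then shadows the descriptor as
# a plain attribute in obj.__dict__.
class _CachedPropertyExample(object):
    @cached_property
    def answer(self):
        # the expensive computation happens only on first access
        return 6 * 7
# obj = _CachedPropertyExample()
# obj.answer                   # computes and caches 42
# obj.answer                   # served from obj.__dict__, no recomputation
# del obj.__dict__['answer']   # deleting the attribute resets the property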
def get_installed_version(dist_name, lookup_dirs=None):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
if lookup_dirs is None:
working_set = pkg_resources.WorkingSet()
else:
working_set = pkg_resources.WorkingSet(lookup_dirs)
# Get the installed distribution from our working set
dist = working_set.find(req)
    # Check to see if we got an installed distribution or not; if we did,
    # we want to return its version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
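# Illustrative use (not part of the original module): exhaust a generator
# purely for its side effects without building an intermediate list, e.g.
#   consume(os.unlink(p) for p in paths_to_delete)
# where paths_to_delete is a hypothetical iterable of filenames.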
| gpl-3.0 |
UnrememberMe/pants | tests/python/pants_test/backend/project_info/tasks/resolve_jars_test_mixin.py | 17 | 3782 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.util.contextutil import temporary_dir
class ResolveJarsTestMixin(object):
"""Mixin for evaluating tasks which resolve their own source and javadoc jars (such as Export)."""
def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
"""Evaluate the underlying task with the given target specs.
:param targets: the list of targets.
:param string workdir: the working directory to execute in.
:param bool load_extra_confs: whether to attempt to download sources and javadocs.
:param list extra_args: extra args to pass to the task.
:param list expected_jars: list of jars that were expected to be resolved.
"""
raise NotImplementedError()
def _test_jar_lib_with_url(self, load_all):
with self.temporary_workdir() as workdir:
with self.temporary_sourcedir() as source_dir:
with temporary_dir() as dist_dir:
os.makedirs(os.path.join(source_dir, 'src'))
with open(os.path.join(source_dir, 'src', 'BUILD.one'), 'w+') as f:
f.write(dedent("""
jvm_binary(name='synthetic',
source='Main.java',
)
"""))
with open(os.path.join(source_dir, 'src', 'Main.java'), 'w+') as f:
f.write(dedent("""
public class Main {
public static void main(String[] args) {
System.out.println("Hello.");
}
}
"""))
with open(os.path.join(source_dir, 'src', 'Foo.java'), 'w+') as f:
f.write(dedent("""
public class Foo {
public static void main(String[] args) {
Main.main(args);
}
}
"""))
binary_target = '{}:synthetic'.format(os.path.join(source_dir, 'src'))
pants_run = self.run_pants_with_workdir(['binary', binary_target,
'--pants-distdir={}'.format(dist_dir)], workdir)
self.assert_success(pants_run)
jar_path = os.path.realpath(os.path.join(dist_dir, 'synthetic.jar'))
self.assertTrue(os.path.exists(jar_path), 'Synthetic binary was not created!')
jar_url = 'file://{}'.format(os.path.abspath(jar_path))
with open(os.path.join(source_dir, 'src', 'BUILD.two'), 'w+') as f:
f.write(dedent("""
jar_library(name='lib_with_url',
jars=[
jar(org='org.pantsbuild', name='synthetic-test-jar', rev='1.2.3',
url='{jar_url}')
],
)
java_library(name='src',
sources=['Foo.java'],
dependencies=[':lib_with_url'],
)
""").format(jar_url=jar_url))
spec_names = ['lib_with_url', 'src']
targets = ['{0}:{1}'.format(os.path.join(source_dir, 'src'), name) for name in spec_names]
with temporary_dir() as ivy_temp_dir:
extra_args = ['--ivy-cache-dir={}'.format(ivy_temp_dir)]
self.evaluate_subtask(targets, workdir, load_all, extra_args=extra_args,
expected_jars=['org.pantsbuild:synthetic-test-jar:1.2.3'])
def test_jar_lib_with_url_resolve_default(self):
self._test_jar_lib_with_url(False)
def test_jar_lib_with_url_resolve_all(self):
self._test_jar_lib_with_url(True)
| apache-2.0 |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/PC/VS8.0/build_ssl.py | 48 | 10103 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PC/VS8.0 directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen('"%s" -e "use Win32;"' % perl)
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
candidates = []
for s in sources:
try:
# note: do not abspath s; the build will fail if any
# higher up directory name has spaces in it.
fnames = os.listdir(s)
except os.error:
fnames = []
for fname in fnames:
fqn = os.path.join(s, fname)
if os.path.isdir(fqn) and fname.startswith("openssl-"):
candidates.append(fqn)
# Now we have all the candidates, locate the best.
best_parts = []
best_name = None
for c in candidates:
parts = re.split("[.-]", os.path.basename(c))[1:]
# eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
if len(parts) >= 4:
continue
if parts > best_parts:
best_parts = parts
best_name = c
if best_name is not None:
print("Found an SSL directory at '%s'" % (best_name,))
else:
print("Could not find an SSL directory in '%s'" % (sources,))
sys.stdout.flush()
return best_name
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
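# Illustrative note (not part of the original script): cmp() streams both
# files in 8 KB blocks, so copy() below only rewrites its destination when
# the contents actually differ, e.g.
#   cmp(r"crypto\buildinf.h", r"crypto\buildinf_x86.h") -> True or False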
def copy(src, dst):
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 3 levels up from PC/VS8.0 - ie, same place zlib etc all live.
ssl_dir = find_best_ssl_dir(("..\\..\\..",))
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild the makefile when we roll over from the 32- to the 64-bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
if not f.endswith(".asm"): continue
if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
# Now run make.
if arch == "amd64":
rc = os.system("ml64 -c -Foms\\uptable.obj ms\\uptable.asm")
if rc:
print("ml64 assembler has failed.")
sys.exit(rc)
copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
| apache-2.0 |
vmax-feihu/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/other.py | 75 | 3857 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
lasttype = None
lastval = u''
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
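# A minimal usage sketch (not part of the original module; assumes the
# standard pygments.highlight() helper and PythonLexer are available).
# RawTokenFormatter produces a bytestring of "tokentype<TAB>repr(value)"
# lines that the RawTokenLexer can later turn back into a token stream.
def _raw_token_formatter_example():
    from pygments import highlight
    from pygments.lexers import PythonLexer
    return highlight('print 1\n', PythonLexer(), RawTokenFormatter())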
| apache-2.0 |
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/pty.py | 109 | 4869 | """Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
"""openpty() -> (master_fd, slave_fd)
Open a pty master/slave pair, using os.openpty() if possible."""
try:
return os.openpty()
except (AttributeError, OSError):
pass
master_fd, slave_name = _open_terminal()
slave_fd = slave_open(slave_name)
return master_fd, slave_fd
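# A minimal usage sketch (not part of the original module): open a pty
# pair, push data in through the master and read it back from the slave.
# The exact bytes seen depend on the slave's tty line discipline.
def _openpty_example():
    master_fd, slave_fd = openpty()
    os.write(master_fd, 'hello\n')
    data = os.read(slave_fd, 1024)
    os.close(master_fd)
    os.close(slave_fd)
    return data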
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal()
def _open_terminal():
"""Open pty master and return (master_fd, tty_name).
SGI and generic BSD version, for when openpty() fails."""
try:
import sgi
except ImportError:
pass
else:
try:
tty_name, master_fd = sgi._getpty(os.O_RDWR, 0666, 0)
except IOError, msg:
raise os.error, msg
return master_fd, tty_name
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except os.error:
continue
return (fd, '/dev/tty' + x + y)
raise os.error, 'out of pty devices'
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except IOError:
pass
return result
def fork():
"""fork() -> (pid, master_fd)
Fork and make the child a session leader with a controlling terminal."""
try:
pid, fd = os.forkpty()
except (AttributeError, OSError):
pass
else:
if pid == CHILD:
try:
os.setsid()
except OSError:
# os.forkpty() already set us session leader
pass
return pid, fd
master_fd, slave_fd = openpty()
pid = os.fork()
if pid == CHILD:
# Establish a new session.
os.setsid()
os.close(master_fd)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
# Explicitly open the tty to make it become a controlling tty.
tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
os.close(tmp_fd)
else:
os.close(slave_fd)
# Parent and child process.
return pid, master_fd
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data != '':
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
while 1:
rfds, wfds, xfds = select(
[master_fd, STDIN_FILENO], [], [])
if master_fd in rfds:
data = master_read(master_fd)
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
_writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()
if pid == CHILD:
os.execlp(argv[0], *argv)
try:
mode = tty.tcgetattr(STDIN_FILENO)
tty.setraw(STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
try:
_copy(master_fd, master_read, stdin_read)
except (IOError, OSError):
if restore:
tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
| apache-2.0 |
jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_textbox16.py | 8 | 1117 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'textbox16.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text',
{'align': {'vertical': 'middle'}})
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaasession.py | 1 | 8759 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaasession(base_resource) :
""" Configuration for active connection resource. """
def __init__(self) :
self._username = ""
self._groupname = ""
self._iip = ""
self._netmask = ""
self._all = False
self._publicip = ""
self._publicport = 0
self._ipaddress = ""
self._port = 0
self._privateip = ""
self._privateport = 0
self._destip = ""
self._destport = 0
self._intranetip = ""
self._peid = 0
self.___count = 0
@property
def username(self) :
"""Name of the AAA user.<br/>Minimum length = 1.
"""
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
"""Name of the AAA user.<br/>Minimum length = 1
"""
try :
self._username = username
except Exception as e:
raise e
@property
def groupname(self) :
"""Name of the AAA group.<br/>Minimum length = 1.
"""
try :
return self._groupname
except Exception as e:
raise e
@groupname.setter
def groupname(self, groupname) :
"""Name of the AAA group.<br/>Minimum length = 1
"""
try :
self._groupname = groupname
except Exception as e:
raise e
@property
def iip(self) :
"""IP address or the first address in the intranet IP range.<br/>Minimum length = 1.
"""
try :
return self._iip
except Exception as e:
raise e
@iip.setter
def iip(self, iip) :
"""IP address or the first address in the intranet IP range.<br/>Minimum length = 1
"""
try :
self._iip = iip
except Exception as e:
raise e
@property
def netmask(self) :
"""Subnet mask for the intranet IP range.<br/>Minimum length = 1.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
"""Subnet mask for the intranet IP range.<br/>Minimum length = 1
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def all(self) :
"""Terminate all active AAA-TM/VPN sessions.
"""
try :
return self._all
except Exception as e:
raise e
@all.setter
def all(self, all) :
"""Terminate all active AAA-TM/VPN sessions.
"""
try :
self._all = all
except Exception as e:
raise e
@property
def publicip(self) :
"""Client's public IP address.
"""
try :
return self._publicip
except Exception as e:
raise e
@property
def publicport(self) :
"""Client's public port.<br/>Range 1 - 65535.
"""
try :
return self._publicport
except Exception as e:
raise e
@property
def ipaddress(self) :
"""NetScaler's IP address.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@property
def port(self) :
"""NetScaler's port.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@property
def privateip(self) :
"""Client's private/mapped IP address.
"""
try :
return self._privateip
except Exception as e:
raise e
@property
def privateport(self) :
"""Client's private/mapped port.<br/>Range 1 - 65535.
"""
try :
return self._privateport
except Exception as e:
raise e
@property
def destip(self) :
"""Destination IP address.
"""
try :
return self._destip
except Exception as e:
raise e
@property
def destport(self) :
"""Destination port.<br/>Range 1 - 65535.
"""
try :
return self._destport
except Exception as e:
raise e
@property
def intranetip(self) :
"""Specifies the Intranet IP.
"""
try :
return self._intranetip
except Exception as e:
raise e
@property
def peid(self) :
"""Core id of the session owner.
"""
try :
return self._peid
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(aaasession_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaasession
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def kill(cls, client, resource) :
""" Use this API to kill aaasession.
"""
try :
if type(resource) is not list :
killresource = aaasession()
killresource.username = resource.username
killresource.groupname = resource.groupname
killresource.iip = resource.iip
killresource.netmask = resource.netmask
killresource.all = resource.all
return killresource.perform_operation(client,"kill")
else :
if (resource and len(resource) > 0) :
killresources = [ aaasession() for _ in range(len(resource))]
for i in range(len(resource)) :
killresources[i].username = resource[i].username
killresources[i].groupname = resource[i].groupname
killresources[i].iip = resource[i].iip
killresources[i].netmask = resource[i].netmask
killresources[i].all = resource[i].all
result = cls.perform_operation_bulk_request(client, killresources,"kill")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the aaasession resources that are configured on netscaler.
"""
try :
if not name :
obj = aaasession()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
""" Use this API to fetch all the aaasession resources that are configured on netscaler.
# This uses aaasession_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = aaasession()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of aaasession resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaasession()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the aaasession resources configured on NetScaler.
"""
try :
obj = aaasession()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of aaasession resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaasession()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class aaasession_response(base_response) :
def __init__(self, length=1) :
self.aaasession = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaasession = [aaasession() for _ in range(length)]
| apache-2.0 |
pschmitt/home-assistant | homeassistant/components/verisure/alarm_control_panel.py | 9 | 3404 | """Support for Verisure alarm control panels."""
import logging
from time import sleep
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from . import CONF_ALARM, CONF_CODE_DIGITS, CONF_GIID, HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure platform."""
alarms = []
if int(hub.config.get(CONF_ALARM, 1)):
hub.update_overview()
alarms.append(VerisureAlarm())
add_entities(alarms)
def set_arm_state(state, code=None):
"""Send set arm state command."""
transaction_id = hub.session.set_arm_state(code, state)[
"armStateChangeTransactionId"
]
_LOGGER.info("verisure set arm state %s", state)
transaction = {}
while "result" not in transaction:
sleep(0.5)
transaction = hub.session.get_arm_state_transaction(transaction_id)
hub.update_overview(no_throttle=True)
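# Illustrative call (hypothetical code; "1234" is a made-up user code):
#   set_arm_state("ARMED_HOME", "1234")
# The helper polls the transaction until a result arrives, then forces an
# overview refresh so the entity state updates immediately.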
class VerisureAlarm(alarm.AlarmControlPanelEntity):
"""Representation of a Verisure alarm status."""
def __init__(self):
"""Initialize the Verisure alarm panel."""
self._state = None
self._digits = hub.config.get(CONF_CODE_DIGITS)
self._changed_by = None
@property
def name(self):
"""Return the name of the device."""
giid = hub.config.get(CONF_GIID)
if giid is not None:
aliass = {i["giid"]: i["alias"] for i in hub.session.installations}
if giid in aliass.keys():
return "{} alarm".format(aliass[giid])
_LOGGER.error("Verisure installation giid not found: %s", giid)
return "{} alarm".format(hub.session.installations[0]["alias"])
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
@property
def code_format(self):
"""Return one or more digits/characters."""
return alarm.FORMAT_NUMBER
@property
def changed_by(self):
"""Return the last change triggered by."""
return self._changed_by
def update(self):
"""Update alarm status."""
hub.update_overview()
status = hub.get_first("$.armState.statusType")
if status == "DISARMED":
self._state = STATE_ALARM_DISARMED
elif status == "ARMED_HOME":
self._state = STATE_ALARM_ARMED_HOME
elif status == "ARMED_AWAY":
self._state = STATE_ALARM_ARMED_AWAY
elif status != "PENDING":
_LOGGER.error("Unknown alarm state %s", status)
self._changed_by = hub.get_first("$.armState.name")
def alarm_disarm(self, code=None):
"""Send disarm command."""
set_arm_state("DISARMED", code)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
set_arm_state("ARMED_HOME", code)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
set_arm_state("ARMED_AWAY", code)
| apache-2.0 |
Kingclove/ChannelAPI-Demo | server/lib/itsdangerous.py | 296 | 30509 | # -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2011 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode
int_to_byte = chr
number_types = (int, long, float)
else:
from functools import reduce
izip = zip
text_type = str
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
number_types = (int, float)
try:
import simplejson as json
except ImportError:
import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type):
s = s.encode(encoding, errors)
return s
def is_text_serializer(serializer):
"""Checks wheather a serializer generates text or binary."""
return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a C implementation for
# constant time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
def constant_time_compare(val1, val2):
"""Returns True if the two strings are equal, False otherwise.
    The time taken is independent of the number of characters that match. Do
    not use this function for anything other than comparison with known
    length targets.
    This should be implemented in C in order to get it completely right.
    """
"""
if _builtin_constant_time_compare is not None:
return _builtin_constant_time_compare(val1, val2)
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0
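# Example (values illustrative): the comparison takes the same amount of
# time regardless of where the inputs first differ, which matters when
# checking attacker-controlled signatures.
#
#   constant_time_compare(b'sig-a', b'sig-a')  # -> True
#   constant_time_compare(b'sig-a', b'sig-b')  # -> False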
class BadData(Exception):
"""Raised if bad data of any sort was encountered. This is the
base for all exceptions that itsdangerous is currently using.
.. versionadded:: 0.15
"""
message = None
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
def __str__(self):
return text_type(self.message)
if PY2:
__unicode__ = __str__
def __str__(self):
return self.__unicode__().encode('utf-8')
class BadPayload(BadData):
"""This error is raised in situations when payload is loaded without
checking the signature first and an exception happend as a result of
that. The original exception that caused that will be stored on the
exception as :attr:`original_error`.
.. versionadded:: 0.15
"""
def __init__(self, message, original_error=None):
BadData.__init__(self, message)
#: If available, the error that indicates why the payload
#: was not valid. This might be `None`.
self.original_error = original_error
class BadSignature(BadData):
"""This error is raised if a signature does not match. As of
itsdangerous 0.14 there are helpful attributes on the exception
instances. You can also catch down the baseclass :exc:`BadData`.
"""
def __init__(self, message, payload=None):
BadData.__init__(self, message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload = payload
class BadTimeSignature(BadSignature):
"""Raised for time based signatures that fail. This is a subclass
of :class:`BadSignature` so you can catch those down as well.
"""
def __init__(self, message, payload=None, date_signed=None):
BadSignature.__init__(self, message, payload)
#: If the signature expired this exposes the date of when the
#: signature was created. This can be helpful in order to
    #: tell the user how long ago a link went stale.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
class SignatureExpired(BadTimeSignature):
"""Signature timestamp is older than required max_age. This is a
subclass of :exc:`BadTimeSignature` so you can use the baseclass for
catching the error.
"""
def base64_encode(string):
"""base64 encodes a single bytestring (and is tolerant to getting
called with a unicode string).
The resulting bytestring is safe for putting into URLs.
"""
string = want_bytes(string)
return base64.urlsafe_b64encode(string).strip(b'=')
def base64_decode(string):
"""base64 decodes a single bytestring (and is tolerant to getting
called with a unicode string).
The result is also a bytestring.
"""
string = want_bytes(string, encoding='ascii', errors='ignore')
return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
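# Example (values illustrative): both helpers round-trip arbitrary bytes;
# the '=' padding is stripped on encode and restored on decode.
#
#   base64_encode(b'value')    # -> b'dmFsdWU'
#   base64_decode(b'dmFsdWU')  # -> b'value'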
def int_to_bytes(num):
assert num >= 0
rv = []
while num:
rv.append(int_to_byte(num & 0xff))
num >>= 8
return b''.join(reversed(rv))
def bytes_to_int(bytestr):
return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0)
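# Example (values illustrative): big-endian round trip, used below to
# encode the signing timestamp compactly.
#
#   int_to_bytes(0x010203)         # -> b'\x01\x02\x03'
#   bytes_to_int(b'\x01\x02\x03')  # -> 66051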
class SigningAlgorithm(object):
"""Subclasses of `SigningAlgorithm` have to implement `get_signature` to
provide signature generation functionality.
"""
def get_signature(self, key, value):
"""Returns the signature for the given key and value"""
raise NotImplementedError()
def verify_signature(self, key, value, sig):
"""Verifies the given signature matches the expected signature"""
return constant_time_compare(sig, self.get_signature(key, value))
class NoneAlgorithm(SigningAlgorithm):
"""This class provides a algorithm that does not perform any signing and
returns an empty signature.
"""
def get_signature(self, key, value):
return b''
class HMACAlgorithm(SigningAlgorithm):
"""This class provides signature generation using HMACs."""
#: The digest method to use with the MAC algorithm. This defaults to sha1
#: but can be changed for any other function in the hashlib module.
default_digest_method = staticmethod(hashlib.sha1)
def __init__(self, digest_method=None):
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
def get_signature(self, key, value):
mac = hmac.new(key, msg=value, digestmod=self.digest_method)
return mac.digest()
class Signer(object):
"""This class can sign bytes and unsign it and validate the signature
provided.
    Salt can be used to namespace the hash, so that a signed string is only
    valid for a given namespace. Leaving this at the default value, or re-using
    a salt value across different parts of your application where the same
    signed value in one part can mean something different in another part,
    is a security risk.
See :ref:`the-salt` for an example of what the salt is doing and how you
can utilize it.
.. versionadded:: 0.14
`key_derivation` and `digest_method` were added as arguments to the
class constructor.
.. versionadded:: 0.18
`algorithm` was added as an argument to the class constructor.
"""
#: The digest method to use for the signer. This defaults to sha1 but can
#: be changed for any other function in the hashlib module.
#:
#: .. versionchanged:: 0.14
default_digest_method = staticmethod(hashlib.sha1)
#: Controls how the key is derived. The default is Django style
#: concatenation. Possible values are ``concat``, ``django-concat``
#: and ``hmac``. This is used for deriving a key from the secret key
#: with an added salt.
#:
#: .. versionadded:: 0.14
default_key_derivation = 'django-concat'
def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
digest_method=None, algorithm=None):
self.secret_key = want_bytes(secret_key)
self.sep = sep
self.salt = 'itsdangerous.Signer' if salt is None else salt
if key_derivation is None:
key_derivation = self.default_key_derivation
self.key_derivation = key_derivation
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
if algorithm is None:
algorithm = HMACAlgorithm(self.digest_method)
self.algorithm = algorithm
def derive_key(self):
"""This method is called to derive the key. If you're unhappy with
the default key derivation choices you can override them here.
Keep in mind that the key derivation in itsdangerous is not intended
to be used as a security method to make a complex key out of a short
password. Instead you should use large random secret keys.
"""
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
return self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
return self.digest_method(salt + b'signer' +
self.secret_key).digest()
elif self.key_derivation == 'hmac':
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
return mac.digest()
elif self.key_derivation == 'none':
return self.secret_key
else:
raise TypeError('Unknown key derivation method')
def get_signature(self, value):
"""Returns the signature for the given value"""
value = want_bytes(value)
key = self.derive_key()
sig = self.algorithm.get_signature(key, value)
return base64_encode(sig)
def sign(self, value):
"""Signs the given string."""
return value + want_bytes(self.sep) + self.get_signature(value)
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
sig = base64_decode(sig)
return self.algorithm.verify_signature(key, value, sig)
def unsign(self, signed_value):
"""Unsigns the given string."""
signed_value = want_bytes(signed_value)
sep = want_bytes(self.sep)
if sep not in signed_value:
raise BadSignature('No %r found in value' % self.sep)
value, sig = signed_value.rsplit(sep, 1)
if self.verify_signature(value, sig):
return value
raise BadSignature('Signature %r does not match' % sig,
payload=value)
def validate(self, signed_value):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value)
return True
except BadSignature:
return False
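# Minimal usage sketch (key and payload are illustrative):
#
#   s = Signer('secret-key')
#   signed = s.sign(b'my string')    # -> b'my string.<base64 signature>'
#   s.unsign(signed)                 # -> b'my string'
#   s.validate(b'my string.forged')  # -> False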
class TimestampSigner(Signer):
"""Works like the regular :class:`Signer` but also records the time
of the signing and can be used to expire signatures. The unsign
method can rause a :exc:`SignatureExpired` method if the unsigning
failed because the signature is expired. This exception is a subclass
of :exc:`BadSignature`.
"""
def get_timestamp(self):
"""Returns the current timestamp. This implementation returns the
seconds since 1/1/2011. The function must return an integer.
"""
return int(time.time() - EPOCH)
def timestamp_to_datetime(self, ts):
"""Used to convert the timestamp from `get_timestamp` into a
datetime object.
"""
return datetime.utcfromtimestamp(ts + EPOCH)
def sign(self, value):
"""Signs the given string and also attaches a time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value)
def unsign(self, value, max_age=None, return_timestamp=False):
"""Works like the regular :meth:`~Signer.unsign` but can also
validate the time. See the base docstring of the class for
the general behavior. If `return_timestamp` is set to `True`
the timestamp of the signature will be returned as naive
:class:`datetime.datetime` object in UTC.
"""
try:
result = Signer.unsign(self, value)
sig_error = None
except BadSignature as e:
sig_error = e
result = e.payload or b''
sep = want_bytes(self.sep)
# If there is no timestamp in the result there is something
# seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation that we
        # should not have ended up in, unless someone used a time-based
        # serializer on non-timestamp data, so catch that.
        if sep not in result:
if sig_error:
raise sig_error
raise BadTimeSignature('timestamp missing', payload=result)
value, timestamp = result.rsplit(sep, 1)
try:
timestamp = bytes_to_int(base64_decode(timestamp))
except Exception:
timestamp = None
# Signature is *not* okay. Raise a proper error now that we have
# split the value and the timestamp.
if sig_error is not None:
raise BadTimeSignature(text_type(sig_error), payload=value,
date_signed=timestamp)
# Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but we handle it nonetheless.
if timestamp is None:
raise BadTimeSignature('Malformed timestamp', payload=value)
# Check timestamp is not older than max_age
if max_age is not None:
age = self.get_timestamp() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age),
payload=value,
date_signed=self.timestamp_to_datetime(timestamp))
if return_timestamp:
return value, self.timestamp_to_datetime(timestamp)
return value
def validate(self, signed_value, max_age=None):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value, max_age=max_age)
return True
except BadSignature:
return False
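# Minimal usage sketch (key and max_age are illustrative):
#
#   ts = TimestampSigner('secret-key')
#   token = ts.sign(b'my string')
#   ts.unsign(token, max_age=5)  # -> b'my string' while still fresh
#   # once the signature is older than max_age seconds, unsign() raises
#   # SignatureExpired instead.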
class Serializer(object):
"""This class provides a serialization interface on top of the
signer. It provides a similar API to json/pickle and other modules but is
slightly differently structured internally. If you want to change the
underlying implementation for parsing and loading you have to override the
:meth:`load_payload` and :meth:`dump_payload` functions.
This implementation uses simplejson if available for dumping and loading
and will fall back to the standard library's json module if it's not
available.
Starting with 0.14 you do not need to subclass this class in order to
    switch out or customize the :class:`Signer`. You can instead also pass a
    different class to the constructor, as well as keyword arguments as a
    dictionary that should be forwarded::
s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
.. versionchanged:: 0.14:
The `signer` and `signer_kwargs` parameters were added to the
constructor.
"""
#: If a serializer module or class is not passed to the constructor
#: this one is picked up. This currently defaults to :mod:`json`.
default_serializer = json
#: The default :class:`Signer` class that is being used by this
#: serializer.
#:
#: .. versionadded:: 0.14
default_signer = Signer
def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
signer=None, signer_kwargs=None):
self.secret_key = want_bytes(secret_key)
self.salt = want_bytes(salt)
if serializer is None:
serializer = self.default_serializer
self.serializer = serializer
self.is_text_serializer = is_text_serializer(serializer)
if signer is None:
signer = self.default_signer
self.signer = signer
self.signer_kwargs = signer_kwargs or {}
def load_payload(self, payload, serializer=None):
"""Loads the encoded object. This function raises :class:`BadPayload`
if the payload is not valid. The `serializer` parameter can be used to
override the serializer stored on the class. The encoded payload is
always byte based.
"""
if serializer is None:
serializer = self.serializer
is_text = self.is_text_serializer
else:
is_text = is_text_serializer(serializer)
try:
if is_text:
payload = payload.decode('utf-8')
return serializer.loads(payload)
except Exception as e:
raise BadPayload('Could not load the payload because an '
                             'exception occurred on unserializing the data',
original_error=e)
def dump_payload(self, obj):
"""Dumps the encoded object. The return value is always a
bytestring. If the internal serializer is text based the value
will automatically be encoded to utf-8.
"""
return want_bytes(self.serializer.dumps(obj))
def make_signer(self, salt=None):
"""A method that creates a new instance of the signer to be used.
The default implementation uses the :class:`Signer` baseclass.
"""
if salt is None:
salt = self.salt
return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
rv = rv.decode('utf-8')
return rv
def dump(self, obj, f, salt=None):
"""Like :meth:`dumps` but dumps into a file. The file handle has
to be compatible with what the internal serializer expects.
"""
f.write(self.dumps(obj, salt))
def loads(self, s, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails.
"""
s = want_bytes(s)
return self.load_payload(self.make_signer(salt).unsign(s))
def load(self, f, salt=None):
"""Like :meth:`loads` but loads from a file."""
return self.loads(f.read(), salt)
def loads_unsafe(self, s, salt=None):
"""Like :meth:`loads` but without verifying the signature. This is
potentially very dangerous to use depending on how your serializer
works. The return value is ``(signature_okay, payload)`` instead of
just the payload. The first item will be a boolean that indicates
if the signature is okay (``True``) or if it failed. This function
never fails.
Use it for debugging only and if you know that your serializer module
        is not exploitable (e.g. do not use it with a pickle serializer).
.. versionadded:: 0.15
"""
return self._loads_unsafe_impl(s, salt)
def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
load_payload_kwargs=None):
"""Lowlevel helper function to implement :meth:`loads_unsafe` in
serializer subclasses.
"""
try:
return True, self.loads(s, salt=salt, **(load_kwargs or {}))
except BadSignature as e:
if e.payload is None:
return False, None
try:
return False, self.load_payload(e.payload,
**(load_payload_kwargs or {}))
except BadPayload:
return False, None
def load_unsafe(self, f, *args, **kwargs):
"""Like :meth:`loads_unsafe` but loads from a file.
.. versionadded:: 0.15
"""
return self.loads_unsafe(f.read(), *args, **kwargs)
class TimedSerializer(Serializer):
"""Uses the :class:`TimestampSigner` instead of the default
:meth:`Signer`.
"""
default_signer = TimestampSigner
def loads(self, s, max_age=None, return_timestamp=False, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails. If a `max_age` is provided it will
ensure the signature is not older than that time in seconds. In
case the signature is outdated, :exc:`SignatureExpired` is raised
which is a subclass of :exc:`BadSignature`. All arguments are
forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
"""
base64d, timestamp = self.make_signer(salt) \
.unsign(s, max_age, return_timestamp=True)
payload = self.load_payload(base64d)
if return_timestamp:
return payload, timestamp
return payload
def loads_unsafe(self, s, max_age=None, salt=None):
load_kwargs = {'max_age': max_age}
load_payload_kwargs = {}
return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
class JSONWebSignatureSerializer(Serializer):
"""This serializer implements JSON Web Signature (JWS) support. Only
supports the JWS Compact Serialization.
"""
jws_algorithms = {
'HS256': HMACAlgorithm(hashlib.sha256),
'HS384': HMACAlgorithm(hashlib.sha384),
'HS512': HMACAlgorithm(hashlib.sha512),
'none': NoneAlgorithm(),
}
#: The default algorithm to use for signature generation
default_algorithm = 'HS256'
default_serializer = compact_json
def __init__(self, secret_key, salt=None, serializer=None,
signer=None, signer_kwargs=None, algorithm_name=None):
Serializer.__init__(self, secret_key, salt, serializer,
signer, signer_kwargs)
if algorithm_name is None:
algorithm_name = self.default_algorithm
self.algorithm_name = algorithm_name
self.algorithm = self.make_algorithm(algorithm_name)
def load_payload(self, payload, return_header=False):
payload = want_bytes(payload)
if b'.' not in payload:
raise BadPayload('No "." found in value')
base64d_header, base64d_payload = payload.split(b'.', 1)
try:
json_header = base64_decode(base64d_header)
json_payload = base64_decode(base64d_payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
header = Serializer.load_payload(self, json_header,
serializer=json)
if not isinstance(header, dict):
raise BadPayload('Header payload is not a JSON object')
payload = Serializer.load_payload(self, json_payload)
if return_header:
return payload, header
return payload
def dump_payload(self, header, obj):
base64d_header = base64_encode(self.serializer.dumps(header))
base64d_payload = base64_encode(self.serializer.dumps(obj))
return base64d_header + b'.' + base64d_payload
def make_algorithm(self, algorithm_name):
try:
return self.jws_algorithms[algorithm_name]
except KeyError:
raise NotImplementedError('Algorithm not supported')
def make_signer(self, salt=None, algorithm=None):
if salt is None:
salt = self.salt
key_derivation = 'none' if salt is None else None
if algorithm is None:
algorithm = self.algorithm
return self.signer(self.secret_key, salt=salt, sep='.',
key_derivation=key_derivation, algorithm=algorithm)
def make_header(self, header_fields):
header = header_fields.copy() if header_fields else {}
header['alg'] = self.algorithm_name
return header
def dumps(self, obj, salt=None, header_fields=None):
"""Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
also allows for specifying additional fields to be included in the JWS
Header.
"""
header = self.make_header(header_fields)
signer = self.make_signer(salt, self.algorithm)
return signer.sign(self.dump_payload(header, obj))
def loads(self, s, salt=None, return_header=False):
"""Reverse of :meth:`dumps`. If requested via `return_header` it will
return a tuple of payload and header.
"""
payload, header = self.load_payload(
self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
return_header=True)
if header.get('alg') != self.algorithm_name:
raise BadSignature('Algorithm mismatch')
if return_header:
return payload, header
return payload
def loads_unsafe(self, s, salt=None, return_header=False):
kwargs = {'return_header': return_header}
return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
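# Minimal usage sketch (key and claims are illustrative):
#
#   jws = JSONWebSignatureSerializer('secret-key')
#   token = jws.dumps({'sub': 'user'})  # <header>.<payload>.<signature>
#   jws.loads(token)                    # -> {'sub': 'user'}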
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
"""Works like the regular :class:`JSONWebSignatureSerializer` but also
records the time of the signing and can be used to expire signatures.
    JWS currently does not specify this behavior but it mentions a possible
    extension like this in the spec. The expiry date is encoded into the
    header similarly to what is specified in `draft-ietf-oauth-json-web-token
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
    The unsign method can raise a :exc:`SignatureExpired` exception if the
unsigning failed because the signature is expired. This exception is a
subclass of :exc:`BadSignature`.
"""
DEFAULT_EXPIRES_IN = 3600
def __init__(self, secret_key, expires_in=None, **kwargs):
JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
if expires_in is None:
expires_in = self.DEFAULT_EXPIRES_IN
self.expires_in = expires_in
def make_header(self, header_fields):
header = JSONWebSignatureSerializer.make_header(self, header_fields)
iat = self.now()
exp = iat + self.expires_in
header['iat'] = iat
header['exp'] = exp
return header
def loads(self, s, salt=None, return_header=False):
payload, header = JSONWebSignatureSerializer.loads(
self, s, salt, return_header=True)
if 'exp' not in header:
raise BadSignature('Missing expiry date', payload=payload)
if not (isinstance(header['exp'], number_types)
and header['exp'] > 0):
raise BadSignature('expiry date is not an IntDate',
payload=payload)
if header['exp'] < self.now():
raise SignatureExpired('Signature expired', payload=payload,
date_signed=self.get_issue_date(header))
if return_header:
return payload, header
return payload
def get_issue_date(self, header):
rv = header.get('iat')
if isinstance(rv, number_types):
return datetime.utcfromtimestamp(int(rv))
def now(self):
return int(time.time())
class URLSafeSerializerMixin(object):
"""Mixed in with a regular serializer it will attempt to zlib compress
the string to make it shorter if necessary. It will also base64 encode
the string so that it can safely be placed in a URL.
"""
def load_payload(self, payload):
decompress = False
if payload.startswith(b'.'):
payload = payload[1:]
decompress = True
try:
json = base64_decode(payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
if decompress:
try:
json = zlib.decompress(json)
except Exception as e:
raise BadPayload('Could not zlib decompress the payload before '
'decoding the payload', original_error=e)
return super(URLSafeSerializerMixin, self).load_payload(json)
def dump_payload(self, obj):
json = super(URLSafeSerializerMixin, self).dump_payload(obj)
is_compressed = False
compressed = zlib.compress(json)
if len(compressed) < (len(json) - 1):
json = compressed
is_compressed = True
base64d = base64_encode(json)
if is_compressed:
base64d = b'.' + base64d
return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
"""Works like :class:`Serializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase characters of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
"""Works like :class:`TimedSerializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase characters of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
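# Minimal usage sketch (key, salt and payload are illustrative), e.g. for
# short-lived confirmation links:
#
#   s = URLSafeTimedSerializer('secret-key', salt='activate')
#   token = s.dumps({'user_id': 42})  # URL safe, optionally zlib compressed
#   s.loads(token, max_age=3600)      # -> {'user_id': 42}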
| apache-2.0 |
rruebner/odoo | addons/claim_from_delivery/__init__.py | 374 | 1053 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
leonevo/euao | tornadows/xmltypes.py | 1 | 6469 | #!/usr/bin/env python
#
# Copyright 2011 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module implements the primitive datatypes defined by XML.
Array is defined for arrays of elements with their respective datatype.
"""
import inspect
from tornadows import complextypes
def createElementXML(name,type,prefix='xsd'):
""" Function used for the creation of xml elements. """
return b'<%s:element name="%s" type="%s:%s"/>'%(prefix,name,prefix,type)
def createArrayXML(name,type,prefix='xsd',maxoccurs=None):
""" Function used for the creation of xml complexElements """
complexType = b'<%s:complexType name="%sParams">\n'%(prefix,name)
complexType += b'<%s:sequence>\n'%prefix
if maxoccurs == None:
complexType += b'<%s:element name="value" type="%s:%s" maxOccurs="unbounded"/>\n'%(prefix,prefix,type)
else:
complexType += b'<%s:element name="value" type="%s:%s" maxOccurs="%d"/>\n'%(prefix,prefix,type,maxoccurs)
complexType += b'</%s:sequence>\n'%prefix
complexType += b'</%s:complexType>\n'%prefix
complexType += b'<%s:element name="%s" type="tns:%sParams"/>\n'%(prefix,name,name)
return complexType
class Array:
""" Create arrays of xml elements.
Here an example:
@webservices(_params=xmltypes.Array(xmltypes.Integer),_returns=xmltypes.Integer)
    def function(self, list_of_elements):
for e in list_of_elements:
# Do something with the element
return len(list_of_elements)
    xmltypes.Array(xmltypes.Integer) generates an xml element in the schema definition:
<xsd:element name="arrayOfElement" type="xsd:integer" maxOccurs="unbounded"/>
    this makes the function parameter list_of_elements a python list.
    if you specify xmltypes.Array(xmltypes.Integer,10), the following is generated:
<xsd:element name="arrayOfElement" type="xsd:integer" maxOccurs="10"/>
"""
def __init__(self,type,maxOccurs=None):
self._type = type
self._n = maxOccurs
def createArray(self,name):
type = None
if inspect.isclass(self._type) and not issubclass(self._type,PrimitiveType):
type = complextypes.createPythonType2XMLType(self._type.__name__)
else:
type = self._type.getType(self._type)
return createArrayXML(name,type,'xsd',self._n)
def createType(self,name):
prefix = 'xsd'
type = None
if inspect.isclass(self._type) and not issubclass(self._type,PrimitiveType):
type = complextypes.createPythonType2XMLType(self._type.__name__)
else:
type = self._type.getType(self._type)
maxoccurs = self._n
complexType = b''
if self._n == None:
complexType += b'<%s:element name="%s" type="%s:%s" maxOccurs="unbounded"/>\n'%(prefix,name,prefix,type)
else:
complexType += b'<%s:element name="%s" type="%s:%s" maxOccurs="%d"/>\n'%(prefix,name,prefix,type,maxoccurs)
return complexType
def genType(self,v):
value = None
if inspect.isclass(self._type) and issubclass(self._type,PrimitiveType):
value = self._type.genType(v)
elif hasattr(self._type,'__name__'):
value = complextypes.convert(self._type.__name__,v)
# Convert str to bool
if value == 'true':
value = True
elif value == 'false':
value = False
return value
class PrimitiveType:
""" Class father for all derived types. """
pass
class Integer(PrimitiveType):
""" 1. XML primitive type : integer """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'integer')
@staticmethod
def getType(self):
return 'integer'
@classmethod
def genType(self,v):
return int(v)
class Decimal(PrimitiveType):
""" 2. XML primitive type : decimal """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'decimal')
@staticmethod
def getType(self):
return 'decimal'
@classmethod
def genType(self,v):
return float(v)
class Double(PrimitiveType):
""" 3. XML primitive type : double """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'double')
@staticmethod
def getType(self):
return 'double'
@classmethod
def genType(self,v):
return float(v)
class Float(PrimitiveType):
""" 4. XML primitive type : float """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'float')
@staticmethod
def getType(self):
return 'float'
@classmethod
def genType(self,v):
return float(v)
class Duration(PrimitiveType):
""" 5. XML primitive type : duration """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'duration')
@staticmethod
def getType(self):
return 'duration'
@classmethod
def genType(self,v):
return str(v)
class Date(PrimitiveType):
""" 6. XML primitive type : date """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'date')
@staticmethod
def getType(self):
return 'date'
@classmethod
def genType(self,v):
return str(v)
class Time(PrimitiveType):
""" 7. XML primitive type : time """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'time')
@staticmethod
def getType(self):
return 'time'
@classmethod
def genType(self,v):
return str(v)
class DateTime(PrimitiveType):
""" 8. XML primitive type : dateTime """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'dateTime')
@staticmethod
def getType(self):
return 'dateTime'
@classmethod
def genType(self,v):
return str(v)
class String(PrimitiveType):
""" 9. XML primitive type : string """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'string')
@staticmethod
def getType(self):
return 'string'
@classmethod
def genType(self,v):
return str(v)
class Boolean(PrimitiveType):
""" 10. XML primitive type : boolean """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'boolean')
@staticmethod
def getType(self):
return 'boolean'
@classmethod
def genType(self,v):
return str(v).lower()
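# Minimal usage sketch (element names are illustrative):
#
#   createElementXML('age', 'integer')
#   # -> '<xsd:element name="age" type="xsd:integer"/>'
#   Array(Integer, 10).createArray('ids')
#   # -> a complexType wrapping <xsd:element ... maxOccurs="10"/>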
| apache-2.0 |
CXQERP/ODOOERP | addons/note/note.py | 223 | 8893 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
class note_stage(osv.osv):
""" Category of Note """
_name = "note.stage"
_description = "Note Stage"
_columns = {
'name': fields.char('Stage Name', translate=True, required=True),
'sequence': fields.integer('Sequence', help="Used to order the note stages"),
'user_id': fields.many2one('res.users', 'Owner', help="Owner of the note stage.", required=True, ondelete='cascade'),
'fold': fields.boolean('Folded by Default'),
}
_order = 'sequence asc'
_defaults = {
'fold': 0,
'user_id': lambda self, cr, uid, ctx: uid,
'sequence' : 1,
}
class note_tag(osv.osv):
_name = "note.tag"
_description = "Note Tag"
_columns = {
'name' : fields.char('Tag Name', required=True),
}
class note_note(osv.osv):
""" Note """
_name = 'note.note'
_inherit = ['mail.thread']
_description = "Note"
#writing method (no modification of values)
def name_create(self, cr, uid, name, context=None):
rec_id = self.create(cr, uid, {'memo': name}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
    # read the first line (convert html into text)
def _get_note_first_line(self, cr, uid, ids, name="", args={}, context=None):
res = {}
for note in self.browse(cr, uid, ids, context=context):
res[note.id] = (note.memo and html2plaintext(note.memo) or "").strip().replace('*','').split("\n")[0]
return res
def onclick_note_is_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': False, 'date_done': fields.date.today()}, context=context)
def onclick_note_not_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': True}, context=context)
#return the default stage for the uid user
def _get_default_stage_id(self,cr,uid,context=None):
ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
return ids and ids[0] or False
def _set_stage_per_user(self, cr, uid, id, name, value, args=None, context=None):
note = self.browse(cr, uid, id, context=context)
        if not value:
            return False
stage_ids = [value] + [stage.id for stage in note.stage_ids if stage.user_id.id != uid ]
return self.write(cr, uid, [id], {'stage_ids': [(6, 0, set(stage_ids))]}, context=context)
def _get_stage_per_user(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
for stage in record.stage_ids:
if stage.user_id.id == uid:
result[record.id] = stage.id
return result
_columns = {
'name': fields.function(_get_note_first_line,
string='Note Summary',
type='text', store=True),
'user_id': fields.many2one('res.users', 'Owner'),
'memo': fields.html('Note Content'),
'sequence': fields.integer('Sequence'),
'stage_id': fields.function(_get_stage_per_user,
fnct_inv=_set_stage_per_user,
string='Stage',
type='many2one',
relation='note.stage'),
'stage_ids': fields.many2many('note.stage','note_stage_rel','note_id','stage_id','Stages of Users'),
'open': fields.boolean('Active', track_visibility='onchange'),
'date_done': fields.date('Date done'),
'color': fields.integer('Color Index'),
'tag_ids' : fields.many2many('note.tag','note_tags_rel','note_id','tag_id','Tags'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx=None: uid,
'open' : 1,
'stage_id' : _get_default_stage_id,
}
_order = 'sequence'
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if groupby and groupby[0]=="stage_id":
#search all stages
current_stage_ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
if current_stage_ids: #if the user have some stages
stages = self.pool['note.stage'].browse(cr, uid, current_stage_ids, context=context)
result = [{ #notes by stage for stages user
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('stage_ids.id', '=', stage.id)],
'stage_id': (stage.id, stage.name),
'stage_id_count': self.search(cr,uid, domain+[('stage_ids', '=', stage.id)], context=context, count=True),
'__fold': stage.fold,
} for stage in stages]
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain+[('stage_ids', 'not in', current_stage_ids)], context=context, count=True)
if nb_notes_ws:
# add note to the first column if it's the first stage
dom_not_in = ('stage_ids', 'not in', current_stage_ids)
if result and result[0]['stage_id'][0] == current_stage_ids[0]:
dom_in = result[0]['__domain'].pop()
result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
result[0]['stage_id_count'] += nb_notes_ws
else:
# add the first stage column
result = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [dom_not_in],
'stage_id': (stages[0].id, stages[0].name),
'stage_id_count':nb_notes_ws,
                        '__fold': stages[0].fold,
}] + result
else: # if stage_ids is empty
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain, context=context, count=True)
if nb_notes_ws:
result = [{ #notes for unknown stage
'__context': {'group_by': groupby[1:]},
'__domain': domain,
'stage_id': False,
'stage_id_count':nb_notes_ws
}]
else:
result = []
return result
else:
return super(note_note, self).read_group(cr, uid, domain, fields, groupby,
offset=offset, limit=limit, context=context, orderby=orderby,lazy=lazy)
#upgrade config setting page to configure pad, fancy and tags mode
class note_base_config_settings(osv.osv_memory):
_inherit = 'base.config.settings'
_columns = {
'module_note_pad': fields.boolean('Use collaborative pads (etherpad)'),
'group_note_fancy': fields.boolean('Use fancy layouts for notes', implied_group='note.group_note_fancy'),
}
class res_users(osv.Model):
_name = 'res.users'
_inherit = ['res.users']
def create(self, cr, uid, data, context=None):
user_id = super(res_users, self).create(cr, uid, data, context=context)
note_obj = self.pool['note.stage']
data_obj = self.pool['ir.model.data']
is_employee = self.has_group(cr, user_id, 'base.group_user')
if is_employee:
for n in range(5):
xmlid = 'note_stage_%02d' % (n,)
try:
_model, stage_id = data_obj.get_object_reference(cr, SUPERUSER_ID, 'note', xmlid)
except ValueError:
continue
note_obj.copy(cr, SUPERUSER_ID, stage_id, default={'user_id': user_id}, context=context)
return user_id
| agpl-3.0 |
sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/kombu/tests/transport/virtual/test_scheduling.py | 38 | 1835 | from __future__ import absolute_import
from kombu.transport.virtual.scheduling import FairCycle
from kombu.tests.case import Case
class MyEmpty(Exception):
pass
def consume(fun, n):
r = []
for i in range(n):
r.append(fun())
return r
class test_FairCycle(Case):
def test_cycle(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r, timeout=None):
return r
# cycle should be ['a', 'b', 'c', 'd', 'e', ... repeat]
cycle = FairCycle(echo, resources, MyEmpty)
for i in range(len(resources)):
self.assertEqual(cycle.get(), (resources[i],
resources[i]))
for i in range(len(resources)):
self.assertEqual(cycle.get(), (resources[i],
resources[i]))
def test_cycle_breaks(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r):
if r == 'c':
raise MyEmpty(r)
return r
cycle = FairCycle(echo, resources, MyEmpty)
self.assertEqual(
consume(cycle.get, len(resources)),
[('a', 'a'), ('b', 'b'), ('d', 'd'),
('e', 'e'), ('a', 'a')],
)
self.assertEqual(
consume(cycle.get, len(resources)),
[('b', 'b'), ('d', 'd'), ('e', 'e'),
('a', 'a'), ('b', 'b')],
)
cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty)
with self.assertRaises(MyEmpty):
consume(cycle2.get, 3)
def test_cycle_no_resources(self):
cycle = FairCycle(None, [], MyEmpty)
cycle.pos = 10
with self.assertRaises(MyEmpty):
cycle._next()
def test__repr__(self):
self.assertTrue(repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty)))
| bsd-3-clause |
J861449197/edx-platform | lms/djangoapps/staticbook/views.py | 91 | 6351 | """
Views for serving static textbooks.
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.annotator_token import retrieve_token
from courseware.access import has_access
from courseware.courses import get_course_with_access
from notes.utils import notes_enabled_for_course
from static_replace import replace_static_urls
@login_required
def index(request, course_id, book_index, page=None):
"""
Serve static image-based textbooks.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.textbooks[book_index]
table_of_contents = textbook.table_of_contents
if page is None:
page = textbook.start_page
return render_to_response(
'staticbook.html',
{
'book_index': book_index, 'page': int(page),
'course': course,
'book_url': textbook.book_url,
'table_of_contents': table_of_contents,
'start_page': textbook.start_page,
'end_page': textbook.end_page,
'staff_access': staff_access,
},
)
def remap_static_url(original_url, course):
"""Remap a URL in the ways the course requires."""
# Ick: this should be possible without having to quote and unquote the URL...
input_url = "'" + original_url + "'"
output_url = replace_static_urls(
input_url,
getattr(course, 'data_dir', None),
course_id=course.id,
static_asset_path=course.static_asset_path
)
# strip off the quotes again...
return output_url[1:-1]
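# Example (paths illustrative): a textbook URL such as
# '/static/book/cover.png' is rewritten by replace_static_urls to the
# course-specific asset location before being handed to the template:
#
#   remap_static_url('/static/book/cover.png', course)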
@login_required
def pdf_index(request, course_id, book_index, chapter=None, page=None):
"""
Display a PDF textbook.
course_id: course for which to display text. The course should have
"pdf_textbooks" property defined.
book index: zero-based index of which PDF textbook to display.
chapter: (optional) one-based index into the chapter array of textbook PDFs to display.
Defaults to first chapter. Specifying this assumes that there are separate PDFs for
each chapter in a textbook.
page: (optional) one-based page number to display within the PDF. Defaults to first page.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.pdf_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.pdf_textbooks[book_index]
viewer_params = '&file='
current_url = ''
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
viewer_params += textbook['url']
current_url = textbook['url']
# then remap all the chapter URLs as well, if they are provided.
current_chapter = None
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
if chapter is not None:
current_chapter = textbook['chapters'][int(chapter) - 1]
else:
current_chapter = textbook['chapters'][0]
viewer_params += current_chapter['url']
current_url = current_chapter['url']
viewer_params += '#zoom=page-fit&disableRange=true'
if page is not None:
viewer_params += '&page={}'.format(page)
if request.GET.get('viewer', '') == 'true':
template = 'pdf_viewer.html'
else:
template = 'static_pdfbook.html'
return render_to_response(
template,
{
'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'page': page,
'viewer_params': viewer_params,
'current_chapter': current_chapter,
'staff_access': staff_access,
'current_url': current_url,
},
)
@login_required
def html_index(request, course_id, book_index, chapter=None):
"""
Display an HTML textbook.
course_id: course for which to display text. The course should have
"html_textbooks" property defined.
book index: zero-based index of which HTML textbook to display.
chapter: (optional) one-based index into the chapter array of textbook HTML files to display.
Defaults to first chapter. Specifying this assumes that there are separate HTML files for
each chapter in a textbook.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
notes_enabled = notes_enabled_for_course(course)
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.html_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.html_textbooks[book_index]
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
student = request.user
return render_to_response(
'static_htmlbook.html',
{
'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'student': student,
'staff_access': staff_access,
'notes_enabled': notes_enabled,
'storage': course.annotation_storage_url,
'token': retrieve_token(student.email, course.annotation_token_secret),
},
)
| agpl-3.0 |
jcpowermac/ansible | lib/ansible/module_utils/facts/hardware/linux.py | 7 | 27299 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import errno
import glob
import json
import os
import re
import sys
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import bytes_to_human
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
# import this as a module to ensure we get the same module instance
from ansible.module_utils.facts import timeout
def get_partition_uuid(partname):
try:
uuids = os.listdir("/dev/disk/by-uuid")
except OSError:
return
for uuid in uuids:
dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
if dev == ("/dev/" + partname):
return uuid
return None
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
# Originally only had these four as toplevelfacts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
# regex used against findmnt output to detect bind mounts
BIND_MOUNT_RE = re.compile(r'.*\]')
# regex used against mtab content to find entries that are bind mounts
MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
lvm_facts = self.get_lvm_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except timeout.TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(lvm_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_memory_facts(self):
memory_facts = {}
if not os.access("/proc/meminfo", os.R_OK):
return memory_facts
memstats = {}
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memory_facts["%s_mb" % key.lower()] = int(val) // 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) // 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
memory_facts['memory_mb'] = {
'real': {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache': {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap': {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
return memory_facts
def get_cpu_facts(self, collected_facts=None):
cpu_facts = {}
collected_facts = collected_facts or {}
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return cpu_facts
cpu_facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
                    if 'vme' not in data[1]:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
# 'ncpus active' is SPARC attribute
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
if 'processor' not in cpu_facts:
cpu_facts['processor'] = []
cpu_facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
cpu_facts['processor_cores'] = int(data[1].strip())
elif key == 'ncpus active':
i = int(data[1].strip())
# Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le)
if vendor_id_occurrence > 0:
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
# FIXME
if collected_facts.get('ansible_architecture') != 's390x':
if xen_paravirt:
cpu_facts['processor_count'] = i
cpu_facts['processor_cores'] = i
cpu_facts['processor_threads_per_core'] = 1
cpu_facts['processor_vcpus'] = i
else:
if sockets:
cpu_facts['processor_count'] = len(sockets)
else:
cpu_facts['processor_count'] = i
socket_values = list(sockets.values())
if socket_values and socket_values[0]:
cpu_facts['processor_cores'] = socket_values[0]
else:
cpu_facts['processor_cores'] = 1
core_values = list(cores.values())
if core_values:
cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores']
else:
cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores']
cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] *
cpu_facts['processor_count'] * cpu_facts['processor_cores'])
return cpu_facts
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
dmi_facts = {}
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade"]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key, path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
dmi_facts['form_factor'] = 'unknown (%s)' % data
else:
dmi_facts[key] = data
else:
dmi_facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
dmi_facts[k] = thisvalue
else:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
return dmi_facts
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err
def _lsblk_uuid(self):
uuids = {}
lsblk_path = self.module.get_bin_path("lsblk")
if not lsblk_path:
return uuids
rc, out, err = self._run_lsblk(lsblk_path)
if rc != 0:
return uuids
# each line will be in format:
# <devicename><some whitespace><uuid>
# /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
for lsblk_line in out.splitlines():
if not lsblk_line:
continue
line = lsblk_line.strip()
fields = line.rsplit(None, 1)
if len(fields) < 2:
continue
device_name, uuid = fields[0].strip(), fields[1].strip()
if device_name in uuids:
continue
uuids[device_name] = uuid
return uuids
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
return rc, out, err
def _find_bind_mounts(self):
bind_mounts = set()
findmnt_path = self.module.get_bin_path("findmnt")
if not findmnt_path:
return bind_mounts
rc, out, err = self._run_findmnt(findmnt_path)
if rc != 0:
return bind_mounts
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
for line in out.splitlines():
fields = line.split()
# fields[0] is the TARGET, fields[1] is the SOURCE
if len(fields) < 2:
continue
# bind mounts will have a [/directory_name] in the SOURCE column
if self.BIND_MOUNT_RE.match(fields[1]):
bind_mounts.add(fields[0])
return bind_mounts
def _mtab_entries(self):
mtab_file = '/etc/mtab'
if not os.path.exists(mtab_file):
mtab_file = '/proc/mounts'
mtab = get_file_content(mtab_file, '')
mtab_entries = []
for line in mtab.splitlines():
fields = line.split()
if len(fields) < 4:
continue
mtab_entries.append(fields)
return mtab_entries
@timeout.timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
bind_mounts = self._find_bind_mounts()
uuids = self._lsblk_uuid()
mtab_entries = self._mtab_entries()
mounts = []
for fields in mtab_entries:
device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
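            # Keep only real mount sources: absolute device paths and
            # NFS-style 'host:/export' entries; everything else is skipped.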
if not device.startswith('/') and ':/' not in device:
continue
if fstype == 'none':
continue
mount_statvfs_info = get_mount_size(mount)
if mount in bind_mounts:
# only add if not already there, we might have a plain /etc/mtab
if not self.MTAB_BIND_MOUNT_RE.match(options):
options += ",bind"
mount_info = {'mount': mount,
'device': device,
'fstype': fstype,
'options': options,
'uuid': uuids.get(device, 'N/A')}
mount_info.update(mount_statvfs_info)
mounts.append(mount_info)
mount_facts['mounts'] = mounts
return mount_facts
def get_device_links(self, link_dir):
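        # Build a reverse map {symlink target: [link names]} for a directory
        # such as /dev/disk/by-uuid; unreadable links are skipped.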
if not os.path.exists(link_dir):
return {}
try:
retval = collections.defaultdict(set)
for entry in os.listdir(link_dir):
try:
target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
retval[target].add(entry)
except OSError:
continue
return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
except OSError:
return {}
def get_all_device_owners(self):
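        # Paths look like /sys/block/<holder>/slaves/<slave>: after split('/')
        # the holder is at index 3 and the slave at index 5, so each slave is
        # mapped to the set of devices stacked on top of it.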
try:
retval = collections.defaultdict(set)
for path in glob.glob('/sys/block/*/slaves/*'):
elements = path.split('/')
device = elements[3]
target = elements[5]
retval[target].add(device)
return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
except OSError:
return {}
def get_all_device_links(self):
return {
'ids': self.get_device_links('/dev/disk/by-id'),
'uuids': self.get_device_links('/dev/disk/by-uuid'),
'labels': self.get_device_links('/dev/disk/by-label'),
'masters': self.get_all_device_owners(),
}
def get_holders(self, block_dev_dict, sysdir):
block_dev_dict['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
block_dev_dict['holders'].append(name)
else:
block_dev_dict['holders'].append(folder)
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return device_facts
devs_wwn = {}
try:
devs_by_id = os.listdir("/dev/disk/by-id")
except OSError:
pass
else:
for link_name in devs_by_id:
if link_name.startswith("wwn-"):
try:
wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
except OSError:
continue
devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
links = self.get_all_device_links()
device_facts['device_links'] = links
for block in block_devs:
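            # Default to virtual; on old sysfs layouts with no symlinks, a
            # 'device' subfolder below marks the block device as real hardware.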
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
d = {}
d['virtual'] = virtual
d['links'] = {}
for (link_type, link_values) in iteritems(links):
d['links'][link_type] = link_values.get(block, [])
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(sysdir + "/device/" + key)
sg_inq = self.module.get_bin_path('sg_inq')
if sg_inq:
device = "/dev/%s" % (block)
rc, drivedata, err = self.module.run_command([sg_inq, device])
if rc == 0:
serial = re.search(r"Unit serial number:\s+(\w+)", drivedata)
if serial:
d['serial'] = serial.group(1)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key, test in [('removable', '/removable'),
('support_discard', '/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
if diskname in devs_wwn:
d['wwn'] = devs_wwn[diskname]
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + r"\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['links'] = {}
for (link_type, link_values) in iteritems(links):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content(part_sysdir + "/start", 0)
part['sectors'] = get_file_content(part_sysdir + "/size", 0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(r".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
device_facts['devices'][diskname] = d
return device_facts
def get_uptime_facts(self):
uptime_facts = {}
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
return uptime_facts
def _find_mapper_device_name(self, dm_device):
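        # Translate a raw device-mapper node into its /dev/mapper alias,
        # e.g. '/dev/dm-0' -> '/dev/mapper/vg0-root' (names illustrative).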
dm_prefix = '/dev/dm-'
mapper_device = dm_device
if dm_device.startswith(dm_prefix):
dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
if rc == 0:
mapper_device = mapper_prefix + dm_name.rstrip()
return mapper_device
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
lvm_facts = {}
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
vgs_path = self.module.get_bin_path('vgs')
# vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs = {}
if vgs_path:
rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.strip().split(',')
vgs[items[0]] = {'size_g': items[-2],
'free_g': items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = self.module.get_bin_path('lvs')
# lvs fields:
# LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.strip().split(',')
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
pvs_path = self.module.get_bin_path('pvs')
# pvs fields: PV VG #Fmt #Attr PSize PFree
pvs = {}
if pvs_path:
rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
for pv_line in pv_lines.splitlines():
items = pv_line.strip().split(',')
pvs[self._find_mapper_device_name(items[0])] = {
'size_g': items[4],
'free_g': items[5],
'vg': items[1]}
lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
return lvm_facts
class LinuxHardwareCollector(HardwareCollector):
_platform = 'Linux'
_fact_class = LinuxHardware
required_facts = set(['platform'])
| gpl-3.0 |
lcpt/xc | misc/test/defSections.py | 1 | 3450 | # Source Generated with Decompyle++
# File: defSections.pyc (Python 2.7)
from __future__ import division
import xc_base
import geom
import xc
from materials.ehe import EHE_materials
areaFi8 = 5e-05
areaFi10 = 7.85e-05
areaFi12 = 0.000113
areaFi16 = 0.000201
areaFi20 = 0.000314
areaFi25 = 0.0004608
def getDiagIntSection2(mdlr):
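    # Rectangular RC section; identifiers kept from the Spanish source:
    # ancho = width (1 m), canto = depth (0.30 m), recpos/recneg = cover to
    # the top/bottom bars (0.068 m here). Each face carries 5 phi12 bars.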
ancho = 1
canto = 0.3
recpos = 0.06 + 0.008
recneg = 0.06 + 0.008
geomSecHA1 = mdlr.getMaterialHandler.newSectionGeometry('geomSecHA1')
regions = geomSecHA1.getRegions
    concrete = regions.newQuadRegion(EHE_materials.HA30.nmbDiagD)
concrete.nDivIJ = 10
concrete.nDivJK = 10
concrete.pMin = geom.Pos2d(-ancho / 2, -canto / 2)
concrete.pMax = geom.Pos2d(ancho / 2, canto / 2)
reinforcement = geomSecHA1.getReinfLayers
bottomReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
bottomReinforcement.numReinfBars = 5
bottomReinforcement.barArea = areaFi12
bottomReinforcement.p1 = geom.Pos2d(-ancho / 2 + recneg, -canto / 2 + recneg)
bottomReinforcement.p2 = geom.Pos2d(ancho / 2 - recneg, -canto / 2 + recneg)
topReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
topReinforcement.numReinfBars = 5
topReinforcement.barArea = areaFi12
topReinforcement.p1 = geom.Pos2d(-ancho / 2 + recpos, canto / 2 - recpos)
topReinforcement.p2 = geom.Pos2d(ancho / 2 - recpos, canto / 2 - recpos)
materiales = mdlr.getMaterialHandler
secHA1 = materiales.newMaterial('fiber_section_3d', 'secHA1')
fiberSectionRepr = secHA1.getFiberSectionRepr()
fiberSectionRepr.setGeomNamed('geomSecHA1')
secHA1.setupFibers()
param = xc.InteractionDiagramParameters()
param.concreteTag = EHE_materials.HA30.tagDiagD
param.reinforcementTag = EHE_materials.B500S.tagDiagD
diagIntSecHA1 = materiales.calcInteractionDiagram('secHA1', param)
return diagIntSecHA1
def getDiagIntSection1(mdlr):
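    # Same 1 m x 0.30 m section as above, but with 7 phi20 bars per face and
    # a larger cover (recpos/recneg = 0.084 m).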
ancho = 1
canto = 0.3
recpos = 0.076 + 0.008
recneg = 0.076 + 0.008
geomSecHA2 = mdlr.getMaterialHandler.newSectionGeometry('geomSecHA2')
regions = geomSecHA2.getRegions
    concrete = regions.newQuadRegion(EHE_materials.HA30.nmbDiagD)
concrete.nDivIJ = 10
concrete.nDivJK = 10
concrete.pMin = geom.Pos2d(-ancho / 2, -canto / 2)
concrete.pMax = geom.Pos2d(ancho / 2, canto / 2)
reinforcement = geomSecHA2.getReinfLayers
bottomReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
bottomReinforcement.numReinfBars = 7
bottomReinforcement.barArea = areaFi20
bottomReinforcement.p1 = geom.Pos2d(-ancho / 2 + recneg, -canto / 2 + recneg)
bottomReinforcement.p2 = geom.Pos2d(ancho / 2 - recneg, -canto / 2 + recneg)
topReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
topReinforcement.numReinfBars = 7
topReinforcement.barArea = areaFi20
topReinforcement.p1 = geom.Pos2d(-ancho / 2 + recpos, canto / 2 - recpos)
topReinforcement.p2 = geom.Pos2d(ancho / 2 - recpos, canto / 2 - recpos)
materiales = mdlr.getMaterialHandler
secHA2 = materiales.newMaterial('fiber_section_3d', 'secHA2')
fiberSectionRepr = secHA2.getFiberSectionRepr()
fiberSectionRepr.setGeomNamed('geomSecHA2')
secHA2.setupFibers()
param = xc.InteractionDiagramParameters()
param.concreteTag = EHE_materials.HA30.tagDiagD
param.reinforcementTag = EHE_materials.B500S.tagDiagD
diagIntSecHA2 = materiales.calcInteractionDiagram('secHA2', param)
return diagIntSecHA2
| gpl-3.0 |
vainotuisk/icecreamratings | ENV/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.py | 101 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.1"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| bsd-3-clause |
SpectreJan/gnuradio | gr-utils/python/modtool/gr-newmod/docs/doxygen/doxyxml/base.py | 333 | 6794 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Defines a base class. Classes built upon it provide more user-friendly
interfaces to the doxygen XML docs than the generated classes do.
"""
import os
import pdb
from xml.parsers.expat import ExpatError
from generated import compound
class Base(object):
class Duplicate(StandardError):
pass
class NoSuchMember(StandardError):
pass
class ParsingError(StandardError):
pass
def __init__(self, parse_data, top=None):
self._parsed = False
self._error = False
self._parse_data = parse_data
self._members = []
self._dict_members = {}
self._in_category = {}
self._data = {}
if top is not None:
self._xml_path = top._xml_path
# Set up holder of references
else:
top = self
self._refs = {}
self._xml_path = parse_data
self.top = top
@classmethod
def from_refid(cls, refid, top=None):
""" Instantiate class from a refid rather than parsing object. """
# First check to see if its already been instantiated.
if top is not None and refid in top._refs:
return top._refs[refid]
# Otherwise create a new instance and set refid.
inst = cls(None, top=top)
inst.refid = refid
inst.add_ref(inst)
return inst
@classmethod
def from_parse_data(cls, parse_data, top=None):
refid = getattr(parse_data, 'refid', None)
if refid is not None and top is not None and refid in top._refs:
return top._refs[refid]
inst = cls(parse_data, top=top)
if refid is not None:
inst.refid = refid
inst.add_ref(inst)
return inst
def add_ref(self, obj):
if hasattr(obj, 'refid'):
self.top._refs[obj.refid] = obj
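    # Subclasses override this with the member classes they may contain;
    # get_cls below returns the first whose can_parse accepts the object.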
mem_classes = []
def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
        raise StandardError("Did not find a class for object '%s'."
                            % (mem.get_name()))
def convert_mem(self, mem):
try:
cls = self.get_cls(mem)
converted = cls.from_parse_data(mem, self.top)
if converted is None:
raise StandardError('No class matched this object.')
self.add_ref(converted)
return converted
except StandardError, e:
print e
@classmethod
def includes(cls, inst):
return isinstance(inst, cls)
@classmethod
def can_parse(cls, obj):
return False
def _parse(self):
self._parsed = True
def _get_dict_members(self, cat=None):
"""
        For a given category, return a dictionary mapping member names to
        members of that category. Duplicated names are mapped to the
        Duplicate exception class, which get_member raises on lookup.
"""
self.confirm_no_error()
if cat not in self._dict_members:
new_dict = {}
for mem in self.in_category(cat):
if mem.name() not in new_dict:
new_dict[mem.name()] = mem
else:
new_dict[mem.name()] = self.Duplicate
self._dict_members[cat] = new_dict
return self._dict_members[cat]
def in_category(self, cat):
self.confirm_no_error()
if cat is None:
return self._members
if cat not in self._in_category:
self._in_category[cat] = [mem for mem in self._members
if cat.includes(mem)]
return self._in_category[cat]
def get_member(self, name, cat=None):
self.confirm_no_error()
# Check if it's in a namespace or class.
bits = name.split('::')
first = bits[0]
rest = '::'.join(bits[1:])
member = self._get_dict_members(cat).get(first, self.NoSuchMember)
# Raise any errors that are returned.
if member in set([self.NoSuchMember, self.Duplicate]):
raise member()
if rest:
return member.get_member(rest, cat=cat)
return member
def has_member(self, name, cat=None):
try:
mem = self.get_member(name, cat=cat)
return True
except self.NoSuchMember:
return False
def data(self):
self.confirm_no_error()
return self._data
def members(self):
self.confirm_no_error()
return self._members
def process_memberdefs(self):
mdtss = []
for sec in self._retrieved_data.compounddef.sectiondef:
mdtss += sec.memberdef
# At the moment we lose all information associated with sections.
# Sometimes a memberdef is in several sectiondef.
# We make sure we don't get duplicates here.
uniques = set([])
for mem in mdtss:
converted = self.convert_mem(mem)
pair = (mem.name, mem.__class__)
if pair not in uniques:
uniques.add(pair)
self._members.append(converted)
def retrieve_data(self):
filename = os.path.join(self._xml_path, self.refid + '.xml')
try:
self._retrieved_data = compound.parse(filename)
except ExpatError:
print('Error in xml in file %s' % filename)
self._error = True
self._retrieved_data = None
def check_parsed(self):
if not self._parsed:
self._parse()
def confirm_no_error(self):
self.check_parsed()
if self._error:
raise self.ParsingError()
def error(self):
self.check_parsed()
return self._error
def name(self):
# first see if we can do it without processing.
if self._parse_data is not None:
return self._parse_data.name
self.check_parsed()
return self._retrieved_data.compounddef.name
| gpl-3.0 |
adityacs/ansible | test/units/plugins/connection/test_connection.py | 52 | 6390 | # (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
#from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
#from ansible.plugins.connection.chroot import Connection as ChrootConnection
#from ansible.plugins.connection.funcd import Connection as FuncdConnection
#from ansible.plugins.connection.jail import Connection as JailConnection
#from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
from ansible.plugins.connection.lxc import Connection as LxcConnection
from ansible.plugins.connection.local import Connection as LocalConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.docker import Connection as DockerConnection
#from ansible.plugins.connection.winrm import Connection as WinRmConnection
from ansible.plugins.connection.network_cli import Connection as NetworkCliConnection
class TestConnectionBaseClass(unittest.TestCase):
def setUp(self):
self.play_context = PlayContext()
self.in_stream = StringIO()
def tearDown(self):
pass
def test_subclass_error(self):
class ConnectionModule1(ConnectionBase):
pass
with self.assertRaises(TypeError):
ConnectionModule1()
class ConnectionModule2(ConnectionBase):
def get(self, key):
super(ConnectionModule2, self).get(key)
with self.assertRaises(TypeError):
ConnectionModule2()
def test_subclass_success(self):
class ConnectionModule3(ConnectionBase):
@property
def transport(self):
pass
def _connect(self):
pass
def exec_command(self):
pass
def put_file(self):
pass
def fetch_file(self):
pass
def close(self):
pass
self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)
# def test_accelerate_connection_module(self):
# self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
#
# def test_chroot_connection_module(self):
# self.assertIsInstance(ChrootConnection(), ChrootConnection)
#
# def test_funcd_connection_module(self):
# self.assertIsInstance(FuncdConnection(), FuncdConnection)
#
# def test_jail_connection_module(self):
# self.assertIsInstance(JailConnection(), JailConnection)
#
# def test_libvirt_lxc_connection_module(self):
# self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)
def test_lxc_connection_module(self):
self.assertIsInstance(LxcConnection(self.play_context, self.in_stream), LxcConnection)
def test_local_connection_module(self):
self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)
def test_paramiko_connection_module(self):
self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)
def test_ssh_connection_module(self):
self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)
@mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
@mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.2.3', '', 0))
    def test_docker_connection_module_too_old(self, mock_new_docker_version, mock_old_docker_version):
self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
@mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
@mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.3.4', '', 0))
    def test_docker_connection_module(self, mock_new_docker_version, mock_old_docker_version):
self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
DockerConnection)
# old version and new version fail
@mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
@mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('false', 'garbage', '', 1))
def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
# def test_winrm_connection_module(self):
# self.assertIsInstance(WinRmConnection(), WinRmConnection)
def test_network_cli_connection_module(self):
self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), NetworkCliConnection)
self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), ParamikoConnection)
| gpl-3.0 |
susilehtola/psi4 | tests/pytests/test_addons_qcschema.py | 7 | 65060 | import json
import pprint
import pytest
from .addons import hardware_nvidia_gpu, using
import qcengine as qcng
import psi4
# Notes
# * option-setting is NOT cumulative if a run_qcschema call happens in between
# Generating
# * equivalent to test_psi4: copy the job over, then run the lines below to generate the AtomicInput
# atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
# print(f' jatin = """{atin.serialize("json")}"""')
# assert 0
# * then switch to JSON running
# atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
# pprint.pprint(atres.dict())
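# A minimal sketch of the full round trip described above (the He2 molecule
# and sto-3g basis here are illustrative assumptions, not from any test):
# psi4.set_options({"basis": "sto-3g"})
# mol = psi4.geometry("He 0 0 0\nHe 0 0 2\nunits bohr")
# atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="scf", molecule=mol)
# atres = psi4.schema_wrapper.run_qcschema(json.loads(atin.serialize("json")))
# print(atres.return_result)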
pytestmark = [pytest.mark.quick, pytest.mark.smoke]
@using("gdma")
def test_gdma():
"""gdma1"""
#! Water RHF/cc-pVTZ distributed multipole analysis
ref_energy = -76.0571685433842219
ref_dma_mat = psi4.core.Matrix.from_list([[
-0.43406697290168, -0.18762673939633, 0.00000000000000, 0.00000000000000, 0.03206686487531, 0.00000000000000,
-0.00000000000000, -0.53123477172696, 0.00000000000000
],
[
0.21703348903257, -0.06422316619952, 0.00000000000000,
-0.11648289410022, 0.01844320206227, 0.00000000000000,
0.07409226544133, -0.07115302332866, 0.00000000000000
],
[
0.21703348903257, -0.06422316619952, 0.00000000000000,
0.11648289410022, 0.01844320206227, 0.00000000000000,
-0.07409226544133, -0.07115302332866, 0.00000000000000
]])
ref_tot_mat = psi4.core.Matrix(1, 9)
ref_tot_mat.name = "Reference total values"
ref_tot_arr = [
0.00000000516346, -0.79665315928128, 0.00000000000000, 0.00000000000000, 0.10813259329390, 0.00000000000000,
0.00000000000000, -2.01989585894142, 0.00000000000000
]
for i in range(9):
ref_tot_mat.set(0, i, ref_tot_arr[i])
# noreorient/nocom are not needed, but are used here to guarantee that the
# GDMA origin placement defined below is at the O atom.
# added protocols.wavefunction = orbitals_and_eigenvalues (needs to be all)
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["O", "H", "H"], "geometry": [0.0, 0.0, 0.22143054847664648, 4.379423262771008e-17, -1.4304281906653031, -0.8857259733588368, -4.379423262771008e-17, 1.4304281906653031, -0.8857259733588368], "name": "H2O", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [15.99491461957, 1.00782503223, 1.00782503223], "real": [true, true, true], "atom_labels": ["", "", ""], "atomic_numbers": [8, 1, 1], "mass_numbers": [16, 1, 1], "fragments": [[0, 1, 2]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": true, "fix_orientation": true, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "CC-PVTZ"}, "keywords": {"d_convergence": 1e-10, "gdma_limit": 2, "gdma_origin": [0.0, 0.0, 0.117176], "gdma_radius": ["H", 0.65], "gdma_switch": 0.0, "scf_type": "PK"}, "protocols": {"wavefunction": "orbitals_and_eigenvalues"}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
try:
psi4.gdma(atres.wavefunction)
except TypeError as e:
if "Invoked with: WavefunctionProperties" in str(e):
pytest.xfail("GDMA not processable from AtomicResult.wavefunction")
dmavals = psi4.core.variable("DMA DISTRIBUTED MULTIPOLES")
totvals = psi4.core.variable("DMA TOTAL MULTIPOLES")
    assert psi4.compare_values(ref_energy, atres.return_result, 8, "SCF Energy")
assert psi4.compare_matrices(ref_dma_mat, dmavals, 6, "DMA Distributed Multipoles")
assert psi4.compare_matrices(ref_tot_mat, totvals, 6, "DMA Total Multipoles")
@using("ipi")
def test_ipi_broker1():
"""ipi_broker1"""
pytest.xfail("IPI doesn't use basic psi4 functions, so can't transmit as AtomicInput")
# water = psi4.geometry("""
# O -1.216 -0.015 -0.261
# H -1.946 0.681 -0.378
# H -1.332 -0.754 0.283
# units angstrom
# no_reorient
# no_com
# """)
#
# psi4.set_options({
# 'basis': 'sto-3g',
# 'reference': 'rhf',
# })
#
# options = {}
#
# #ipi_broker(serverdata="inet:localhost:21340", options=options)
# b = psi4.ipi_broker("ccsd", serverdata=False, options=options)
#
# refnuc = 9.05843673637
# refscf = -74.9417588868628
# refccsd = -0.04895074370294
# reftotal = -74.9907096305658
#
# frc = [[ 0.08704801, 0.1067644 , -0.11170374],
# [-0.02216499, -0.03279655, 0.03215871],
# [-0.06488302, -0.07396785, 0.07954503]]
#
# b.calculate_force()
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# assert psi4.compare_values(refnuc, water.nuclear_repulsion_energy(), 3, "Nuclear repulsion energy")
# assert psi4.compare_values(refscf, psi4.core.variable("SCF total energy"), 5, "SCF energy")
# assert psi4.compare_values(refccsd, psi4.core.variable("CCSD correlation energy"), 4, "CCSD contribution")
# assert psi4.compare_values(reftotal, psi4.core.variable("Current energy"), 7, "Total energy")
# assert psi4.compare_values(reftotal, b._potential, 7, "Total energy (Broker)")
# assert psi4.compare_arrays(frc, b._force, 4, "Total force (Broker)")
#
# water_mirror = psi4.geometry("""
# O 1.216 0.015 0.261
# H 1.946 -0.681 0.378
# H 1.332 0.754 -0.283
# units angstrom
# no_reorient
# no_com
# """)
#
# b.calculate_force()
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# assert psi4.compare_values(refnuc, water_mirror.nuclear_repulsion_energy(), 3, "Nuclear repulsion energy")
# assert psi4.compare_values(refscf, psi4.core.variable("SCF total energy"), 5, "SCF energy")
# assert psi4.compare_values(refccsd, psi4.core.variable("CCSD correlation energy"), 4, "CCSD contribution")
# assert psi4.compare_values(reftotal, psi4.core.variable("Current energy"), 7, "Total energy")
# assert psi4.compare_values(reftotal, b._potential, 7, "Total energy (Broker)")
# assert psi4.compare_arrays(frc, -b._force, 4, "Total force (Broker)")
#@using("mrcc")
#def test_mrcc():
# """mrcc/ccsdt"""
# #! CCSDT cc-pVDZ energy for the H2O molecule using MRCC
#
# h2o = psi4.geometry("""
# o
# h 1 1.0
# h 1 1.0 2 104.5
# """)
#
# psi4.set_options({
# 'basis': 'cc-pvdz',
# 'freeze_core': 'true'})
#
# psi4.energy('mrccsdt')
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# assert psi4.compare_values( 8.801465529972, psi4.variable("NUCLEAR REPULSION ENERGY"), 6, 'NRE')
# assert psi4.compare_values(-76.021418445155, psi4.variable("SCF TOTAL ENERGY"), 6, 'SCF')
# assert psi4.compare_values( -0.204692406830, psi4.variable("MP2 CORRELATION ENERGY") , 6, 'MP2 correlation')
# assert psi4.compare_values( -0.217715210258, psi4.variable("CCSDT CORRELATION ENERGY"), 6, 'CCSDT correlation')
# assert psi4.compare_values(-76.239133655413, psi4.variable("CURRENT ENERGY"), 6, 'CCSDT')
@using("chemps2")
def test_chemps2():
"""chemps2/scf-n2"""
#! dmrg-scf on N2
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "N"], "geometry": [0.0, 0.0, -1.059, 0.0, 0.0, 1.059], "name": "N2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [14.00307400443, 14.00307400443], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [7, 7], "mass_numbers": [14, 14], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "dmrg-scf", "basis": "CC-PVDZ"}, "keywords": {"active": [2, 0, 1, 1, 0, 2, 1, 1], "dmrg_diis": 1, "dmrg_diis_write": 1, "dmrg_excitation": 0, "dmrg_irrep": 0, "dmrg_local_init": 0, "dmrg_mps_write": 0, "dmrg_multiplicity": 1, "dmrg_print_corr": 1, "dmrg_scf_active_space": "NO", "dmrg_scf_diis_thr": 0.01, "dmrg_scf_state_avg": 0, "dmrg_sweep_dvdson_rtol": [0.0001, 1e-06, 1e-08], "dmrg_sweep_energy_conv": [1e-10, 1e-10, 1e-10], "dmrg_sweep_max_sweeps": [5, 5, 10], "dmrg_sweep_noise_prefac": [0.05, 0.05, 0.0], "dmrg_sweep_states": [500, 1000, 1000], "dmrg_unitary_write": 1, "d_convergence": 1e-12, "e_convergence": 1e-12, "reference": "RHF", "restricted_docc": [1, 0, 0, 0, 0, 1, 0, 0]}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-109.1035023353, atres.return_result, 6, "DMRG Energy")
@using('mp2d')
def test_mp2d():
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "H", "H", "H", "H", "C", "C", "H", "H"], "geometry": [3.8623510442091865e-17, -1.261539587380886, -4.02163254721969, -3.862351044209187e-17, 1.261539587380886, -4.02163254721969, 1.745390733721485, -2.3286206872737854, -4.0245162692871395, -1.7453907337214847, -2.3286206872737854, -4.0245162692871395, -1.745390733721485, 2.3286206872737854, -4.0245162692871395, 1.7453907337214847, 2.3286206872737854, -4.0245162692871395, -5.7777898331617076e-34, 0.0, 5.47454736883822, -5.7777898331617076e-34, 0.0, 3.193150937439626, -5.7777898331617076e-34, 0.0, 1.1789145370276326, -5.7777898331617076e-34, 0.0, 7.484131263529336], "name": "C4H6", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 1.00782503223, 1.00782503223, 1.00782503223, 1.00782503223, 12.0, 12.0, 1.00782503223, 1.00782503223], "real": [true, true, true, true, true, true, true, true, true, true], "atom_labels": ["", "", "", "", "", "", "", "", "", ""], "atomic_numbers": [6, 6, 1, 1, 1, 1, 6, 6, 1, 1], "mass_numbers": [12, 12, 1, 1, 1, 1, 12, 12, 1, 1], "fragments": [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9]], "fragment_charges": [0.0, 0.0], "fragment_multiplicities": [1, 1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "mp2-d", "basis": "cc-pvdz"}, "keywords": {}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
expected = 0.00632174635953
assert psi4.compare_values(expected, atres.extras["qcvars"]["DISPERSION CORRECTION ENERGY"], 7, 'disp E')
assert psi4.compare_values(expected, atres.properties.scf_dispersion_correction_energy, 7, 'mp2d disp E')
@using("dftd3")
def test_dftd3():
"""dftd3/energy"""
ref_d2 = [-0.00390110, -0.00165271, -0.00058118]
ref_d3zero = [-0.00285088, -0.00084340, -0.00031923]
ref_d3bj = [-0.00784595, -0.00394347, -0.00226683]
ref_pbe_d2 = [-0.00278650, -0.00118051, -0.00041513]
ref_pbe_d3zero = [-0.00175474, -0.00045421, -0.00016839]
ref_pbe_d3bj = [-0.00475937, -0.00235265, -0.00131239]
# mA, b3lyp-d2, libdisp
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "H", "H", "H", "H"], "geometry": [3.8623510442091865e-17, -1.261539587380886, 0.00041472029798115525, -3.862351044209187e-17, 1.261539587380886, 0.00041472029798115525, 1.745390733721485, -2.3286206872737854, -0.0024690017694669127, -1.7453907337214847, -2.3286206872737854, -0.0024690017694669127, -1.745390733721485, 2.3286206872737854, -0.0024690017694669127, 1.7453907337214847, 2.3286206872737854, -0.0024690017694669127], "name": "C2H4", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 1.00782503223, 1.00782503223, 1.00782503223, 1.00782503223], "real": [true, true, true, true, true, true], "atom_labels": ["", "", "", "", "", ""], "atomic_numbers": [6, 6, 1, 1, 1, 1], "mass_numbers": [12, 12, 1, 1, 1, 1], "fragments": [[0, 1, 2, 3, 4, 5]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "b3lyp-d2", "basis": "STO-3G"}, "keywords": {"dft_radial_points": 50, "dft_spherical_points": 110, "scf_type": "DF", "function_kwargs": {"engine": "libdisp"}}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(ref_d2[1], atres.extras["qcvars"]["DISPERSION CORRECTION ENERGY"], 7,
'Ethene -D2 (calling psi4 Disp class)')
# mA, b3lyp-d3bj, dftd3
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "H", "H", "H", "H"], "geometry": [3.8623510442091865e-17, -1.261539587380886, 0.00041472029798115525, -3.862351044209187e-17, 1.261539587380886, 0.00041472029798115525, 1.745390733721485, -2.3286206872737854, -0.0024690017694669127, -1.7453907337214847, -2.3286206872737854, -0.0024690017694669127, -1.745390733721485, 2.3286206872737854, -0.0024690017694669127, 1.7453907337214847, 2.3286206872737854, -0.0024690017694669127], "name": "C2H4", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 1.00782503223, 1.00782503223, 1.00782503223, 1.00782503223], "real": [true, true, true, true, true, true], "atom_labels": ["", "", "", "", "", ""], "atomic_numbers": [6, 6, 1, 1, 1, 1], "mass_numbers": [12, 12, 1, 1, 1, 1], "fragments": [[0, 1, 2, 3, 4, 5]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "B3LYP-D3BJ", "basis": "STO-3G"}, "keywords": {"dft_radial_points": 50, "dft_spherical_points": 110, "scf_type": "DF"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(ref_d3bj[1], atres.extras["qcvars"]["DISPERSION CORRECTION ENERGY"], 7,
'Ethene -D3 (calling dftd3 -bj)')
    # mB, pbe-d3bj, custom parameters
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "H", "H"], "geometry": [3.7092061506874214e-68, -3.7092061506874214e-68, 1.1408784499704554, 3.7092061506874214e-68, -3.7092061506874214e-68, -1.140517981428138, 3.7092061506874214e-68, -3.7092061506874214e-68, -3.154754381840132, 3.7092061506874214e-68, -3.7092061506874214e-68, 3.1504623446615714], "name": "C2H2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 1.00782503223, 1.00782503223], "real": [true, true, true, true], "atom_labels": ["", "", "", ""], "atomic_numbers": [6, 6, 1, 1], "mass_numbers": [12, 12, 1, 1], "fragments": [[0, 1, 2, 3]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "pbe-d3bj", "basis": "STO-3G"}, "keywords": {"dft_dispersion_parameters": [2.0, 0.7875, 0.4289, 4.4407], "dft_radial_points": 50, "dft_spherical_points": 110, "scf_type": "DF"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-0.002238400, atres.extras["qcvars"]["DISPERSION CORRECTION ENERGY"], 7,
'Ethene -D3 (calling dftd3 -bj)')
@using("libefp")
def test_libefp():
"""libefp/qchem-qmefp-sp"""
#! EFP on mixed QM (water) and EFP (water + 2 * ammonia) system.
#! An EFP-only calc performed first to test vales against q-chem.
pytest.xfail("EFP not transmittable through QCSchema")
qmefp = psi4.geometry("""
# QM fragment
0 1
units bohr
O1 0.000000000000 0.000000000000 0.224348285559
H2 -1.423528800232 0.000000000000 -0.897393142237
H3 1.423528800232 0.000000000000 -0.897393142237
# EFP as EFP fragments
--
efp h2o -4.014110144291 2.316749370493 -1.801514729931 -2.902133 1.734999 -1.953647
--
efp NH3,1.972094713645,,3.599497221584 , 5.447701074734 -1.105309 2.033306 -1.488582
--
efp NH3 -7.876296399270 -1.854372164887 -2.414804197762 2.526442 1.658262 -2.742084
""")
# <<< EFP calc >>>
psi4.set_options({'basis': '6-31g*', 'scf_type': 'pk', 'guess': 'core', 'df_scf_guess': False})
# psi4.energy('efp')
#
atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="efp")
print(f' jatin = """{atin.serialize("json")}"""')
assert 0
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# assert psi4.compare_values( 9.1793879214, qmefp.nuclear_repulsion_energy(), 6, 'QM NRE')
# assert psi4.compare_values(-0.0004901368, psi4.variable('efp elst energy'), 6, 'EFP-EFP Elst') # from q-chem
# assert psi4.compare_values(-0.0003168768, psi4.variable('efp ind energy'), 6, 'EFP-EFP Indc')
# assert psi4.compare_values(-0.0021985285, psi4.variable('efp disp energy'), 6, 'EFP-EFP Disp') # from q-chem
# assert psi4.compare_values( 0.0056859871, psi4.variable('efp exch energy'), 6, 'EFP-EFP Exch') # from q-chem
# assert psi4.compare_values( 0.0026804450, psi4.variable('efp total energy'), 6, 'EFP-EFP Totl')
# assert psi4.compare_values( 0.0026804450, psi4.variable('current energy'), 6, 'Current')
# psi4.core.print_variables()
#
# psi4.core.clean()
# psi4.core.clean_variables()
#
# # <<< QM + EFP calc >>>
# psi4.set_options({
# 'e_convergence': 12,
# 'd_convergence': 12})
# psi4.energy('scf')
#
# assert psi4.compare_values( 9.1793879214, qmefp.nuclear_repulsion_energy(), 6, 'QM NRE')
# assert psi4.compare_values( 0.2622598847, psi4.variable('efp total energy') - psi4.variable('efp ind energy'), 6, 'EFP corr to SCF') # from q-chem
# assert psi4.compare_values(-0.0117694790, psi4.variable('efp ind energy'), 6, 'QM-EFP Indc') # from q-chem
# assert psi4.compare_values(-0.0021985285, psi4.variable('efp disp energy'), 6, 'EFP-EFP Disp') # from q-chem
# assert psi4.compare_values( 0.0056859871, psi4.variable('efp exch energy'), 6, 'EFP-EFP Exch') # from q-chem
# assert psi4.compare_values( 0.2504904057, psi4.variable('efp total energy'), 6, 'EFP-EFP Totl') # from q-chem
# assert psi4.compare_values(-76.0139362744, psi4.variable('scf total energy'), 6, 'SCF') # from q-chem
# psi4.core.print_variables()
@using("pcmsolver")
def test_pcmsolver():
"""pcmsolver/scf"""
#! pcm
nucenergy = 12.0367196636183458
polenergy = -0.0053060443528559
totalenergy = -55.4559426361734040
# pcm_string = """
# Units = Angstrom
# Medium {
# SolverType = IEFPCM
# Solvent = Water
# }
#
# Cavity {
# RadiiSet = UFF
# Type = GePol
# Scaling = False
# Area = 0.3
# Mode = Implicit
# }
# """
# psi4.set_options({
# 'basis': 'STO-3G',
# 'scf_type': 'pk',
# 'pcm': True,
# 'pcm_scf_type': 'total',
# "pcm__input": pcm_string,
# })
#pcmfile = "\\n".join(pcm_string.split("\n"))
#print(f"{pcmfile=}")
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "H", "H", "H"], "geometry": [-1e-10, -0.1040380466, 0.0, -0.9015844116, 0.4818470201, -1.5615900098, -0.9015844116, 0.4818470201, 1.5615900098, 1.8031688251, 0.4818470204, 0.0], "name": "H3N", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [14.00307400443, 1.00782503223, 1.00782503223, 1.00782503223], "real": [true, true, true, true], "atom_labels": ["", "", "", ""], "atomic_numbers": [7, 1, 1, 1], "mass_numbers": [14, 1, 1, 1], "fragments": [[0, 1, 2, 3]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "STO-3G"}, "keywords": {"pcm": 1, "pcm__input": "\\n Units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Water\\n }\\n \\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit\\n }\\n ", "pcm_scf_type": "TOTAL", "scf_type": "PK"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres1 = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
pprint.pprint(atres1.dict())
assert psi4.compare_values(nucenergy, atres1.properties.nuclear_repulsion_energy, 10,
"Nuclear repulsion energy (PCM, total algorithm)")
assert psi4.compare_values(totalenergy, atres1.return_result, 10, "Total energy (PCM, total algorithm)")
assert psi4.compare_values(polenergy, atres1.extras["qcvars"]["PCM POLARIZATION ENERGY"], 6,
"Polarization energy (PCM, total algorithm)")
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "H", "H", "H"], "geometry": [-1e-10, -0.1040380466, 0.0, -0.9015844116, 0.4818470201, -1.5615900098, -0.9015844116, 0.4818470201, 1.5615900098, 1.8031688251, 0.4818470204, 0.0], "name": "H3N", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [14.00307400443, 1.00782503223, 1.00782503223, 1.00782503223], "real": [true, true, true, true], "atom_labels": ["", "", "", ""], "atomic_numbers": [7, 1, 1, 1], "mass_numbers": [14, 1, 1, 1], "fragments": [[0, 1, 2, 3]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "STO-3G"}, "keywords": {"pcm": 1, "pcm__input": "\\n Units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Water\\n }\\n \\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit\\n }\\n ", "pcm_scf_type": "SEPARATE", "scf_type": "PK"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres2 = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
pprint.pprint(atres2.dict())
assert psi4.compare_values(totalenergy, atres2.return_result, 10, "Total energy (PCM, separate algorithm)")
assert psi4.compare_values(polenergy, atres2.extras["qcvars"]["PCM POLARIZATION ENERGY"], 6,
"Polarization energy (PCM, separate algorithm)")
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "H", "H", "H"], "geometry": [-1e-10, -0.1040380466, 0.0, -0.9015844116, 0.4818470201, -1.5615900098, -0.9015844116, 0.4818470201, 1.5615900098, 1.8031688251, 0.4818470204, 0.0], "name": "H3N", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [14.00307400443, 1.00782503223, 1.00782503223, 1.00782503223], "real": [true, true, true, true], "atom_labels": ["", "", "", ""], "atomic_numbers": [7, 1, 1, 1], "mass_numbers": [14, 1, 1, 1], "fragments": [[0, 1, 2, 3]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "STO-3G"}, "keywords": {"pcm": 1, "pcm__input": "\\n Units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Water\\n }\\n \\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit\\n }\\n ", "pcm_scf_type": "TOTAL", "scf_type": "PK", "reference": "uhf"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres3 = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
pprint.pprint(atres3.dict())
assert psi4.compare_values(totalenergy, atres3.return_result, 10, "Total energy (PCM, separate algorithm)")
assert psi4.compare_values(polenergy, atres3.extras["qcvars"]["PCM POLARIZATION ENERGY"], 6,
"Polarization energy (PCM, separate algorithm)")
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "H", "H", "H"], "geometry": [-1e-10, -0.1040380466, 0.0, -0.9015844116, 0.4818470201, -1.5615900098, -0.9015844116, 0.4818470201, 1.5615900098, 1.8031688251, 0.4818470204, 0.0], "name": "H3N", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [14.00307400443, 1.00782503223, 1.00782503223, 1.00782503223], "real": [true, true, true, true], "atom_labels": ["", "", "", ""], "atomic_numbers": [7, 1, 1, 1], "mass_numbers": [14, 1, 1, 1], "fragments": [[0, 1, 2, 3]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "STO-3G"}, "keywords": {"pcm": 1, "pcm__input": "\\n Units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Water\\n }\\n \\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit\\n }\\n ", "pcm_scf_type": "TOTAL", "scf_type": "PK", "reference": "rohf"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres4 = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
pprint.pprint(atres4.dict())
assert psi4.compare_values(totalenergy, atres4.return_result, 10, "Total energy (PCM, separate algorithm)")
assert psi4.compare_values(polenergy, atres4.extras["qcvars"]["PCM POLARIZATION ENERGY"], 6,
"Polarization energy (PCM, separate algorithm)")
@pytest.mark.parametrize("integral_package", [
pytest.param("libint2"),
pytest.param("simint", marks=using("simint")),
])
def test_integrals(integral_package):
"""scf5"""
    #! Test of all different algorithms and reference types for SCF, on singlet and triplet O2, using the cc-pVTZ basis set and the selected integrals backend.
Eref_sing_can = -149.58723684929720
Eref_sing_df = -149.58715054487624
Eref_uhf_can = -149.67135517240553
Eref_uhf_df = -149.67125624291961
Eref_rohf_can = -149.65170765757173
Eref_rohf_df = -149.65160796208073
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["O", "O"], "geometry": [0.0, 0.0, -1.0393493690018054, 0.0, 0.0, 1.0393493690018054], "name": "O2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [15.99491461957, 15.99491461957], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [8, 8], "mass_numbers": [16, 16], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "CC-PVTZ"}, "keywords": {"df_basis_scf": "CC-PVTZ-JKFIT", "print": 2, "scf__scf_type": "DIRECT"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
datin = json.loads(jatin)
datin["keywords"]["integral_package"] = integral_package
atres = psi4.schema_wrapper.run_qcschema(datin)
assert psi4.compare_values(Eref_sing_can, atres.return_result, 6, 'Singlet Direct RHF energy')
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["O", "O"], "geometry": [0.0, 0.0, -1.0393493690018054, 0.0, 0.0, 1.0393493690018054], "name": "O2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [15.99491461957, 15.99491461957], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [8, 8], "mass_numbers": [16, 16], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "CC-PVTZ"}, "keywords": {"df_basis_scf": "CC-PVTZ-JKFIT", "print": 2, "scf_type": "df", "reference": "uhf"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
datin = json.loads(jatin)
datin["keywords"]["integral_package"] = integral_package
atres = psi4.schema_wrapper.run_qcschema(datin)
assert psi4.compare_values(Eref_sing_df, atres.return_result, 6, 'Singlet DF UHF energy')
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["O", "O"], "geometry": [0.0, 0.0, -1.0393493690018054, 0.0, 0.0, 1.0393493690018054], "name": "O2", "molecular_charge": 0.0, "molecular_multiplicity": 3, "masses": [15.99491461957, 15.99491461957], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [8, 8], "mass_numbers": [16, 16], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "CC-PVTZ"}, "keywords": {"df_basis_scf": "CC-PVTZ-JKFIT", "print": 2, "scf_type": "out_of_core", "reference": "rohf"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
datin = json.loads(jatin)
datin["keywords"]["integral_package"] = integral_package
atres = psi4.schema_wrapper.run_qcschema(datin)
assert psi4.compare_values(Eref_rohf_can, atres.return_result, 6, 'Triplet Disk ROHF energy')
def test_json():
"""json/energy"""
import numpy as np
# Generate JSON data
json_input = {
"schema_name": "qcschema_input",
"schema_version": 1,
"molecule": {
"symbols": ["He", "He"],
"geometry": [0, 0, -1, 0, 0, 1]
},
"driver": "gradient",
"model": {
"method": "SCF",
"basis": "sto-3g"
},
"keywords": {}
}
json_ret = psi4.schema_wrapper.run_qcschema(json_input)
json_ret = json_ret.dict()
pprint.pprint(json_ret)
assert psi4.compare_integers(True, json_ret["success"], "Success")
assert psi4.compare_values(-5.474227786274896, json_ret["properties"]["return_energy"], 4, "SCF ENERGY")
bench_gradient = np.array([[0.0, 0.0, 0.32746933], [0.0, 0.0, -0.32746933]])
cgradient = np.array(json_ret["return_result"]).reshape(-1, 3)
assert psi4.compare_arrays(bench_gradient, cgradient, 4, "SCF RETURN GRADIENT")
#@pytest.mark.smoke
#@using("cfour")
#def test_cfour():
# """cfour/sp-rhf-ccsd_t_"""
# #! single-point CCSD(T)/qz2p on water
#
# print(' <<< Translation of ZMAT to Psi4 format to Cfour >>>')
#
# psi4.geometry("""
# O
# H 1 R
# H 1 R 2 A
#
# R=0.958
# A=104.5
# """)
#
# psi4.set_options({
# 'cfour_CALC_level': 'CCSD(T)',
# 'cfour_BASIS': 'qz2p',
# 'cfour_SCF_CONV': 12,
# 'cfour_CC_CONV': 12,
# })
#
# psi4.energy('cfour')
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# assert psi4.compare_values(-76.062748460117, psi4.variable('scf total energy'), 6, 'SCF')
# assert psi4.compare_values(-76.332940127333, psi4.variable('mp2 total energy'), 6, 'MP2')
# assert psi4.compare_values(-76.338453951890, psi4.variable('ccsd total energy'), 6, 'CCSD')
# assert psi4.compare_values(-0.275705491773, psi4.variable('ccsd correlation energy'), 6, 'CCSD corl')
# assert psi4.compare_values(-76.345717549886, psi4.variable('ccsd(t) total energy'), 6, 'CCSD(T)')
# assert psi4.compare_values(-0.282969089769, psi4.variable('ccsd(t) correlation energy'), 6, 'CCSD(T) corl')
@using("v2rdm_casscf")
def test_v2rdm_casscf():
"""v2rdm_casscf/tests/v2rdm1"""
#! cc-pvdz N2 (6,6) active space Test DQG
print(' N2 / cc-pVDZ / DQG(6,6), scf_type = CD / 1e-12, rNN = 0.5 A')
interloper = psi4.geometry("""
0 1
O
H 1 1.0
H 1 1.0 2 90.0
""")
# NOTES
# * add plugin keywords to AtomicInput by hand, not set_module_options, since the module list doesn't know about v2rdm
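    # * in the AtomicInput below those options appear namespaced as
    #   "v2rdm_casscf__<option>" (e.g. "v2rdm_casscf__maxiter") alongside the
    #   ordinary Psi4 keywords (editorial note; spelling taken from the JSON itself)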
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "N"], "geometry": [0.0, 0.0, -0.4724315332214108, 0.0, 0.0, 0.4724315332214108], "name": "N2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [14.00307400443, 14.00307400443], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [7, 7], "mass_numbers": [14, 14], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "v2rdm-casscf", "basis": "CC-PVDZ"}, "keywords": {"active": [1, 0, 1, 1, 0, 1, 1, 1], "cholesky_tolerance": 1e-12, "d_convergence": 1e-10, "maxiter": 500, "restricted_docc": [2, 0, 0, 0, 0, 2, 0, 0], "scf_type": "CD", "v2rdm_casscf__r_convergence": 1e-5, "v2rdm_casscf__e_convergence": 1e-6, "v2rdm_casscf__maxiter": 20000}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
pprint.pprint(atres.dict())
assert psi4.compare_values(-103.04337420425350, atres.extras["qcvars"]["SCF TOTAL ENERGY"], 8, "SCF total energy")
assert psi4.compare_values(-103.086205379481, atres.return_result, 5, "v2RDM-CASSCF total energy")
#@hardware_nvidia_gpu
#@using("gpu_dfcc")
#def test_gpu_dfcc():
# """gpu_dfcc/tests/gpu_dfcc1"""
# #! cc-pvdz (H2O)2 Test DF-CCSD vs GPU-DF-CCSD
#
# import gpu_dfcc
#
# H20 = psi4.geometry("""
# O 0.000000000000 0.000000000000 -0.068516219310
# H 0.000000000000 -0.790689573744 0.543701060724
# H 0.000000000000 0.790689573744 0.543701060724
# """)
#
# psi4.set_memory(32000000000)
# psi4.set_options({
# 'cc_timings': False,
# 'num_gpus': 1,
# 'cc_type': 'df',
# 'df_basis_cc': 'aug-cc-pvdz-ri',
# 'df_basis_scf': 'aug-cc-pvdz-jkfit',
# 'basis': 'aug-cc-pvdz',
# 'freeze_core': 'true',
# 'e_convergence': 1e-8,
# 'd_convergence': 1e-8,
# 'r_convergence': 1e-8,
# 'scf_type': 'df',
# 'maxiter': 30})
# psi4.set_num_threads(2)
# en_dfcc = psi4.energy('ccsd', molecule=H20)
# en_gpu_dfcc = psi4.energy('gpu-df-ccsd', molecule=H20)
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
#
# assert psi4.compare_values(en_gpu_dfcc, en_dfcc, 8, "CCSD total energy")
@using("dftd3")
@using("gcp")
def test_grimme_3c():
# NOTES
# * add `model.basis = "(auto)"`
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "H", "H", "H", "H", "C", "C", "H", "H"], "geometry": [2.676440890378796e-18, -1.261539587380886, -4.02163254721969, 2.676440890378796e-18, 1.261539587380886, -4.02163254721969, 1.7453907337214847, -2.3286206872737854, -4.0245162692871395, -1.7453907337214847, -2.3286206872737854, -4.0245162692871395, -1.7453907337214847, 2.3286206872737854, -4.0245162692871395, 1.7453907337214847, 2.3286206872737854, -4.0245162692871395, 2.676440890378796e-18, -1.8740279466317074e-17, 5.47454736883822, 2.676440890378796e-18, -1.8740279466317074e-17, 3.193150937439626, 2.676440890378796e-18, -1.8740279466317074e-17, 1.1789145370276326, 2.676440890378796e-18, -1.8740279466317074e-17, 7.484131263529336], "name": "C4H6", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 1.00782503223, 1.00782503223, 1.00782503223, 1.00782503223, 12.0, 12.0, 1.00782503223, 1.00782503223], "real": [true, true, true, true, true, true, true, true, true, true], "atom_labels": ["", "", "", "", "", "", "", "", "", ""], "atomic_numbers": [6, 6, 1, 1, 1, 1, 6, 6, 1, 1], "mass_numbers": [12, 12, 1, 1, 1, 1, 12, 12, 1, 1], "fragments": [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9]], "fragment_charges": [0.0, 0.0], "fragment_multiplicities": [1, 1], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "pbeh3c", "basis": "(auto)"}, "keywords": {"function_kwargs": {"bsse_type": "nocp"}}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-2.153, atres.return_result * psi4.constants.hartree2kcalmol, 0.03,
'S22-16 PBEh-3c/def2-mSVP')
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "H", "H", "H", "H", "C", "C", "H", "H"], "geometry": [2.676440890378796e-18, -1.261539587380886, -4.02163254721969, 2.676440890378796e-18, 1.261539587380886, -4.02163254721969, 1.7453907337214847, -2.3286206872737854, -4.0245162692871395, -1.7453907337214847, -2.3286206872737854, -4.0245162692871395, -1.7453907337214847, 2.3286206872737854, -4.0245162692871395, 1.7453907337214847, 2.3286206872737854, -4.0245162692871395, 2.676440890378796e-18, -1.8740279466317074e-17, 5.47454736883822, 2.676440890378796e-18, -1.8740279466317074e-17, 3.193150937439626, 2.676440890378796e-18, -1.8740279466317074e-17, 1.1789145370276326, 2.676440890378796e-18, -1.8740279466317074e-17, 7.484131263529336], "name": "C4H6", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 1.00782503223, 1.00782503223, 1.00782503223, 1.00782503223, 12.0, 12.0, 1.00782503223, 1.00782503223], "real": [true, true, true, true, true, true, true, true, true, true], "atom_labels": ["", "", "", "", "", "", "", "", "", ""], "atomic_numbers": [6, 6, 1, 1, 1, 1, 6, 6, 1, 1], "mass_numbers": [12, 12, 1, 1, 1, 1, 12, 12, 1, 1], "fragments": [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9]], "fragment_charges": [0.0, 0.0], "fragment_multiplicities": [1, 1], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "hf3c", "basis": ""}, "keywords": {"scf_type": "pk", "function_kwargs": {"bsse_type": "nocp"}}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-0.00240232, atres.return_result, 6, 'S22-16 HF-3c/minix')
@using("dkh")
def test_dkh():
"""dkh/molpro-2order"""
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["Ne"], "geometry": [0.0, 0.0, 0.0], "name": "Ne", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [19.9924401762], "real": [true], "atom_labels": [""], "atomic_numbers": [10], "mass_numbers": [20], "fragments": [[0]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "CC-PVTZ-DK"}, "keywords": {"dkh_order": 2, "print": 2, "reference": "RHF", "relativistic": "DKH", "scf_type": "PK"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-128.66891610, atres.return_result, 6, '2nd order vs Molpro')
#@using("ambit")
#@using("forte")
#def disabled_test_forte():
# """aci-10: Perform aci on benzyne"""
#
# import forte
#
# refscf = -229.20378006852584
# refaci = -229.359450812283
# refacipt2 = -229.360444943286
#
# mbenzyne = psi4.geometry("""
# 0 1
# C 0.0000000000 -2.5451795941 0.0000000000
# C 0.0000000000 2.5451795941 0.0000000000
# C -2.2828001669 -1.3508352528 0.0000000000
# C 2.2828001669 -1.3508352528 0.0000000000
# C 2.2828001669 1.3508352528 0.0000000000
# C -2.2828001669 1.3508352528 0.0000000000
# H -4.0782187459 -2.3208602146 0.0000000000
# H 4.0782187459 -2.3208602146 0.0000000000
# H 4.0782187459 2.3208602146 0.0000000000
# H -4.0782187459 2.3208602146 0.0000000000
#
# units bohr
# """)
#
# psi4.set_options({
# 'basis': 'DZ',
# 'df_basis_mp2': 'cc-pvdz-ri',
# 'reference': 'uhf',
# 'scf_type': 'pk',
# 'd_convergence': 10,
# 'e_convergence': 12,
# 'guess': 'gwh',
# })
#
# psi4.set_module_options("FORTE", {
# 'root_sym': 0,
# 'frozen_docc': [2,1,0,0,0,0,2,1],
# 'restricted_docc': [3,2,0,0,0,0,2,3],
# 'active': [1,0,1,2,1,2,1,0],
# 'multiplicity': 1,
# 'aci_nroot': 1,
# 'job_type': 'aci',
# 'sigma': 0.001,
# 'aci_select_type': 'aimed_energy',
# 'aci_spin_projection': 1,
# 'aci_enforce_spin_complete': True,
# 'aci_add_aimed_degenerate': False,
# 'aci_project_out_spin_contaminants': False,
# 'diag_algorithm': 'full',
# 'aci_quiet_mode': True,
# })
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# scf = psi4.energy('scf')
# assert psi4.compare_values(refscf, scf,10,"SCF Energy")
#
# psi4.energy('forte')
# assert psi4.compare_values(refaci, psi4.variable("ACI ENERGY"),10,"ACI energy")
# assert psi4.compare_values(refacipt2, psi4.variable("ACI+PT2 ENERGY"),8,"ACI+PT2 energy")
@using("snsmp2")
def test_snsmp2():
"""snsmp2/he-he"""
# NOTES
# * add `model.basis = "(auto)"`
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["He", "He"], "geometry": [-1.8897261254578286, -2.892808813508824e-17, 0.0, 1.8897261254578286, 2.892808813508824e-17, 0.0], "name": "He2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [4.00260325413, 4.00260325413], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [2, 2], "mass_numbers": [4, 4], "fragments": [[0], [1]], "fragment_charges": [0.0, 0.0], "fragment_multiplicities": [1, 1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "sns-mp2", "basis": "(auto)"}, "keywords": {}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
pprint.pprint(atres.dict())
assert psi4.compare_values(0.00176708227, atres.return_result, 5, "SNS-MP2 IE [Eh]")
@using("resp")
def test_resp():
"""resp/tests/test_resp_1"""
pytest.xfail("RESP calls Psi4, not the reverse, so no AtomicInput possible")
@using("fockci")
def test_psi4fockci():
"""psi4fockci/n2"""
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "N"], "geometry": [0.0, 0.0, -2.3621576568222853, 0.0, 0.0, 2.3621576568222853], "name": "N2", "molecular_charge": 0.0, "molecular_multiplicity": 7, "masses": [14.00307400443, 14.00307400443], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [7, 7], "mass_numbers": [14, 14], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [7], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "psi4fockci", "basis": "cc-pvdz"}, "keywords": {"function_kwargs": {"new_charge": 0, "new_multiplicity": 1}}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-108.776024394853295, atres.extras["qcvars"]["CI ROOT 0 TOTAL ENERGY"], 7, "3SF Energy")
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "N"], "geometry": [0.0, 0.0, -2.3621576568222853, 0.0, 0.0, 2.3621576568222853], "name": "N2", "molecular_charge": 0.0, "molecular_multiplicity": 7, "masses": [14.00307400443, 14.00307400443], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [7, 7], "mass_numbers": [14, 14], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [7], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "psi4fockci", "basis": "cc-pvdz"}, "keywords": {"function_kwargs": {"new_charge": 1, "new_multiplicity": 2}}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-108.250639579451, atres.extras["qcvars"]["CI ROOT 0 TOTAL ENERGY"], 7, "2SF-IP Energy")
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["N", "N"], "geometry": [0.0, 0.0, -2.3621576568222853, 0.0, 0.0, 2.3621576568222853], "name": "N2", "molecular_charge": 0.0, "molecular_multiplicity": 7, "masses": [14.00307400443, 14.00307400443], "real": [true, true], "atom_labels": ["", ""], "atomic_numbers": [7, 7], "mass_numbers": [14, 14], "fragments": [[0, 1]], "fragment_charges": [0.0], "fragment_multiplicities": [7], "fix_com": false, "fix_orientation": false, "fix_symmetry": "c1", "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "psi4fockci", "basis": "cc-pvdz"}, "keywords": {"function_kwargs": {"new_charge": -1, "new_multiplicity": 2}}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1089", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-108.600832070267, atres.extras["qcvars"]["CI ROOT 0 TOTAL ENERGY"], 7, "2SF-EA Energy")
@using("cppe")
def test_cppe():
#! PE-SCF of PNA in presence of 6 water molecules
#! Reference data from Q-Chem calculation
# potfile = """
#! Generated by PyFraME 0.1.0
#@COORDINATES
#18
#AA
#O 9.37100000 2.95300000 -6.07800000 1
#H 8.87200000 2.13400000 -6.04900000 2
#...
#@MULTIPOLES
#ORDER 0
#...
#ORDER 1
#...
#ORDER 2
#...
#@POLARIZABILITIES
#ORDER 1 1
#...
#EXCLISTS
#...
#17 16 18
#18 16 17
#"""
# potfile = "\\n".join(potfile.split("\n"))
#
# psi4.set_options({
# 'pe': True,
# ...
# 'pe__potfile': potfile,
# })
jatin = """{"id": null, "schema_name": "qcschema_input", "schema_version": 1, "molecule": {"schema_name": "qcschema_molecule", "schema_version": 2, "validated": true, "symbols": ["C", "C", "C", "C", "C", "C", "H", "H", "H", "H", "N", "N", "O", "O", "H", "H"], "geometry": [16.3423515329593, 2.031455584867165, -3.233321400658344, 17.918383121591127, 0.8125822339468661, -1.5268987093699253, 17.755866674801755, 1.417294594093371, 1.0166726554963117, 16.028656996133297, 3.2352111267838017, 1.880277494830539, 14.462074038128758, 4.431407764198608, 0.10393493690018055, 14.611362402039928, 3.838033760804849, -2.441526154091514, 19.243081135537064, -0.5839253727664689, -2.1996412100329117, 18.980409204098425, 0.4762109836153727, 2.356488478445912, 13.118478762928243, 5.837364001539231, 0.7351034628030951, 13.411386312374207, 4.752661205526438, -3.813467321173897, 15.875589179971215, 3.826695404052102, 4.393613241689451, 16.50486797974867, 1.4002870589642507, -5.912953046557544, 15.08001448115347, 2.515225472984369, -7.371821615410987, 18.058222854875005, -0.2078698738003611, -6.5497907508368325, 14.64348774617271, 5.123047526116172, 5.01155368471416, 16.99052759399133, 2.9763186475960794, 5.659729745746196], "name": "C6H6N2O2", "molecular_charge": 0.0, "molecular_multiplicity": 1, "masses": [12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 1.00782503223, 1.00782503223, 1.00782503223, 1.00782503223, 14.00307400443, 14.00307400443, 15.99491461957, 15.99491461957, 1.00782503223, 1.00782503223], "real": [true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true], "atom_labels": ["", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""], "atomic_numbers": [6, 6, 6, 6, 6, 6, 1, 1, 1, 1, 7, 7, 8, 8, 1, 1], "mass_numbers": [12, 12, 12, 12, 12, 12, 1, 1, 1, 1, 14, 14, 16, 16, 1, 1], "fragments": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]], "fragment_charges": [0.0], "fragment_multiplicities": [1], "fix_com": false, "fix_orientation": false, "provenance": {"creator": "QCElemental", "version": "v0.17.0+7.gf55d5ac.dirty", "routine": "qcelemental.molparse.from_string"}}, "driver": "energy", "model": {"method": "scf", "basis": "STO-3G"}, "keywords": {"d_convergence": 1e-10, "e_convergence": 1e-10, "pe": 1, "pe__potfile": "\\n! 
Generated by PyFraME 0.1.0\\n@COORDINATES\\n18\\nAA\\nO 9.37100000 2.95300000 -6.07800000 1\\nH 8.87200000 2.13400000 -6.04900000 2\\nH 9.87300000 2.94000000 -5.26100000 3\\nO 7.72000000 5.12000000 -5.51900000 4\\nH 7.63800000 5.70400000 -6.27600000 5\\nH 8.29100000 4.41700000 -5.83600000 6\\nO 10.45300000 3.07700000 -3.43400000 7\\nH 9.94500000 3.80300000 -3.06600000 8\\nH 11.35900000 3.30000000 -3.21200000 9\\nO 6.15200000 4.88500000 -1.44700000 10\\nH 5.50700000 5.59100000 -1.36900000 11\\nH 5.89100000 4.42800000 -2.24900000 12\\nO 5.82300000 3.53700000 -3.94100000 13\\nH 6.31400000 2.71500000 -4.01100000 14\\nH 6.27500000 4.12300000 -4.55200000 15\\nO 8.86000000 5.34600000 -2.74900000 16\\nH 8.46500000 5.48700000 -3.61200000 17\\nH 8.10300000 5.25400000 -2.16700000 18\\n@MULTIPOLES\\nORDER 0\\n18\\n1 -0.67072060\\n2 0.33528566\\n3 0.33543494\\n4 -0.67055041\\n5 0.33526795\\n6 0.33528246\\n7 -0.67071744\\n8 0.33530196\\n9 0.33541547\\n10 -0.67067328\\n11 0.33530711\\n12 0.33536617\\n13 -0.67033801\\n14 0.33511794\\n15 0.33522007\\n16 -0.67061076\\n17 0.33527067\\n18 0.33534009\\nORDER 1\\n18\\n1 0.00080560 -0.20614866 0.20971724\\n2 0.12061233 0.19534842 -0.00437020\\n3 -0.12142692 0.00052018 -0.19496774\\n4 0.12128208 -0.02952128 -0.26636264\\n5 0.02123859 -0.14133483 0.17957852\\n6 -0.13641247 0.16937441 0.07336077\\n7 0.09874019 0.23525681 0.14626863\\n8 0.12395337 -0.17250412 -0.08710686\\n9 -0.21781638 -0.05098712 -0.05185290\\n10 -0.22463142 0.06171102 -0.17953533\\n11 0.15306787 -0.16979678 -0.02103891\\n12 0.06031266 0.11119740 0.19160536\\n13 0.23374507 -0.05843888 -0.16882627\\n14 -0.11567937 0.19764719 0.01487010\\n15 -0.10631260 -0.14219349 0.14548634\\n16 -0.28549425 0.01213605 -0.06959331\\n17 0.09187853 -0.03392224 0.20768211\\n18 0.17941729 0.02239518 -0.14158334\\nORDER 2\\n18\\n1 -3.82946639 0.38325366 0.37670941 -3.74967413 0.05052330 -3.75455511\\n2 -0.30950705 0.24207772 -0.02491219 -0.03438601 -0.02932713 -0.45703642\\n3 -0.30728365 -0.02024246 0.24345483 -0.45782760 -0.02099977 -0.03545985\\n4 -4.01843560 -0.42071343 -0.05131494 -3.51375485 -0.22366647 -3.80188906\\n5 -0.45058853 -0.01355906 0.03221603 -0.26454826 -0.26763342 -0.08600138\\n6 -0.24883865 -0.23689663 -0.12134441 -0.16776157 0.15242467 -0.38450342\\n7 -3.29787344 -0.20331433 -0.01414521 -3.86392383 0.23538575 -4.17197450\\n8 -0.32338453 -0.21790162 -0.11343377 -0.11675396 0.16862725 -0.36084577\\n9 0.03250781 0.14275944 0.13176702 -0.41814694 0.03110778 -0.41507187\\n10 -3.94275214 -0.29331092 0.07399890 -3.64071323 0.42271528 -3.75038841\\n11 -0.18497857 -0.27928790 -0.02349247 -0.15879913 0.01388996 -0.45721403\\n12 -0.40467967 0.08377330 0.14054460 -0.34162736 0.21069480 -0.05454010\\n13 -3.98817938 -0.10562980 -0.21981136 -3.34423732 -0.30490142 -4.00186338\\n14 -0.29272797 -0.25436911 -0.02365191 -0.05984813 0.05198185 -0.44875356\\n15 -0.31577317 0.16791558 -0.17641064 -0.26949266 -0.21058649 -0.21581787\\n16 -3.76934440 0.01997274 -0.13330175 -4.28009915 -0.16523022 -3.28439494\\n17 -0.34779244 -0.03700706 0.22659090 -0.43533186 -0.07018391 -0.01780670\\n18 -0.08414919 0.04219414 -0.26720999 -0.44242465 -0.02713904 -0.27419185\\n@POLARIZABILITIES\\nORDER 1 1\\n18\\n1 2.30791521 0.59643991 0.58658837 2.61100398 -0.10257978 2.60785108\\n2 1.30711897 0.90889808 -0.14203759 2.23138041 0.03064426 0.56363899\\n3 1.31473738 -0.12335800 0.91322978 0.56426081 0.06343491 2.22049295\\n4 2.07587445 -0.67088756 -0.21506644 2.80527167 -0.31578724 2.64886958\\n5 0.72279353 0.00751649 0.24603845 1.44539448 
-1.05520399 1.93525324\\n6 1.51667446 -0.87135876 -0.35833460 1.82612167 0.59796749 0.76044651\\n7 3.17597352 -0.21979725 0.03827106 2.48641276 0.51074529 1.86433465\\n8 1.15711080 -0.91546924 -0.49534521 1.87545624 0.51559151 1.06960779\\n9 2.55558469 0.50338563 0.46922356 0.68876452 -0.02563589 0.85563095\\n10 2.34307583 -0.51517500 0.28388438 2.61854341 0.61181317 2.56593966\\n11 1.63329191 -1.01651663 -0.24252266 1.86507920 0.04484738 0.60387225\\n12 0.76841032 0.41201391 0.40278140 1.14519478 0.81893980 2.18754212\\n13 2.29130369 -0.22262257 -0.50824047 3.08292101 -0.43438854 2.16011928\\n14 1.20133160 -0.94148299 0.07075699 2.22478033 0.20477680 0.68049498\\n15 1.10983723 0.72098321 -0.53040485 1.39825592 -0.82860371 1.59671491\\n16 2.74369667 0.01646994 -0.12367515 1.60703710 -0.26089660 3.17839863\\n17 0.86347479 -0.13575237 0.83717068 0.86374986 -0.25419187 2.37549183\\n18 1.89994613 0.17584219 -1.10556667 0.83549707 -0.08475173 1.36594908\\nEXCLISTS\\n18 3\\n1 2 3\\n2 1 3\\n3 1 2\\n4 5 6\\n5 4 6\\n6 4 5\\n7 8 9\\n8 7 9\\n9 7 8\\n10 11 12\\n11 10 12\\n12 10 11\\n13 14 15\\n14 13 15\\n15 13 14\\n16 17 18\\n17 16 18\\n18 16 17\\n", "scf_type": "PK"}, "protocols": {}, "extras": {"wfn_qcvars_only": true}, "provenance": {"creator": "Psi4", "version": "1.4a2.dev1090", "routine": "psi4.driver.p4util.procutil"}}"""
atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
assert psi4.compare_values(-0.03424830892844, atres.extras["qcvars"]["PE ENERGY"], 6, "PE Energy contribution")
assert psi4.compare_values(-482.9411084900, atres.return_result, 6, "Total PE-SCF Energy")
#@pytest.mark.smoke
#@using("cct3")
#def test_cct3():
# import cct3
#
# psi4.geometry("""
# units bohr
# h -2.514213562373 -1.000000000000 0.000000000000
# h -2.514213562373 1.000000000000 0.000000000000
# h 2.514213562373 -1.000000000000 0.000000000000
# h 2.514213562373 1.000000000000 0.000000000000
# h -1.000000000000 -2.414213562373 0.000000000000
# h -1.000000000000 2.414213562373 0.000000000000
# h 1.000000000000 -2.414213562373 0.000000000000
# h 1.000000000000 2.414213562373 0.000000000000
# symmetry d2h
# """)
#
# def basisspec_psi4_yo__anonymous1234(mol, role):
# bas = """
# cartesian
# ****
# H 0
# S 3 1.0000
# 4.50038 0.0704800
# 0.681277 0.407890
# 0.151374 0.647670
# ****
# """
# mol.set_basis_all_atoms("mbs_my", role=role)
# return {"mbs_my": bas}
#
# psi4.driver.qcdb.libmintsbasisset.basishorde["ANONYMOUS1234"] = basisspec_psi4_yo__anonymous1234
#
# psi4.set_options({
# "cct3__froz": 0,
# "cct3__act_occ": 1,
# "cct3__act_unocc": 1,
# "cct3__etol": 16,
# "cct3__calc_type": "cct3",
# "basis": "anonymous1234",
# })
#
## atin = psi4.driver.p4util.state_to_atomicinput(driver="energy", method="ccsd", molecule=ethene_ethyne)
## print(f' jatin = """{atin.serialize("json")}"""')
## assert 0
##
## atres = psi4.schema_wrapper.run_qcschema(json.loads(jatin))
## pprint.pprint(atres.dict())
#
#
# ene = psi4.energy("cct3")
# assert psi4.compare_values(-4.220587742726, ene, 10, "cc(t;3) energy")
#
| lgpl-3.0 |
upsidetravel/bucket-antivirus-function | scan_bucket.py | 1 | 4671 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
import boto3
from common import AV_STATUS_METADATA, LAMBDA_ENDPOINT
from common import AV_TIMESTAMP_METADATA
from common import S3_ENDPOINT
# Get all objects in an S3 bucket that have not been previously scanned
def get_objects(s3_client, s3_bucket_name):
s3_object_list = []
s3_list_objects_result = {"IsTruncated": True}
while s3_list_objects_result["IsTruncated"]:
s3_list_objects_config = {"Bucket": s3_bucket_name}
continuation_token = s3_list_objects_result.get("NextContinuationToken")
if continuation_token:
s3_list_objects_config["ContinuationToken"] = continuation_token
s3_list_objects_result = s3_client.list_objects_v2(**s3_list_objects_config)
if "Contents" not in s3_list_objects_result:
break
for key in s3_list_objects_result["Contents"]:
key_name = key["Key"]
# Don't include objects that have been scanned
if not object_previously_scanned(s3_client, s3_bucket_name, key_name):
s3_object_list.append(key_name)
return s3_object_list
# Determine if an object has been previously scanned for viruses
def object_previously_scanned(s3_client, s3_bucket_name, key_name):
s3_object_tags = s3_client.get_object_tagging(Bucket=s3_bucket_name, Key=key_name)
if "TagSet" not in s3_object_tags:
return False
for tag in s3_object_tags["TagSet"]:
if tag["Key"] in [AV_STATUS_METADATA, AV_TIMESTAMP_METADATA]:
return True
return False
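# For reference, get_object_tagging returns a dict shaped roughly like
# (tag names below are illustrative, not the actual AV_* constant values):
#   {"TagSet": [{"Key": "av-status", "Value": "CLEAN"}, ...]}
# so the loop above only needs to match on the tag keys.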
# Scan an S3 object for viruses by invoking the lambda function
# Skip any objects that have already been scanned
def scan_object(lambda_client, lambda_function_name, s3_bucket_name, key_name):
print("Scanning: {}/{}".format(s3_bucket_name, key_name))
s3_event = format_s3_event(s3_bucket_name, key_name)
lambda_invoke_result = lambda_client.invoke(
FunctionName=lambda_function_name,
InvocationType="Event",
Payload=json.dumps(s3_event),
)
if lambda_invoke_result["ResponseMetadata"]["HTTPStatusCode"] != 202:
print("Error invoking lambda: {}".format(lambda_invoke_result))
# Format an S3 Event to use when invoking the lambda function
# https://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
def format_s3_event(s3_bucket_name, key_name):
s3_event = {
"Records": [
{"s3": {"bucket": {"name": s3_bucket_name}, "object": {"key": key_name}}}
]
}
return s3_event
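# For example (hypothetical names), format_s3_event("my-bucket", "docs/a.pdf")
# produces the minimal subset of a real S3 notification that the scanning
# lambda needs:
#   {"Records": [{"s3": {"bucket": {"name": "my-bucket"},
#                        "object": {"key": "docs/a.pdf"}}}]}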
def main(lambda_function_name, s3_bucket_name, limit):
# Verify the lambda exists
lambda_client = boto3.client("lambda", endpoint_url=LAMBDA_ENDPOINT)
try:
lambda_client.get_function(FunctionName=lambda_function_name)
except Exception:
print("Lambda Function '{}' does not exist".format(lambda_function_name))
sys.exit(1)
# Verify the S3 bucket exists
s3_client = boto3.client("s3", endpoint_url=S3_ENDPOINT)
try:
s3_client.head_bucket(Bucket=s3_bucket_name)
except Exception:
print("S3 Bucket '{}' does not exist".format(s3_bucket_name))
sys.exit(1)
# Scan the objects in the bucket
s3_object_list = get_objects(s3_client, s3_bucket_name)
if limit:
s3_object_list = s3_object_list[: min(limit, len(s3_object_list))]
for key_name in s3_object_list:
scan_object(lambda_client, lambda_function_name, s3_bucket_name, key_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Scan an S3 bucket for viruses.")
parser.add_argument(
"--lambda-function-name",
required=True,
help="The name of the lambda function to invoke",
)
parser.add_argument(
"--s3-bucket-name", required=True, help="The name of the S3 bucket to scan"
)
parser.add_argument("--limit", type=int, help="The number of records to limit to")
args = parser.parse_args()
main(args.lambda_function_name, args.s3_bucket_name, args.limit)
| apache-2.0 |
ecanzonieri/pyleus | examples/word_count/word_count/line_spout.py | 9 | 1818 | import logging
import random
from pyleus.storm import Spout
log = logging.getLogger('counter')
LINES = """
Lorem ipsum dolor sit amet, consectetur
adipiscing elit. Curabitur pharetra ante eget
nunc blandit vestibulum. Curabitur tempus mi
a risus lacinia egestas. Nulla faucibus
elit vitae dignissim euismod. Fusce ac
elementum leo, ut elementum dui. Ut
consequat est magna, eu posuere mi
pulvinar eget. Integer adipiscing, quam vitae
pretium facilisis, mi ligula viverra sapien,
nec elementum lacus metus ac mi.
Morbi sodales diam non velit accumsan
mollis. Donec eleifend quam in metus
faucibus auctor. Cras auctor sapien non
mauris vehicula, vel aliquam libero luctus.
Sed eu lobortis sapien. Maecenas eu
fringilla enim. Ut in velit nec
lectus tincidunt varius. Sed vel dictum
nunc. Morbi mollis nunc augue, eget
sagittis libero laoreet id. Suspendisse lobortis
nibh mauris, non bibendum magna iaculis
sed. Mauris interdum massa ut sagittis
vestibulum. In ipsum lacus, faucibus eu
hendrerit at, egestas non nisi. Duis
erat mauris, aliquam in hendrerit eget,
aliquam vel nibh. Proin molestie porta
imperdiet. Interdum et malesuada fames ac
ante ipsum primis in faucibus. Praesent
vitae cursus leo, a congue justo.
Ut interdum tellus non odio adipiscing
malesuada. Mauris in ante nec erat
lobortis eleifend. Morbi condimentum interdum elit,
quis iaculis ante pharetra id. In
""".strip().split('\n')
class LineSpout(Spout):
OUTPUT_FIELDS = ["line"]
def next_tuple(self):
line = random.choice(LINES)
log.debug(line)
self.emit((line,), tup_id=random.randrange(999999999))
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
filename='/tmp/word_count_lines.log',
format="%(message)s",
filemode='a',
)
LineSpout().run()
| apache-2.0 |
Smarsh/django | tests/regressiontests/templates/smartif.py | 50 | 2175 | import unittest
from django.template.smartif import IfParser, Literal
class SmartIfTests(unittest.TestCase):
def assertCalcEqual(self, expected, tokens):
self.assertEqual(expected, IfParser(tokens).parse().eval({}))
# We only test things here that are difficult to test elsewhere
# Many other tests are found in the main tests for builtin template tags
# Test parsing via the printed parse tree
def test_not(self):
var = IfParser(["not", False]).parse()
self.assertEqual("(not (literal False))", repr(var))
self.assert_(var.eval({}))
self.assertFalse(IfParser(["not", True]).parse().eval({}))
def test_or(self):
var = IfParser([True, "or", False]).parse()
self.assertEqual("(or (literal True) (literal False))", repr(var))
self.assert_(var.eval({}))
def test_in(self):
list_ = [1,2,3]
self.assertCalcEqual(True, [1, 'in', list_])
self.assertCalcEqual(False, [1, 'in', None])
self.assertCalcEqual(False, [None, 'in', list_])
def test_not_in(self):
list_ = [1,2,3]
self.assertCalcEqual(False, [1, 'not', 'in', list_])
self.assertCalcEqual(True, [4, 'not', 'in', list_])
self.assertCalcEqual(False, [1, 'not', 'in', None])
self.assertCalcEqual(True, [None, 'not', 'in', list_])
def test_precedence(self):
# (False and False) or True == True <- we want this one, like Python
# False and (False or True) == False
self.assertCalcEqual(True, [False, 'and', False, 'or', True])
# True or (False and False) == True <- we want this one, like Python
# (True or False) and False == False
self.assertCalcEqual(True, [True, 'or', False, 'and', False])
# (1 or 1) == 2 -> False
# 1 or (1 == 2) -> True <- we want this one
self.assertCalcEqual(True, [1, 'or', 1, '==', 2])
self.assertCalcEqual(True, [True, '==', True, 'or', True, '==', False])
self.assertEqual("(or (and (== (literal 1) (literal 2)) (literal 3)) (literal 4))",
repr(IfParser([1, '==', 2, 'and', 3, 'or', 4]).parse()))
| bsd-3-clause |
cburgmer/eclectus | tomoeqt/handwritingwidget.py | 1 | 12861 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Qt widget integrating Tegaki/Tomoe handwriting character recognition for
Japanese Kanji and Chinese Hanzi.
Includes a QApplication demonstrating the wiget.
10.02.2009 Christoph Burgmer ([email protected])
History:
* 11.02.2009, show boundaries and keep handwriting within them, resizeable.
* 12.02.2009, dictionary setting method, stroke count, maximum size,
graceful import failure
* 02.06.2009, ported to also work with Tegaki
Released under the LGPL (http://www.gnu.org/licenses/lgpl.html).
"""
import sys
import os
import signal
# imports needed by tomoe widget
from PyQt4 import QtGui, QtCore
try:
from tegaki.recognizer import Recognizer
from tegaki.character import Writing
recognizerType = 'tegaki'
except ImportError:
try:
from tomoe import Recognizer, Writing, Dict
recognizerType = 'tomoe'
except ImportError:
recognizerType = None
class HandwritingWidget(QtGui.QGraphicsView):
"""
Qt widget integrating Tegaki/Tomoe handwriting character recognition for
Japanese Kanji and Chinese Hanzi.
Example:
dictionary = os.path.join("/usr/local/share/tomoe/recognizer/",
'handwriting-zh_CN.xml')
        settings = {'tomoe': {'dictionary': dictionary}}
        widget = HandwritingWidget(mainWindow, settings, 200)
connect(widget, QtCore.SIGNAL("updated()"), showResults)
"""
class LineDrawingGraphicsScene(QtGui.QGraphicsScene):
"""Graphics scene for drawing strokes and handling recognizer."""
def __init__(self, parent, recognizerSettings=None, size=100):
QtGui.QGraphicsScene.__init__(self, parent)
self.size = 100
self.writing = None
# set pen for handwriting
self.pen = QtGui.QPen()
self.pen.setWidth(3)
self.strokeItemGroups = []
self.currentStrokeItems = []
self.setSize(size)
if recognizerSettings:
self.setDictionary(recognizerSettings)
def setDictionary(self, recognizerSettings={}):
#self.clear_strokes()
#initialize the default dictionary and a simple recognizer
if recognizerType == 'tomoe' \
and 'tomoe' in recognizerSettings \
and 'dictionary' in recognizerSettings['tomoe']:
tomoeDict = Dict("XML",
filename=recognizerSettings['tomoe']['dictionary'])
self.recognizer = Recognizer('Simple',
dictionary=tomoeDict)
# will encapsulate stroke data
if not self.writing:
self.writing = Writing()
elif recognizerType == 'tegaki':
recognizers = Recognizer.get_available_recognizers()
if not recognizers:
raise Exception('No recognizer available')
if 'tegaki' in recognizerSettings \
and 'recognizer' in recognizerSettings['tegaki']:
engine = recognizerSettings['tegaki']['recognizer']
if engine not in recognizers:
raise Exception('recognizer not available')
else:
engine = recognizers.keys()[0]
recognizer_klass = recognizers[engine]
self.recognizer = recognizer_klass()
if 'tegaki' in recognizerSettings \
and 'model' in recognizerSettings['tegaki']:
model = recognizerSettings['tegaki']['model']
if model not in recognizer_klass.get_available_models():
raise Exception('Model not available')
else:
model = recognizer_klass.get_available_models().keys()[0]
self.recognizer.set_model(model)
# will encapsulate stroke data
if not self.writing:
self.writing = Writing()
else:
self.writing = None
def enabled(self):
            return self.writing is not None
def setSize(self, size):
for group in self.strokeItemGroups:
for item in group:
self.removeItem(item)
self.clear()
self.setSceneRect(0, 0, size, size)
# draw character grid
self.setBackgroundBrush(QtCore.Qt.lightGray)
self.addRect(-1, -1, size+2, size+2,
QtCore.Qt.white, QtCore.Qt.white).setZValue(-1)
self.addRect(0.1 * size, 0.1 * size, 0.8 * size, 0.8 * size)
self.addLine(0.5 * size, 0.1 * size, 0.5 * size, 0.9 * size,
QtGui.QPen(QtCore.Qt.DashLine))
self.addLine(0.1 * size, 0.5 * size, 0.9 * size, 0.5 * size,
QtGui.QPen(QtCore.Qt.DashLine))
# recalculate drawn strokes
scaleFactor = 1.0 * size / self.size
for group in self.strokeItemGroups:
for item in group:
self.addItem(item)
line = item.line()
line.setLine(line.x1() * scaleFactor,
line.y1() * scaleFactor, line.x2() * scaleFactor,
line.y2() * scaleFactor)
item.setLine(line)
self.size = size
def clear_strokes(self):
"""Removes all strokes and clears the drawing area."""
if self.strokeItemGroups:
for group in self.strokeItemGroups:
for item in group:
self.removeItem(item)
self.strokeItemGroups = []
if self.writing:
self.writing.clear()
def remove_last_stroke(self):
"""Removes the latest stroke."""
if self.strokeItemGroups:
for item in self.strokeItemGroups.pop():
self.removeItem(item)
if self.writing:
self.writing.remove_last_stroke()
def strokeCount(self):
return self.writing.get_n_strokes()
def doSearch(self, maxResults=10):
"""Searches for the current stroke input and returns the results."""
if self.writing and self.writing.get_n_strokes() > 0:
if recognizerType == 'tomoe':
res = self.recognizer.search(self.writing)
if maxResults != None:
res = res[:min(maxResults, len(res))]
return [(r.get_char().get_utf8().decode('utf8'),
r.get_score()) for r in res]
elif recognizerType == 'tegaki':
return [(c.decode('utf8'), score) for c, score \
in self.recognizer.recognize(self.writing, maxResults)]
else:
return []
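        # Result format sketch: doSearch() yields (character, score) pairs,
        # e.g. [(u'木', 0.95), (u'本', 0.87), ...]; the ordering and score
        # scale depend on the backend (values above are illustrative only).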
def mouseReleaseEvent(self, mouseEvent):
if mouseEvent.button() & QtCore.Qt.LeftButton:
# left button released
#pos = mouseEvent.scenePos()
#self.keepBounds(pos)
#self.writing.line_to(pos.x() * 1000 / self.size,
#pos.y() * 1000 / self.size)
self.strokeItemGroups.append(self.currentStrokeItems)
self.currentStrokeItems = []
self.emit(QtCore.SIGNAL("strokeAdded()"))
def mousePressEvent(self, mouseEvent):
if mouseEvent.button() & QtCore.Qt.LeftButton:
# left button pressed
pos = mouseEvent.scenePos()
self.keepBounds(pos)
self.writing.move_to(int(pos.x() * 1000 / self.size),
int(pos.y() * 1000 / self.size))
def mouseMoveEvent(self, mouseEvent):
if mouseEvent.buttons() & QtCore.Qt.LeftButton:
# mouse is moved with the left button hold down
lastPos = mouseEvent.lastScenePos()
self.keepBounds(lastPos)
pos = mouseEvent.scenePos()
self.keepBounds(pos)
self.currentStrokeItems.append(
self.addLine(QtCore.QLineF(lastPos, pos), self.pen))
# tomoe seems to use a 1000x1000 pixel grid
self.writing.line_to(int(pos.x() * 1000 / self.size),
int(pos.y() * 1000 / self.size))
def keepBounds(self, point):
"""Keep the coordinates inside the scene rectangle."""
point.setX(min(max(0, point.x()), self.size))
point.setY(min(max(0, point.y()), self.size))
def __init__(self, parent, recognizerSettings=None, size=100):
self.scene = HandwritingWidget.LineDrawingGraphicsScene(parent,
recognizerSettings, size)
QtGui.QGraphicsView.__init__(self, self.scene, parent)
self.setRenderHints(QtGui.QPainter.Antialiasing)
self.connect(self.scene, QtCore.SIGNAL("strokeAdded()"),
lambda: self.emit(QtCore.SIGNAL("updated()")))
self.setInteractive(self.recognizerAvailable())
self.setMaximumSize(0)
@staticmethod
def recognizerAvailable():
return recognizerType != None
def setDictionary(self, recognizerSettings):
self.scene.setDictionary(recognizerSettings)
self.setInteractive(self.recognizerAvailable())
self.emit(QtCore.SIGNAL("updated()"))
def setMaximumSize(self, size):
self.maximumSize = size
    def results(self, maxResults=None):
        """
        Returns the recognition results for the current strokes, limited to
        at most maxResults entries.
        """
        if self.scene.enabled():
            return self.scene.doSearch(maxResults)
def strokeCount(self):
if self.scene.enabled():
return self.scene.strokeCount()
def clear(self):
"""Removes all strokes and clears the drawing area."""
if self.scene.enabled():
self.scene.clear_strokes()
self.emit(QtCore.SIGNAL("updated()"))
def remove_last_stroke(self):
"""Removes the latest stroke."""
if self.scene.enabled():
self.scene.remove_last_stroke()
self.emit(QtCore.SIGNAL("updated()"))
def resizeEvent(self, event):
QtGui.QGraphicsView.resizeEvent(self, event)
size = event.size()
minSize = min(size.width(), size.height())
if self.maximumSize:
minSize = min(minSize, self.maximumSize)
self.scene.setSize(minSize)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
# this is all you need to get the widget working
tomoeDictionary = os.path.join("/usr/local/share/tomoe/recognizer/",
'handwriting-zh_CN.xml')
recognizerSettings = {'tomoe': {'dictionary': tomoeDictionary},
'tegaki': {}}
self.widget = HandwritingWidget(self, recognizerSettings, 200)
self.connect(self.widget, QtCore.SIGNAL("updated()"), self.showResults)
# add some nice layout and buttons to clear strokes
self.centralwidget = QtGui.QWidget(self)
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.horizontalLayout = QtGui.QHBoxLayout()
self.clearButton = QtGui.QPushButton(self.centralwidget)
self.clearButton.setText('&Clear')
self.backButton = QtGui.QPushButton(self.centralwidget)
self.backButton.setText('&Back')
self.resultLabel = QtGui.QLineEdit(self.centralwidget)
self.horizontalLayout.addWidget(self.clearButton)
self.horizontalLayout.addWidget(self.backButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.addWidget(self.widget)
self.verticalLayout.addWidget(self.resultLabel)
# add connections for clearing stroke input
self.connect(self.clearButton, QtCore.SIGNAL("clicked()"),
self.widget.clear)
self.connect(self.backButton, QtCore.SIGNAL("clicked()"),
self.widget.remove_last_stroke)
self.setCentralWidget(self.centralwidget)
def showResults(self):
resultList = self.widget.results(10)
#self.resultLabel.setText(
#', '.join([char + ' (' + str(s) + ')' for char, s in resultList]))
self.resultLabel.setText(''.join([char for char, _ in resultList]))
def main():
# create applicaton
app = QtGui.QApplication(sys.argv)
# create main window
window = MainWindow()
window.show()
# react to CTRL+C on the command line
signal.signal(signal.SIGINT, signal.SIG_DFL)
app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 |
keenerd/namcap | Namcap/tests/pkgbuild/test_invalidstartdir.py | 4 | 2034 | # -*- coding: utf-8 -*-
#
# namcap tests - invalidstartdir
# Copyright (C) 2011 Rémy Oudompheng <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from Namcap.tests.pkgbuild_test import PkgbuildTest
import Namcap.rules.invalidstartdir as module
class NamcapInvalidStartdirTest(PkgbuildTest):
pkgbuild1 = """
# Maintainer: Arch Linux <[email protected]>
# Contributor: Arch Linux <[email protected]>
pkgname=mypackage
pkgver=1.0
pkgrel=1
pkgdesc="This program does foobar"
arch=('i686' 'x86_64')
url="http://www.example.com/"
license=('GPL')
depends=('glibc')
options=('!libtool')
source=(ftp://ftp.example.com/pub/mypackage-0.1.tar.gz)
md5sums=('abcdefabcdef12345678901234567890')
build() {
cd "$startdir/src/${pkgname}-${pkgver}"
patch -p1 ${startdir}/patch
./configure --prefix=/usr
make
}
package() {
cd "${srcdir}"/${pkgname}-${pkgver}
./configure --prefix=/usr
make DESTDIR="$startdir/pkg" install
}
"""
test_valid = PkgbuildTest.valid_tests
def preSetUp(self):
self.rule = module.package
def test_example1(self):
# Example 1
r = self.run_on_pkg(self.pkgbuild1)
self.assertEqual(set(r.errors), set([
("file-referred-in-startdir", ()),
("use-pkgdir", ()),
("use-srcdir", ())
]))
self.assertEqual(r.warnings, [])
self.assertEqual(r.infos, [])
# vim: set ts=4 sw=4 noet:
| gpl-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
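# A minimal usage sketch (assumes the usual scikit-learn estimators are
# importable; the numeric outcome is illustrative, not a fixed value):
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     silhouette_score(X, labels)   # close to 1 for well-separated blobs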
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
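# Worked micro-example: with labels = [0, 0, 1] and i = 0, the mask keeps only
# sample 1 (same label, self excluded), so `a` is simply distances_row[1].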
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
polyaxon/polyaxon | examples/quick-start/model.py | 1 | 4890 | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import tensorflow as tf
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout, Activation, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers
from polyaxon import tracking
from polyaxon.tracking.contrib.keras import PolyaxonCallback
OPTIMIZERS = {
'adam': optimizers.Adam,
'rmsprop': optimizers.RMSprop,
'sgd': optimizers.SGD,
}
def create_model(
conv1_size,
conv2_size,
dropout,
hidden1_size,
conv_activation,
dense_activation,
optimizer,
learning_rate,
loss,
num_classes,
):
model = Sequential()
model.add(Conv2D(conv1_size, (5, 5), activation=conv_activation,
input_shape=(img_width, img_height, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(conv2_size, (5, 5), activation=conv_activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(hidden1_size, activation=dense_activation))
model.add(Dense(num_classes, activation='softmax'))
model.compile(
optimizer=OPTIMIZERS[optimizer](learning_rate=learning_rate),
loss=loss,
metrics=['accuracy'],
)
return model
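# A quick smoke test could look like the following (hypothetical values; note
# that create_model reads the img_width/img_height globals, so they must be
# set first):
#
#     img_width, img_height = 28, 28
#     m = create_model(32, 64, 0.2, 500, "relu", "relu", "adam", 1e-3,
#                      "categorical_crossentropy", 10)
#     m.summary()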
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--conv1_size',
type=int,
default=32)
parser.add_argument(
'--conv2_size',
type=int,
default=64
)
parser.add_argument(
'--dropout',
type=float,
default=0.2
)
parser.add_argument(
'--hidden1_size',
type=int,
default=500
)
parser.add_argument(
'--conv_activation',
type=str,
default="relu"
)
parser.add_argument(
'--dense_activation',
type=str,
default="relu"
)
parser.add_argument(
'--optimizer',
type=str,
default='adam'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001
)
parser.add_argument(
'--epochs',
type=int,
default=10
)
parser.add_argument(
'--loss',
type=str,
default="categorical_crossentropy"
)
args = parser.parse_args()
img_width, img_height = 28, 28
# Data
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
labels = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')
X_test /= 255.
# reshape input data
X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 1)
X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 1)
# one hot encode outputs
    y_train = tf.keras.utils.to_categorical(y_train)
    y_test = tf.keras.utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Polyaxon
tracking.init()
tracking.log_data_ref(content=X_train, name='x_train')
tracking.log_data_ref(content=y_train, name='y_train')
tracking.log_data_ref(content=X_test, name='X_test')
    tracking.log_data_ref(content=y_test, name='y_test')
plx_callback = PolyaxonCallback()
log_dir = tracking.get_tensorboard_path()
print("log_dir", log_dir)
print("model_dir", plx_callback.filepath)
# TF Model
model = create_model(
conv1_size=args.conv1_size,
conv2_size=args.conv2_size,
dropout=args.dropout,
hidden1_size=args.hidden1_size,
conv_activation=args.conv_activation,
dense_activation=args.dense_activation,
optimizer=args.optimizer,
learning_rate=args.learning_rate,
loss=args.loss,
        num_classes=num_classes
)
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=1,
update_freq=100
)
model.fit(x=X_train,
y=y_train,
epochs=args.epochs,
validation_data=(X_test, y_test),
callbacks=[tensorboard_callback, plx_callback])
| apache-2.0 |
tomlof/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
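# Illustrative worked value (not in the original example): with M = diag(2, 1)
# the kernel doubles the weight of the first feature, e.g. for x = y = [1, 1]:
#   k(x, y) = 2*1*1 + 1*1*1 = 3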
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
orione7/plugin.video.streamondemand-pureita | lib/gdata/tlslite/utils/cryptomath.py | 172 | 11559 | """cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException, e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return stringToBytes(os.urandom(howMany))
prngName = "os.urandom"
except:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = createByteArrayZeros(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return stringToBytes(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return stringToBytes(s)
prngName ="CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
total = 0L
multiplier = 1L
for count in range(len(bytes)-1, -1, -1):
byte = bytes[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToBytes(n):
howManyBytes = numBytes(n)
bytes = createByteArrayZeros(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
bytes[count] = int(n % 256)
n >>= 8
return bytes
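# Hedged self-check for the two converters above (an illustrative helper that
# is not part of the original module and is never called): they are big-endian
# inverses of each other for positive integers.
def _byteNumberRoundtripExample():
    n = 258L                                     # 0x0102
    assert bytesToNumber(numberToBytes(n)) == n  # round-trip is the identity
    assert list(numberToBytes(n)) == [1, 2]      # big-endian byte order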
def bytesToBase64(bytes):
s = bytesToString(bytes)
return stringToBase64(s)
def base64ToBytes(s):
s = base64ToString(s)
return stringToBytes(s)
def numberToBase64(n):
bytes = numberToBytes(n)
return bytesToBase64(bytes)
def base64ToNumber(s):
bytes = base64ToBytes(s)
return bytesToNumber(bytes)
def stringToNumber(s):
bytes = stringToBytes(s)
return bytesToNumber(bytes)
def numberToString(s):
bytes = numberToBytes(s)
return bytesToString(bytes)
def base64ToString(s):
try:
return base64.decodestring(s)
except binascii.Error, e:
raise SyntaxError(e)
except binascii.Incomplete, e:
raise SyntaxError(e)
def stringToBase64(s):
return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
bytes = stringToBytes(mpi[4:])
return bytesToNumber(bytes)
def numberToMPI(n):
bytes = numberToBytes(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
bytes[0] = (length >> 24) & 0xFF
bytes[1] = (length >> 16) & 0xFF
bytes[2] = (length >> 8) & 0xFF
bytes[3] = length & 0xFF
return bytesToString(bytes)
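# Worked example of the MPI layout above (illustrative, not from the original
# source): numberToMPI(255) == '\x00\x00\x00\x02' + '\x00\xff', i.e. a 4-byte
# big-endian length (2 here), one padding zero byte because 255 has its
# high-order bit set, then the magnitude byte 0xff.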
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): #defaults to a 132-bit nonce (22 base64 chars * 6 bits each)
bytes = getRandomBytes(numChars)
bytesStr = "".join([chr(b) for b in bytes])
return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
#This will break when python division changes, but we can't use // cause
#of Jython
return (a * b) / gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
#This will break when python division changes, but we can't use //
#cause of Jython
q = d / c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
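# Hedged self-check for invMod (an illustrative helper, never called):
def _invModExample():
    assert invMod(3, 11) == 4    # 3*4 == 12 == 1 (mod 11)
    assert invMod(2, 4) == 0     # gcd(2, 4) != 1, so no inverse exists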
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
def powMod(base, power, modulus):
nBitScan = 5
""" Return base**power mod modulus, using multi bit scanning
with nBitScan bits at a time."""
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2**nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
# The list is recursive so easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in xrange(1, exp2):
lowPowers.append((lowPowers[i-1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in xrange(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
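# Quick sanity values for powMod (illustrative, not part of the original
# module):
#   powMod(3, 5, 7) == 5     # 3**5 == 243 == 34*7 + 5
#   powMod(3, -1, 7) == 5    # pure-Python branch: invMod gives 3*5 == 1 (mod 7)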
#Pre-calculate a sieve of the 168 primes < 1000:
def makeSieve(n):
    sieve = range(n)
    for count in range(2, int(math.sqrt(n)) + 1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
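# e.g. (illustrative) makeSieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]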
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print "*",
s, t = n-1, 0
while s % 2 == 0:
s, t = s/2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
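# Illustrative behaviour (not from the original source): isPrime(97) is True,
# while isPrime(91) is False because 91 == 7*13 is caught by trial division.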
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2L ** (bits-1)) * 3/2
high = 2L ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print ".",
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3/2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print ".",
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| gpl-3.0 |
katsikas/gnuradio | gnuradio-core/src/lib/filter/generate_gr_fir_sysconfig.py | 17 | 3117 | #!/usr/bin/env python
# -*- python -*-
#
# Copyright 2003,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from generate_utils import *
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_h ():
out = open_and_log_name ('gr_fir_sysconfig.h', 'w')
if not out:
return
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifndef INCLUDED_GR_FIR_SYSCONFIG_H
#define INCLUDED_GR_FIR_SYSCONFIG_H
#include <gr_types.h>
''')
# for sig in fir_signatures:
# out.write ('class gr_fir_' + sig + ';\n')
out.write ('#include <gr_fir_util.h>\n')
out.write (
'''
/*!
* \\brief abstract base class for configuring the automatic selection of the
* fastest gr_fir for your platform.
*
* This is used internally by gr_fir_util.
*/
class gr_fir_sysconfig {
public:
virtual ~gr_fir_sysconfig ();
''')
for sig in fir_signatures:
out.write ((' virtual gr_fir_%s *create_gr_fir_%s (const std::vector<%s> &taps) = 0;\n' %
(sig, sig, tap_type (sig))))
out.write ('\n')
for sig in fir_signatures:
out.write ((' virtual void get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info) = 0;\n' %
(sig, sig)))
out.write (
'''
};
/*
* This returns the single instance of the appropriate derived class.
* This function must be defined only once in the system, and should be defined
* in the platform specific code.
*/
gr_fir_sysconfig *gr_fir_sysconfig_singleton ();
#endif /* INCLUDED_GR_FIR_SYSCONFIG_H */
''')
out.close ()
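# Hedged illustration (assuming 'ccf' -- complex in/out, float taps -- appears
# in fir_signatures from generate_utils): the create_* loop above would emit
# lines such as
#   virtual gr_fir_ccf *create_gr_fir_ccf (const std::vector<float> &taps) = 0;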
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_cc ():
out = open_and_log_name ('gr_fir_sysconfig.cc', 'w')
if not out:
return
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <gr_fir_sysconfig.h>
gr_fir_sysconfig::~gr_fir_sysconfig ()
{
}
''')
out.close ()
# ----------------------------------------------------------------
def generate ():
make_gr_fir_sysconfig_h ()
make_gr_fir_sysconfig_cc ()
if __name__ == '__main__':
generate ()
| gpl-3.0 |
BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib-python/2.7/test/test_float.py | 23 | 62308 |
import unittest, struct
import os
from test import test_support
import math
from math import isinf, isnan, copysign, ldexp
import operator
import random
import fractions
import sys
INF = float("inf")
NAN = float("nan")
have_getformat = hasattr(float, "__getformat__")
requires_getformat = unittest.skipUnless(have_getformat,
"requires __getformat__")
requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"),
"requires __setformat__")
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(have_getformat and
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
#locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(314L), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
# check that we don't accept alternate exponent markers
self.assertRaises(ValueError, float, "-1.7d29")
self.assertRaises(ValueError, float, "3D-14")
if test_support.have_unicode:
self.assertEqual(float(unicode(" 3.14 ")), 3.14)
self.assertEqual(float(unicode(" \u0663.\u0661\u0664 ",'raw-unicode-escape')), 3.14)
# extra long strings should no longer be a problem
# (in 2.6, long unicode inputs to float raised ValueError)
float('.' + '1'*1000)
float(unicode('.' + '1'*1000))
def check_conversion_to_int(self, x):
"""Check that int(x) has the correct value and type, for a float x."""
n = int(x)
if x >= 0.0:
# x >= 0 and n = int(x) ==> n <= x < n + 1
self.assertLessEqual(n, x)
self.assertLess(x, n + 1)
else:
# x < 0 and n = int(x) ==> n >= x > n - 1
self.assertGreaterEqual(n, x)
self.assertGreater(x, n - 1)
# Result should be an int if within range, else a long.
if -sys.maxint-1 <= n <= sys.maxint:
self.assertEqual(type(n), int)
else:
self.assertEqual(type(n), long)
# Double check.
self.assertEqual(type(int(n)), type(n))
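        # For instance (illustrative): int(3.7) == 3 with 3 <= 3.7 < 4, while
        # int(-3.7) == -3 with -3 >= -3.7 > -4, matching the sign branches above.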
def test_conversion_to_int(self):
# Check that floats within the range of an int convert to type
# int, not long. (issue #11144.)
boundary = float(sys.maxint + 1)
epsilon = 2**-sys.float_info.mant_dig * boundary
# These 2 floats are either side of the positive int/long boundary on
# both 32-bit and 64-bit systems.
self.check_conversion_to_int(boundary - epsilon)
self.check_conversion_to_int(boundary)
# These floats are either side of the negative long/int boundary on
# 64-bit systems...
self.check_conversion_to_int(-boundary - 2*epsilon)
self.check_conversion_to_int(-boundary)
# ... and these ones are either side of the negative long/int
# boundary on 32-bit systems.
self.check_conversion_to_int(-boundary - 1.0)
self.check_conversion_to_int(-boundary - 1.0 + 2*epsilon)
@test_support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
# float must not accept the locale specific decimal point but
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
self.skipTest('decimal_point is not ","')
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertEqual(test_support.fcmp(float(" .25e-1 "), .025), 0)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo0:
def __float__(self):
return 42.
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
# Issue 5759: __float__ not called on str subclasses (though it is on
# unicode subclasses).
class FooStr(str):
def __float__(self):
return float(str(self)) + 1
class FooUnicode(unicode):
def __float__(self):
return float(unicode(self)) + 1
self.assertAlmostEqual(float(Foo0()), 42.)
self.assertAlmostEqual(float(Foo1()), 42.)
self.assertAlmostEqual(float(Foo2()), 42.)
self.assertAlmostEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
self.assertAlmostEqual(float(FooUnicode('8')), 9.)
self.assertAlmostEqual(float(FooStr('8')), 9.)
def test_is_integer(self):
self.assertFalse((1.1).is_integer())
self.assertTrue((1.).is_integer())
self.assertFalse(float("nan").is_integer())
self.assertFalse(float("inf").is_integer())
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
def assertEqualAndEqualSign(self, a, b):
# fail unless a == b and a and b have the same sign bit;
# the only difference from assertEqual is that this test
# distinguishes -0.0 and 0.0.
self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b)))
@requires_IEEE_754
def test_float_mod(self):
# Check behaviour of % operator for IEEE 754 special cases.
# In particular, check signs of zeros.
mod = operator.mod
self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0)
self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100)
self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100)
self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0)
self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0)
@requires_IEEE_754
def test_float_pow(self):
# test builtin pow and ** operator for IEEE 754 special cases.
# Special cases taken from section F.9.4.4 of the C99 specification
for pow_op in pow, operator.pow:
# x**NAN is NAN for any x except 1
self.assertTrue(isnan(pow_op(-INF, NAN)))
self.assertTrue(isnan(pow_op(-2.0, NAN)))
self.assertTrue(isnan(pow_op(-1.0, NAN)))
self.assertTrue(isnan(pow_op(-0.5, NAN)))
self.assertTrue(isnan(pow_op(-0.0, NAN)))
self.assertTrue(isnan(pow_op(0.0, NAN)))
self.assertTrue(isnan(pow_op(0.5, NAN)))
self.assertTrue(isnan(pow_op(2.0, NAN)))
self.assertTrue(isnan(pow_op(INF, NAN)))
self.assertTrue(isnan(pow_op(NAN, NAN)))
# NAN**y is NAN for any y except +-0
self.assertTrue(isnan(pow_op(NAN, -INF)))
self.assertTrue(isnan(pow_op(NAN, -2.0)))
self.assertTrue(isnan(pow_op(NAN, -1.0)))
self.assertTrue(isnan(pow_op(NAN, -0.5)))
self.assertTrue(isnan(pow_op(NAN, 0.5)))
self.assertTrue(isnan(pow_op(NAN, 1.0)))
self.assertTrue(isnan(pow_op(NAN, 2.0)))
self.assertTrue(isnan(pow_op(NAN, INF)))
# (+-0)**y raises ZeroDivisionError for y a negative odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0)
# (+-0)**y raises ZeroDivisionError for y finite and negative
# but not an odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5)
# (+-0)**y is +-0 for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0)
# (+-0)**y is 0 for y finite and positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0)
# (-1)**+-inf is 1
self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0)
# 1**y is 1 for any y, even if y is an infinity or nan
self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0)
# x**+-0 is 1 for any x, even if x is a zero, infinity, or nan
self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0)
# x**y raises ValueError for finite negative x and non-integral y
self.assertRaises(ValueError, pow_op, -2.0, -0.5)
self.assertRaises(ValueError, pow_op, -2.0, 0.5)
self.assertRaises(ValueError, pow_op, -1.0, -0.5)
self.assertRaises(ValueError, pow_op, -1.0, 0.5)
self.assertRaises(ValueError, pow_op, -0.5, -0.5)
self.assertRaises(ValueError, pow_op, -0.5, 0.5)
# x**-INF is INF for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF)
self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF)
# x**-INF is 0 for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0)
# x**INF is 0 for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0)
# x**INF is INF for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, INF), INF)
self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(INF, INF), INF)
# (-INF)**y is -0.0 for y a negative odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0)
# (-INF)**y is 0.0 for y negative but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0)
# (-INF)**y is -INF for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF)
# (-INF)**y is INF for y positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF)
# INF**y is INF for y positive
self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF)
self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF)
# INF**y is 0.0 for y negative
self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0)
# basic checks not covered by the special cases above
self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0)
self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0)
# 1 ** large and -1 ** large; some libms apparently
# have problems with these
self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0)
# check sign for results that underflow to 0
self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0)
self.assertRaises(ValueError, pow_op, -2.0, -2000.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0)
self.assertRaises(ValueError, pow_op, -0.5, 2000.5)
self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0)
# check we don't raise an exception for subnormal results,
# and validate signs. Tests currently disabled, since
# they fail on systems where a subnormal result from pow
# is flushed to zero (e.g. Debian/ia64.)
#self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315)
@requires_setformat
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assertIn(float.__getformat__('double'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertIn(float.__getformat__('float'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = '\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = ''.join(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = '\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = ''.join(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = '\x7f\x80\x00\x00'
LE_FLOAT_INF = ''.join(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = '\x7f\xc0\x00\x00'
LE_FLOAT_NAN = ''.join(reversed(BE_FLOAT_NAN))
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
@requires_setformat
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
class IEEEFormatTestCase(unittest.TestCase):
@requires_IEEE_754
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
@requires_IEEE_754
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
@requires_IEEE_754
def test_negative_zero(self):
def pos_pos():
return 0.0, math.atan2(0.0, -1)
def pos_neg():
return 0.0, math.atan2(-0.0, -1)
def neg_pos():
return -0.0, math.atan2(0.0, -1)
def neg_neg():
return -0.0, math.atan2(-0.0, -1)
self.assertEqual(pos_pos(), neg_pos())
self.assertEqual(pos_neg(), neg_neg())
@requires_IEEE_754
def test_underflow_sign(self):
# check that -1e-1000 gives -0.0, not 0.0
self.assertEqual(math.atan2(-1e-1000, -1), math.atan2(-0.0, -1))
self.assertEqual(math.atan2(float('-1e-1000'), -1),
math.atan2(-0.0, -1))
def test_format(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
self.assertEqual(format(0.0, 'f'), '0.000000')
# the default is 'g', except for empty format spec
self.assertEqual(format(0.0, ''), '0.0')
self.assertEqual(format(0.01, ''), '0.01')
self.assertEqual(format(0.01, 'g'), '0.01')
# empty presentation type should format in the same way as str
# (issue 5920)
x = 100/7.
self.assertEqual(format(x, ''), str(x))
self.assertEqual(format(x, '-'), str(x))
self.assertEqual(format(x, '>'), str(x))
self.assertEqual(format(x, '2'), str(x))
self.assertEqual(format(1.0, 'f'), '1.000000')
self.assertEqual(format(-1.0, 'f'), '-1.000000')
self.assertEqual(format( 1.0, ' f'), ' 1.000000')
self.assertEqual(format(-1.0, ' f'), '-1.000000')
self.assertEqual(format( 1.0, '+f'), '+1.000000')
self.assertEqual(format(-1.0, '+f'), '-1.000000')
# % formatting
self.assertEqual(format(-1.0, '%'), '-100.000000%')
# conversion to string should fail
self.assertRaises(ValueError, format, 3.0, "s")
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# issue 3382: 'f' and 'F' with inf's and nan's
self.assertEqual('{0:f}'.format(INF), 'inf')
self.assertEqual('{0:F}'.format(INF), 'INF')
self.assertEqual('{0:f}'.format(-INF), '-inf')
self.assertEqual('{0:F}'.format(-INF), '-INF')
self.assertEqual('{0:f}'.format(NAN), 'nan')
self.assertEqual('{0:F}'.format(NAN), 'NAN')
@requires_IEEE_754
def test_format_testfile(self):
with open(format_testfile) as testfile:
            for line in testfile:
if line.startswith('--'):
continue
line = line.strip()
if not line:
continue
lhs, rhs = map(str.strip, line.split('->'))
fmt, arg = lhs.split()
arg = float(arg)
self.assertEqual(fmt % arg, rhs)
if not math.isnan(arg) and copysign(1.0, arg) > 0.0:
self.assertEqual(fmt % -arg, '-' + rhs)
def test_issue5864(self):
self.assertEqual(format(123.456, '.4'), '123.5')
self.assertEqual(format(1234.56, '.4'), '1.235e+03')
self.assertEqual(format(12345.6, '.4'), '1.235e+04')
class ReprTestCase(unittest.TestCase):
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_short_repr(self):
# test short float repr introduced in Python 3.1. One aspect
# of this repr is that we get some degree of str -> float ->
# str roundtripping. In particular, for any numeric string
# containing 15 or fewer significant digits, those exact same
# digits (modulo trailing zeros) should appear in the output.
# No more repr(0.03) -> "0.029999999999999999"!
test_strings = [
# output always includes *either* a decimal point and at
# least one digit after that point, or an exponent.
'0.0',
'1.0',
'0.01',
'0.02',
'0.03',
'0.04',
'0.05',
'1.23456789',
'10.0',
'100.0',
# values >= 1e16 get an exponent...
'1000000000000000.0',
'9999999999999990.0',
'1e+16',
'1e+17',
# ... and so do values < 1e-4
'0.001',
'0.001001',
'0.00010000000000001',
'0.0001',
'9.999999999999e-05',
'1e-05',
# values designed to provoke failure if the FPU rounding
# precision isn't set correctly
'8.72293771110361e+25',
'7.47005307342313e+26',
'2.86438000439698e+28',
'8.89142905246179e+28',
'3.08578087079232e+35',
]
for s in test_strings:
negs = '-'+s
self.assertEqual(s, repr(float(s)))
self.assertEqual(negs, repr(float(negs)))
@requires_IEEE_754
class RoundTestCase(unittest.TestCase):
def test_second_argument_type(self):
# any type with an __index__ method should be permitted as
# a second argument
self.assertAlmostEqual(round(12.34, True), 12.3)
class MyIndex(object):
def __index__(self): return 4
self.assertAlmostEqual(round(-0.123456, MyIndex()), -0.1235)
# but floats should be illegal
self.assertRaises(TypeError, round, 3.14159, 2.0)
def test_inf_nan(self):
# rounding an infinity or nan returns the same number;
# (in py3k, rounding an infinity or nan raises an error,
# since the result can't be represented as a long).
self.assertEqual(round(INF), INF)
self.assertEqual(round(-INF), -INF)
self.assertTrue(math.isnan(round(NAN)))
for n in range(-5, 5):
self.assertEqual(round(INF, n), INF)
self.assertEqual(round(-INF, n), -INF)
self.assertTrue(math.isnan(round(NAN, n)))
self.assertRaises(TypeError, round, INF, 0.0)
self.assertRaises(TypeError, round, -INF, 1.0)
self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
self.assertRaises(TypeError, round, -0.0, 1j)
def test_large_n(self):
for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
self.assertEqual(round(123.456, n), 123.456)
self.assertEqual(round(-123.456, n), -123.456)
self.assertEqual(round(1e300, n), 1e300)
self.assertEqual(round(1e-320, n), 1e-320)
self.assertEqual(round(1e150, 300), 1e150)
self.assertEqual(round(1e300, 307), 1e300)
self.assertEqual(round(-3.1415, 308), -3.1415)
self.assertEqual(round(1e150, 309), 1e150)
self.assertEqual(round(1.4e-315, 315), 1e-315)
def test_small_n(self):
for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
self.assertEqual(round(123.456, n), 0.0)
self.assertEqual(round(-123.456, n), -0.0)
self.assertEqual(round(1e300, n), 0.0)
self.assertEqual(round(1e-320, n), 0.0)
def test_overflow(self):
self.assertRaises(OverflowError, round, 1.6e308, -308)
self.assertRaises(OverflowError, round, -1.7e308, -308)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"test applies only when using short float repr style")
def test_previous_round_bugs(self):
# particular cases that have occurred in bug reports
self.assertEqual(round(562949953421312.5, 1),
562949953421312.5)
self.assertEqual(round(56294995342131.5, 3),
56294995342131.5)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"test applies only when using short float repr style")
def test_halfway_cases(self):
# Halfway cases need special attention, since the current
# implementation has to deal with them specially. Note that
# 2.x rounds halfway values up (i.e., away from zero) while
# 3.x does round-half-to-even.
self.assertAlmostEqual(round(0.125, 2), 0.13)
self.assertAlmostEqual(round(0.375, 2), 0.38)
self.assertAlmostEqual(round(0.625, 2), 0.63)
self.assertAlmostEqual(round(0.875, 2), 0.88)
self.assertAlmostEqual(round(-0.125, 2), -0.13)
self.assertAlmostEqual(round(-0.375, 2), -0.38)
self.assertAlmostEqual(round(-0.625, 2), -0.63)
self.assertAlmostEqual(round(-0.875, 2), -0.88)
self.assertAlmostEqual(round(0.25, 1), 0.3)
self.assertAlmostEqual(round(0.75, 1), 0.8)
self.assertAlmostEqual(round(-0.25, 1), -0.3)
self.assertAlmostEqual(round(-0.75, 1), -0.8)
self.assertEqual(round(-6.5, 0), -7.0)
self.assertEqual(round(-5.5, 0), -6.0)
self.assertEqual(round(-1.5, 0), -2.0)
self.assertEqual(round(-0.5, 0), -1.0)
self.assertEqual(round(0.5, 0), 1.0)
self.assertEqual(round(1.5, 0), 2.0)
self.assertEqual(round(2.5, 0), 3.0)
self.assertEqual(round(3.5, 0), 4.0)
self.assertEqual(round(4.5, 0), 5.0)
self.assertEqual(round(5.5, 0), 6.0)
self.assertEqual(round(6.5, 0), 7.0)
# same but without an explicit second argument; in 3.x these
# will give integers
self.assertEqual(round(-6.5), -7.0)
self.assertEqual(round(-5.5), -6.0)
self.assertEqual(round(-1.5), -2.0)
self.assertEqual(round(-0.5), -1.0)
self.assertEqual(round(0.5), 1.0)
self.assertEqual(round(1.5), 2.0)
self.assertEqual(round(2.5), 3.0)
self.assertEqual(round(3.5), 4.0)
self.assertEqual(round(4.5), 5.0)
self.assertEqual(round(5.5), 6.0)
self.assertEqual(round(6.5), 7.0)
self.assertEqual(round(-25.0, -1), -30.0)
self.assertEqual(round(-15.0, -1), -20.0)
self.assertEqual(round(-5.0, -1), -10.0)
self.assertEqual(round(5.0, -1), 10.0)
self.assertEqual(round(15.0, -1), 20.0)
self.assertEqual(round(25.0, -1), 30.0)
self.assertEqual(round(35.0, -1), 40.0)
self.assertEqual(round(45.0, -1), 50.0)
self.assertEqual(round(55.0, -1), 60.0)
self.assertEqual(round(65.0, -1), 70.0)
self.assertEqual(round(75.0, -1), 80.0)
self.assertEqual(round(85.0, -1), 90.0)
self.assertEqual(round(95.0, -1), 100.0)
self.assertEqual(round(12325.0, -1), 12330.0)
self.assertEqual(round(350.0, -2), 400.0)
self.assertEqual(round(450.0, -2), 500.0)
self.assertAlmostEqual(round(0.5e21, -21), 1e21)
self.assertAlmostEqual(round(1.5e21, -21), 2e21)
self.assertAlmostEqual(round(2.5e21, -21), 3e21)
self.assertAlmostEqual(round(5.5e21, -21), 6e21)
self.assertAlmostEqual(round(8.5e21, -21), 9e21)
self.assertAlmostEqual(round(-1.5e22, -22), -2e22)
self.assertAlmostEqual(round(-0.5e22, -22), -1e22)
self.assertAlmostEqual(round(0.5e22, -22), 1e22)
self.assertAlmostEqual(round(1.5e22, -22), 2e22)
@requires_IEEE_754
def test_format_specials(self):
# Test formatting of nans and infs.
def test(fmt, value, expected):
# Test with both % and format().
self.assertEqual(fmt % value, expected, fmt)
if not '#' in fmt:
# Until issue 7094 is implemented, format() for floats doesn't
# support '#' formatting
fmt = fmt[1:] # strip off the %
self.assertEqual(format(value, fmt), expected, fmt)
for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
'%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
pfmt = '%+' + fmt[1:]
sfmt = '% ' + fmt[1:]
test(fmt, INF, 'inf')
test(fmt, -INF, '-inf')
test(fmt, NAN, 'nan')
test(fmt, -NAN, 'nan')
# When asking for a sign, it's always provided. nans are
# always positive.
test(pfmt, INF, '+inf')
test(pfmt, -INF, '-inf')
test(pfmt, NAN, '+nan')
test(pfmt, -NAN, '+nan')
# When using ' ' for a sign code, only infs can be negative.
# Others have a space.
test(sfmt, INF, ' inf')
test(sfmt, -INF, '-inf')
test(sfmt, NAN, ' nan')
test(sfmt, -NAN, ' nan')
# Beginning with Python 2.6 float has cross platform compatible
# ways to create and represent inf and nan
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assertTrue(isinf(float("inf")))
self.assertTrue(isinf(float("+inf")))
self.assertTrue(isinf(float("-inf")))
self.assertTrue(isinf(float("infinity")))
self.assertTrue(isinf(float("+infinity")))
self.assertTrue(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
self.assertTrue(isnan(float("nan")))
self.assertTrue(isnan(float("+nan")))
self.assertTrue(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
def notest_float_nan(self):
self.assertTrue(NAN.is_nan())
self.assertFalse(INF.is_nan())
self.assertFalse((0.).is_nan())
def notest_float_inf(self):
self.assertTrue(INF.is_inf())
self.assertFalse(NAN.is_inf())
self.assertFalse((0.).is_inf())
def test_hash_inf(self):
# the actual values here should be regarded as an
# implementation detail, but they need to be
# identical to those used in the Decimal module.
self.assertEqual(hash(float('inf')), 314159)
self.assertEqual(hash(float('-inf')), -271828)
self.assertEqual(hash(float('nan')), 0)
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
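        # Illustrative: identical(0.0, -0.0) reaches self.fail even though
        # 0.0 == -0.0, since copysign(1.0, 0.0) != copysign(1.0, -0.0).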
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'an',
'nf',
'nfinity',
'inity',
'iinity',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
u'0x1p\uff10', # fullwidth Unicode digits
u'\uff10x1p0',
u'0x\uff11p0',
u'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_whitespace(self):
value_pairs = [
('inf', INF),
('-Infinity', -INF),
('nan', NAN),
('1.0', 1.0),
('-0x.2', -0.125),
('-0.0', -0.0)
]
whitespace = [
'',
' ',
'\t',
'\n',
'\n \t',
'\f',
'\v',
'\r'
]
for inp, expected in value_pairs:
for lead in whitespace:
for trail in whitespace:
got = fromHex(lead + inp + trail)
self.identical(got, expected)
def test_from_hex(self):
        MIN = self.MIN
        MAX = self.MAX
        TINY = self.TINY
        EPS = self.EPS
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(u'0x1p0'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
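        # The arithmetic behind the expected values: TINY is the smallest
        # subnormal, 2**-1074, so 0x1p-1076 is TINY/4 and rounds to 0.0,
        # 0x2p-1076 is exactly TINY/2 -- a tie, broken towards the even
        # significand 0 -- and 0x3p-1076 is 3*TINY/4, which rounds up to
        # TINY.  The same tie-break makes 0x6p-1076 (== 1.5*TINY) land on
        # 2*TINY rather than TINY.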
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
for i in xrange(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_main():
test_support.run_unittest(
GeneralFloatCases,
FormatFunctionsTestCase,
UnknownFormatTestCase,
IEEEFormatTestCase,
ReprTestCase,
RoundTestCase,
InfNanTest,
HexFloatTestCase,
)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
gavmain/django_demo | demo/users/views.py | 95 | 1459 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', ]
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
    # ListView does not use slug lookups itself; these two fields simply
    # mirror the username-based configuration of UserDetailView above
slug_field = 'username'
slug_url_kwarg = 'username'
| mit |
yatinkumbhare/openstack-nova | nova/tests/unit/scheduler/test_scheduler_utils.py | 10 | 15657 | # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
CONF = cfg.CONF
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_build_request_spec_without_image(self, mock_get):
image = None
instance = {'uuid': 'fake-uuid'}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
mock_get.return_value = objects.Flavor(extra_specs={})
self.mox.StubOutWithMock(flavors, 'extract_flavor')
flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
self.mox.ReplayAll()
request_spec = scheduler_utils.build_request_spec(self.context, image,
[instance])
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
instance_type = objects.Flavor()
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(instance, 'get_flavor') as mock_get:
mock_get.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_get.assert_called_once_with()
self.assertIsInstance(request_spec['instance_properties'], dict)
@mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock())
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(objects.Instance, 'save')
def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get):
expected_uuid = 'fake-uuid'
request_spec = dict(instance_properties=dict(uuid='other-uuid'))
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=expected_uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
scheduler_utils.set_vm_state_and_notify(self.context,
expected_uuid,
service,
method,
updates,
exc_info,
request_spec,
db)
mock_save.assert_called_once_with()
mock_add.assert_called_once_with(self.context, mock.ANY,
exc_info, mock.ANY)
self.assertIsInstance(mock_add.call_args[0][1], objects.Instance)
self.assertIsInstance(mock_add.call_args[0][3], tuple)
mock_get.return_value.error.assert_called_once_with(self.context,
event_type,
payload)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
elif len(force_hosts) > 1 or len(force_nodes) > 1:
filter_properties = dict(retry=dict(hosts=[]),
force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if (with_retry and enable_retry_force_hosts
and enable_retry_force_nodes):
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
def test_populate_filter_props_multi_force_hosts_with_retry(self):
self._test_populate_filter_props(force_hosts=['force-host1',
'force-host2'])
def test_populate_filter_props_multi_force_nodes_with_retry(self):
self._test_populate_filter_props(force_nodes=['force-node1',
'force-node2'])
@mock.patch.object(scheduler_utils, '_max_attempts')
def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
_max_attempts.return_value = 2
msg = 'The exception text was preserved!'
filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
exc=[msg]))
nvh = self.assertRaises(exception.NoValidHost,
scheduler_utils.populate_retry,
filter_properties, 'fake-uuid')
# make sure 'msg' is a substring of the complete exception text
self.assertIn(msg, nvh.message)
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
def test_validate_filters_configured(self):
self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
return group
def _get_group_details(self, group, policy=None):
group_hosts = ['hostB']
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(
self.context, 'fake_uuid', group_hosts)
self.assertEqual(
(set(['hostA', 'hostB']), [policy]),
group_info)
def test_get_group_details(self):
for policy in ['affinity', 'anti-affinity']:
group = self._create_server_group(policy)
self._get_group_details(group, policy=policy)
def test_get_group_details_with_no_affinity_filters(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context,
'fake-uuid')
self.assertIsNone(group_info)
def test_get_group_details_with_no_instance_uuid(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context, None)
self.assertIsNone(group_info)
def _get_group_details_with_filter_not_configured(self, policy):
wrong_filter = {
'affinity': 'ServerGroupAntiAffinityFilter',
'anti-affinity': 'ServerGroupAffinityFilter',
}
self.flags(scheduler_default_filters=[wrong_filter[policy]])
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
self.assertRaises(exception.UnsupportedPolicyException,
scheduler_utils._get_group_details,
self.context, 'fake-uuid')
def test_get_group_details_with_filter_not_configured(self):
policies = ['anti-affinity', 'affinity']
for policy in policies:
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'])
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy']}
self.assertEqual(expected_filter_props, filter_props)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
| apache-2.0 |
mkieszek/odoo | openerp/addons/test_inherit/models.py | 11 | 2179 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, osv
# We just create a new model
class mother(models.Model):
_name = 'test.inherit.mother'
_columns = {
# check interoperability of field inheritance with old-style fields
'name': osv.fields.char('Name'),
'state': osv.fields.selection([('a', 'A'), ('b', 'B')], string='State'),
}
_defaults = {
'name': 'Foo',
}
surname = fields.Char(compute='_compute_surname')
@api.one
@api.depends('name')
def _compute_surname(self):
self.surname = self.name or ''
# We want to inherit from the parent model and add some fields
# to the child object
class daughter(models.Model):
_name = 'test.inherit.daughter'
template_id = fields.Many2one('test.inherit.mother', 'Template',
delegate=True, required=True, ondelete='cascade')
field_in_daughter = fields.Char('Field1')
# We add a new field to the parent object. A recent refactoring broke this
# feature; these models and their tests demonstrate the bug and its fix.
class mother(models.Model):
_inherit = 'test.inherit.mother'
field_in_mother = fields.Char()
# extend the name field: make it required and change its default value
name = fields.Char(required=True, default='Bar')
# extend the selection of the state field
state = fields.Selection(selection_add=[('c', 'C')])
# override the computed field, and extend its dependencies
@api.one
@api.depends('field_in_mother')
def _compute_surname(self):
if self.field_in_mother:
self.surname = self.field_in_mother
else:
super(mother, self)._compute_surname()
class mother(models.Model):
_inherit = 'test.inherit.mother'
# extend again the selection of the state field
state = fields.Selection(selection_add=[('d', 'D')])
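# After the two extensions above, the selection on test.inherit.mother's
# state field is [('a', 'A'), ('b', 'B'), ('c', 'C'), ('d', 'D')].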
class daughter(models.Model):
_inherit = 'test.inherit.daughter'
# simply redeclare the field without adding any option
template_id = fields.Many2one()
# change the default value of an inherited field
name = fields.Char(default='Baz')
| agpl-3.0 |
PredictiveScienceLab/py-mcmc | pymcmc/_mala_proposal.py | 2 | 1960 | """
This is a Metropolis Adjusted Langevin Algorithm (MALA) proposal.
Author:
Ilias Bilionis
"""
__all__ = ['MALAProposal']
import numpy as np
from scipy.stats import norm
from . import GradProposal
from . import SingleParameterTunableProposalConcept
class MALAProposal(GradProposal, SingleParameterTunableProposalConcept):
"""
A MALA proposal.
    :param dt: The time step. The larger you pick it, the bigger the steps
               you propose and the lower the acceptance rate gets.
    :type dt: float
    The rest of the keyword arguments are what you would find in:
+ :class:`pymcmc.GradProposal`
+ :class:`pymcmc.SingleParameterTunableProposal`
"""
def __init__(self, dt=1., **kwargs):
"""
Initialize the object.
"""
self.dt = dt
if not kwargs.has_key('name'):
kwargs['name'] = 'MALA Proposal'
kwargs['param_name'] = 'dt'
GradProposal.__init__(self, **kwargs)
SingleParameterTunableProposalConcept.__init__(self, **kwargs)
def _sample(self, old_params, old_grad_params):
return (old_params +
0.5 * self.dt ** 2 * old_grad_params +
self.dt * np.random.randn(old_params.shape[0]))
def __call__(self, new_params, old_params, old_grad_params):
return np.sum(norm.logpdf(new_params,
loc=(old_params + 0.5 * self.dt ** 2 * old_grad_params),
scale=self.dt))
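    # What the two methods above implement, spelled out: _sample draws one
    # Euler step of the overdamped Langevin diffusion,
    #
    #     x' = x + 0.5 * dt**2 * grad_log_p(x) + dt * z,   z ~ N(0, I),
    #
    # and __call__ returns log q(new | old) for exactly that Gaussian,
    # N(old + 0.5 * dt**2 * grad_log_p(old), dt**2 * I), which the sampler
    # needs for the Metropolis-Hastings correction of this asymmetric
    # proposal.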
def __getstate__(self):
state = GradProposal.__getstate__(self)
state['dt'] = self.dt
tuner_state = SingleParameterTunableProposalConcept.__getstate__(self)
return dict(state.items() + tuner_state.items())
def __setstate__(self, state):
GradProposal.__setstate__(self, state)
self.dt = state['dt']
SingleParameterTunableProposalConcept.__setstate__(self, state['tuner'])
| lgpl-3.0 |
shinate/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/outputcapture.py | 124 | 5478 | # Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Class for unittest support. Used for capturing stderr/stdout.
import logging
import unittest # Don't use unittest2 here as the autoinstaller may not have it yet.
import sys
from StringIO import StringIO
class OutputCapture(object):
# By default we capture the output to a stream. Other modules may override
# this function in order to do things like pass through the output. See
# webkitpy.test.main for an example.
@staticmethod
def stream_wrapper(stream):
return StringIO()
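    # A sketch of such an override (the _Tee name is illustrative, not part
    # of webkitpy): record the output while still passing it through.
    #
    #     class _Tee(StringIO):
    #         def __init__(self, stream):
    #             StringIO.__init__(self)
    #             self._stream = stream
    #         def write(self, data):
    #             self._stream.write(data)
    #             StringIO.write(self, data)
    #
    #     OutputCapture.stream_wrapper = _Tee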
def __init__(self):
self.saved_outputs = dict()
self._log_level = logging.INFO
def set_log_level(self, log_level):
self._log_level = log_level
if hasattr(self, '_logs_handler'):
self._logs_handler.setLevel(self._log_level)
def _capture_output_with_name(self, output_name):
stream = getattr(sys, output_name)
captured_output = self.stream_wrapper(stream)
self.saved_outputs[output_name] = stream
setattr(sys, output_name, captured_output)
return captured_output
def _restore_output_with_name(self, output_name):
captured_output = getattr(sys, output_name).getvalue()
setattr(sys, output_name, self.saved_outputs[output_name])
del self.saved_outputs[output_name]
return captured_output
def capture_output(self):
self._logs = StringIO()
self._logs_handler = logging.StreamHandler(self._logs)
self._logs_handler.setLevel(self._log_level)
self._logger = logging.getLogger()
self._orig_log_level = self._logger.level
self._logger.addHandler(self._logs_handler)
self._logger.setLevel(min(self._log_level, self._orig_log_level))
return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr"))
def restore_output(self):
self._logger.removeHandler(self._logs_handler)
self._logger.setLevel(self._orig_log_level)
self._logs_handler.flush()
self._logs.flush()
logs_string = self._logs.getvalue()
delattr(self, '_logs_handler')
delattr(self, '_logs')
return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"), logs_string)
def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None):
self.capture_output()
try:
if expected_exception:
return_value = testcase.assertRaises(expected_exception, function, *args, **kwargs)
else:
return_value = function(*args, **kwargs)
finally:
(stdout_string, stderr_string, logs_string) = self.restore_output()
if hasattr(testcase, 'assertMultiLineEqual'):
testassert = testcase.assertMultiLineEqual
else:
testassert = testcase.assertEqual
testassert(stdout_string, expected_stdout)
testassert(stderr_string, expected_stderr)
if expected_logs is not None:
testassert(logs_string, expected_logs)
# This is a little strange, but I don't know where else to return this information.
return return_value
class OutputCaptureTestCaseBase(unittest.TestCase):
maxDiff = None
def setUp(self):
unittest.TestCase.setUp(self)
self.output_capture = OutputCapture()
(self.__captured_stdout, self.__captured_stderr) = self.output_capture.capture_output()
def tearDown(self):
del self.__captured_stdout
del self.__captured_stderr
self.output_capture.restore_output()
unittest.TestCase.tearDown(self)
def assertStdout(self, expected_stdout):
self.assertEqual(expected_stdout, self.__captured_stdout.getvalue())
def assertStderr(self, expected_stderr):
self.assertEqual(expected_stderr, self.__captured_stderr.getvalue())
| bsd-3-clause |
danielquinn/spirithunter | src/spirits/migrations/0002_auto_20160904_1741.py | 1 | 2320 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-04 17:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('items', '0002_auto_20160904_1741'),
('spirits', '0001_initial'),
('aspects', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='spirit',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='spirits', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='levelladder',
name='family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ladder', to='spirits.Family'),
),
migrations.AddField(
model_name='levelladder',
name='item_drop_common',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='common_drops', to='items.Item'),
),
migrations.AddField(
model_name='levelladder',
name='item_drop_rare',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rare_drops', to='items.Item'),
),
migrations.AddField(
model_name='elementalstrength',
name='element',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='aspects.Element'),
),
migrations.AddField(
model_name='elementalstrength',
name='spirit',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='elemental_strengths', to='spirits.Spirit'),
),
migrations.AlterUniqueTogether(
name='levelladder',
unique_together=set([('level', 'family')]),
),
migrations.AlterUniqueTogether(
name='elementalstrength',
unique_together=set([('spirit', 'element')]),
),
]
| agpl-3.0 |
fedora-infra/fmn.consumer | tests/test_sse_backend.py | 2 | 5500 | """
Unit tests for the SSE backend.
"""
from __future__ import unicode_literals, absolute_import
import json
import unittest
import mock
from fmn.consumer.backends import SSEBackend
@mock.patch('fmn.consumer.backends.sse.protocol.ClientCreator', mock.Mock())
class TestSSEBackend(unittest.TestCase):
def test_format_message_conglomerated(self):
"""Assert conglomerated messages are formatted"""
message = {
'subtitle': 'relrod pushed commits to ghc and 487 other packages',
'link': 'http://example.com/',
'icon': 'https://that-git-logo',
'secondary_icon': 'https://that-relrod-avatar',
'start_time': 0,
'end_time': 100,
'human_time': '5 minutes ago',
'usernames': ['relrod'],
'packages': ['ghc', 'nethack'],
'topics': ['org.fedoraproject.prod.git.receive'],
'categories': ['git'],
'msg_ids': {
'2014-abcde': {
'subtitle': 'relrod pushed some commits to ghc',
'title': 'git.receive',
'link': 'http://...',
'icon': 'http://...',
},
'2014-bcdef': {
'subtitle': 'relrod pushed some commits to nethack',
'title': 'git.receive',
'link': 'http://...',
'icon': 'http://...',
},
},
}
recipient = {
"triggered_by_links": True,
"markup_messages": True,
"user": "jcline.id.fedoraproject.org",
"filter_name": "firehose",
"filter_oneshot": True,
"filter_id": 7,
"shorten_links": False,
"verbose": True,
}
backend = SSEBackend({})
formatted_message = backend._format_message(message, recipient)
self.assertTrue(isinstance(formatted_message, bytes))
formatted_message = json.loads(formatted_message)
for key in ('dom_id', 'date_time', 'icon', 'link', 'markup', 'secondary_icon'):
self.assertTrue(key in formatted_message)
self.assertEqual(formatted_message['link'], message['link'])
self.assertEqual(formatted_message['markup'], message['subtitle'])
@mock.patch('fmn.consumer.backends.sse.fedmsg.meta')
def test_format_message_raw(self, mock_meta):
"""Assert raw messages are formatted"""
message = {
u'username': u'apache',
u'i': 1,
u'timestamp': 1478281861,
u'msg_id': u'2016-c2184569-f9c4-4c52-affd-79e28848d70f',
u'crypto': u'x509',
u'topic': u'org.fedoraproject.prod.buildsys.task.state.change',
u'msg': {
u'info': {
u'children': [],
u'parent': None,
u'channel_id': 1,
u'start_time': u'2016-11-04 17:51:01.254871',
u'request': [
u'../packages/eclipse/4.5.0/1.fc26/src/eclipse-4.5.0-1.fc26.src.rpm',
u'f26',
{u'scratch': True, u'arch_override': u'x86_64'}
],
u'state': 1,
u'awaited': None,
u'method': u'build',
u'priority': 50,
u'completion_time': None,
u'waiting': None,
u'create_time': u'2016-11-04 17:50:57.825631',
u'owner': 3199,
u'host_id': 82,
u'label': None,
u'arch': u'noarch',
u'id': 16289846
},
u'old': u'FREE',
u'attribute': u'state',
u'method': u'build',
u'instance': u'primary',
u'owner': u'koschei',
u'new': u'OPEN',
u'srpm': u'eclipse-4.5.0-1.fc26.src.rpm',
u'id': 16289846
}
}
recipient = {
"triggered_by_links": True,
"markup_messages": True,
"user": "jcline.id.fedoraproject.org",
"filter_name": "firehose",
"filter_oneshot": True,
"filter_id": 7,
"shorten_links": False,
"verbose": True,
}
mock_meta.msg2icon.return_value = 'http://example.com/icon.png'
mock_meta.msg2link.return_value = 'http://example.com/link'
mock_meta.msg2secondary_icon.return_value = None
mock_meta.msg2agent.return_value = 'koschei'
mock_meta.msg2title.return_value = 'Some title'
mock_meta.msg2subtitle.return_value = 'Some subtitle'
backend = SSEBackend({})
formatted_message = backend._format_message(message, recipient)
self.assertTrue(isinstance(formatted_message, bytes))
formatted_message = json.loads(formatted_message)
for key in ('dom_id', 'date_time', 'icon', 'link', 'markup', 'secondary_icon'):
self.assertTrue(key in formatted_message)
self.assertEqual(mock_meta.msg2icon.return_value, formatted_message['icon'])
self.assertEqual(mock_meta.msg2link.return_value, formatted_message['link'])
self.assertEqual(
mock_meta.msg2secondary_icon.return_value, formatted_message['secondary_icon'])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
mcrowson/django | django/utils/functional.py | 234 | 13622 | import copy
import operator
from functools import total_ordering, wraps
from django.utils import six
from django.utils.six.moves import copyreg
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs))
return _curried
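# A quick sketch of curry in use (volume is illustrative only):
#
#     def volume(length, width, height):
#         return length * width * height
#
#     litres = curry(volume, 10, 10)   # binds length and width
#     litres(5)                        # == volume(10, 10, 5) == 500
#
# Extra positional and keyword arguments are appended to the bound ones.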
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
    An optional ``name`` argument allows you to make cached properties of other
    methods, e.g. ``url = cached_property(get_absolute_url, name='url')``.
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = name or func.__name__
def __get__(self, instance, type=None):
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
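# Usage sketch (Person/friends are illustrative): the first access runs the
# method and stores the result in instance.__dict__ under the same name, so
# later lookups never reach the descriptor again.
#
#     class Person(object):
#         @cached_property
#         def friends(self):
#             return run_expensive_query()   # executed at most once
#
#     bob.friends                    # computed and cached
#     bob.friends                    # served straight from bob.__dict__
#     del bob.__dict__['friends']    # drops the cache; next access recomputes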
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
    Turns any callable into a lazily evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__.keys():
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
cls.__str__ = cls.__bytes_cast_encoded
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode('utf-8')
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __ne__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() != other
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_bytes and six.PY2:
return bytes(self) % rhs
elif self._delegate_text:
return six.text_type(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
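# Usage sketch: wrap a function once and the wrapped call only happens when
# the promise is forced (capfirst-style lambda here is illustrative).
#
#     lazy_capfirst = lazy(lambda s: s[:1].upper() + s[1:], six.text_type)
#     greeting = lazy_capfirst('hello')   # nothing evaluated yet
#     six.text_type(greeting)             # u'Hello' -- evaluated now
#
# Django's translation helpers such as ugettext_lazy are built on lazy().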
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + list(kwargs.values()):
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy_func(*args, **kwargs)
return wrapper
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. It also appears to stop __reduce__ from being
# called. So, we define __getstate__ in a way that cooperates with the way
# that pickle interprets this class. This fails when the wrapped class is
# a builtin, but it is better than nothing.
def __getstate__(self):
if self._wrapped is empty:
self._setup()
return self._wrapped.__dict__
# Python 3 will call __reduce__ when pickling; this method is needed
# to serialize and deserialize correctly.
@classmethod
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __reduce_ex__(self, proto):
if proto >= 2:
# On Py3, since the default protocol is 3, pickle uses the
# ``__newobj__`` method (& more efficient opcodes) for writing.
return (self.__newobj__, (self.__class__,), self.__getstate__())
else:
# On Py2, the default protocol is 0 (for back-compat) & the above
# code fails miserably (see regression test). Instead, we return
# exactly what's returned if there's no ``__reduce__`` method at
# all.
return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__())
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode) # NOQA: unicode undefined on PY3
__nonzero__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# List/Tuple/Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__iter__ = new_method_proxy(iter)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
# Workaround for http://bugs.python.org/issue12370
_super = super
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
_super(SimpleLazyObject, self).__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<%s: %r>' % (type(self).__name__, repr_attr)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
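# Usage sketch: defer an expensive lookup until the object is first touched.
# Django's auth middleware wraps request.user in exactly this way:
#
#     request.user = SimpleLazyObject(lambda: get_user(request))
#     # no user is loaded yet...
#     request.user.is_authenticated()   # _setupfunc runs here, once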
class lazy_property(property):
"""
A property that works with subclasses by wrapping the decorated
functions of the base class.
"""
def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
if fget is not None:
@wraps(fget)
def fget(instance, instance_type=None, name=fget.__name__):
return getattr(instance, name)()
if fset is not None:
@wraps(fset)
def fset(instance, value, name=fset.__name__):
return getattr(instance, name)(value)
if fdel is not None:
@wraps(fdel)
def fdel(instance, name=fdel.__name__):
return getattr(instance, name)()
return property(fget, fset, fdel, doc)
def partition(predicate, values):
"""
Splits the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
| bsd-3-clause |
rwatson/chromium-capsicum | third_party/scons/scons-local/SCons/Tool/cvf.py | 3 | 2399 | """engine.SCons.Tool.cvf
Tool-specific initialization for the Compaq Visual Fortran compiler.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/cvf.py 3897 2009/01/13 06:45:54 scons"
import fortran
compilers = ['f90']
def generate(env):
"""Add Builders and construction variables for compaq visual fortran to an Environment."""
fortran.generate(env)
env['FORTRAN'] = 'f90'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['OBJSUFFIX'] = '.obj'
env['FORTRANMODDIR'] = '${TARGET.dir}'
env['FORTRANMODDIRPREFIX'] = '/module:'
env['FORTRANMODDIRSUFFIX'] = ''
def exists(env):
return env.Detect(compilers)
| bsd-3-clause |
inveniosoftware/kwalitee | kwalitee/cli/githooks.py | 2 | 2832 | # -*- coding: utf-8 -*-
#
# This file is part of kwalitee
# Copyright (C) 2014, 2015 CERN.
#
# kwalitee is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# kwalitee is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kwalitee; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Command-line tools for the git hooks."""
from __future__ import absolute_import, print_function
import os
import sys
import click
from ..hooks import run
HOOKS = {
"pre-commit",
"prepare-commit-msg",
"post-commit",
}
HOOK_PATH = os.path.join(".git", "hooks")
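# With the defaults above, a successful install leaves symlinks like the
# following (the exact prefix depends on sys.prefix):
#
#     .git/hooks/pre-commit         -> <sys.prefix>/bin/kwalitee-pre-commit
#     .git/hooks/prepare-commit-msg -> <sys.prefix>/bin/kwalitee-prepare-commit-msg
#     .git/hooks/post-commit        -> <sys.prefix>/bin/kwalitee-post-commit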
@click.group()
def githooks():
"""Install githooks for kwalitee checks."""
@githooks.command()
@click.option("-f", "--force", is_flag=True,
help="Overwrite existing hooks", default=False)
def install(force=False):
"""Install git hooks."""
ret, git_dir, _ = run("git rev-parse --show-toplevel")
if ret != 0:
click.echo(
"ERROR: Please run from within a GIT repository.",
file=sys.stderr)
raise click.Abort
git_dir = git_dir[0]
hooks_dir = os.path.join(git_dir, HOOK_PATH)
for hook in HOOKS:
hook_path = os.path.join(hooks_dir, hook)
if os.path.exists(hook_path):
if not force:
click.echo(
"Hook already exists. Skipping {0}".format(hook_path),
file=sys.stderr)
continue
else:
os.unlink(hook_path)
source = os.path.join(sys.prefix, "bin", "kwalitee-" + hook)
os.symlink(os.path.normpath(source), hook_path)
return True
@githooks.command()
def uninstall():
"""Uninstall git hooks."""
ret, git_dir, _ = run("git rev-parse --show-toplevel")
if ret != 0:
click.echo(
"ERROR: Please run from within a GIT repository.",
file=sys.stderr)
raise click.Abort
git_dir = git_dir[0]
hooks_dir = os.path.join(git_dir, HOOK_PATH)
for hook in HOOKS:
hook_path = os.path.join(hooks_dir, hook)
if os.path.exists(hook_path):
os.remove(hook_path)
return True
| gpl-2.0 |
xaviercobain88/framework-python | openerp/addons/account/account_analytic_line.py | 31 | 7587 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'general_account_id': fields.many2one('account.account', 'General Account', required=True, ondelete='restrict'),
'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
'code': fields.char('Code', size=8),
'ref': fields.char('Ref.', size=64),
'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
}
_order = 'date desc'
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('from_date',False):
args.append(['date', '>=', context['from_date']])
if context.get('to_date',False):
args.append(['date','<=', context['to_date']])
return super(account_analytic_line, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
def _check_company(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
return False
return True
    # Compute the cost based on the price type defined in the company's
    # property_valuation_price_type property
def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id,
unit=False, journal_id=False, context=None):
if context==None:
context={}
if not journal_id:
j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
journal_id = j_ids and j_ids[0] or False
if not journal_id or not prod_id:
return {}
product_obj = self.pool.get('product.product')
analytic_journal_obj =self.pool.get('account.analytic.journal')
product_price_type_obj = self.pool.get('product.price.type')
j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
prod = product_obj.browse(cr, uid, prod_id, context=context)
result = 0.0
if prod_id:
unit = prod.uom_id.id
if j_id.type == 'purchase':
unit = prod.uom_po_id.id
if j_id.type <> 'sale':
a = prod.property_account_expense.id
if not a:
a = prod.categ_id.property_account_expense_categ.id
if not a:
raise osv.except_osv(_('Error!'),
_('There is no expense account defined ' \
'for this product: "%s" (id:%d).') % \
(prod.name, prod.id,))
else:
a = prod.property_account_income.id
if not a:
a = prod.categ_id.property_account_income_categ.id
if not a:
raise osv.except_osv(_('Error!'),
_('There is no income account defined ' \
'for this product: "%s" (id:%d).') % \
(prod.name, prod_id,))
flag = False
# Compute based on pricetype
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
if journal_id:
journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
if journal.type == 'sale':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
if product_price_type_ids:
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
# Take the company currency as the reference one
if pricetype.field == 'list_price':
flag = True
ctx = context.copy()
if unit:
# price_get() will respect a 'uom' in its context, in order
# to return a default price for those units
ctx['uom'] = unit
amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
amount = amount_unit * quantity or 0.0
result = round(amount, prec)
if not flag:
result *= -1
return {'value': {
'amount': result,
'general_account_id': a,
'product_uom_id': unit
}
}
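    # Example (illustrative): for a purchase-type analytic journal and a
    # product whose standard_price is 10.0, a quantity of 2.0 yields
    # {'value': {'amount': -20.0, ...}} -- the amount is negated because
    # 'flag' stays False for cost-based price types, so costs are booked
    # as negative amounts on the analytic account.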
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
if context.get('account_id', False):
# account_id in context may also be pointing to an account.account.id
cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
return False
account_analytic_line()
class res_partner(osv.osv):
""" Inherits partner and adds contract information in the partner form """
_inherit = 'res.partner'
_columns = {
'contract_ids': fields.one2many('account.analytic.account', \
'partner_id', 'Contracts', readonly=True),
}
res_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
adamschmideg/mal | rpython/step2_eval.py | 32 | 3030 | #import sys, traceback
import mal_readline
import mal_types as types
from mal_types import (MalSym, MalInt, MalStr,
_keywordu,
MalList, _list, MalVector, MalHashMap, MalFunc)
import reader, printer
# read
def READ(str):
return reader.read_str(str)
# eval
def eval_ast(ast, env):
if types._symbol_Q(ast):
assert isinstance(ast, MalSym)
if ast.value in env:
return env[ast.value]
else:
raise Exception(u"'" + ast.value + u"' not found")
elif types._list_Q(ast):
res = []
for a in ast.values:
res.append(EVAL(a, env))
return MalList(res)
elif types._vector_Q(ast):
res = []
for a in ast.values:
res.append(EVAL(a, env))
return MalVector(res)
elif types._hash_map_Q(ast):
new_dct = {}
for k in ast.dct.keys():
new_dct[k] = EVAL(ast.dct[k], env)
return MalHashMap(new_dct)
else:
return ast # primitive value, return unchanged
def EVAL(ast, env):
#print("EVAL %s" % printer._pr_str(ast))
if not types._list_Q(ast):
return eval_ast(ast, env)
# apply list
el = eval_ast(ast, env)
f = el.values[0]
if isinstance(f, MalFunc):
return f.apply(el.values[1:])
else:
raise Exception("%s is not callable" % f)
# print
def PRINT(exp):
return printer._pr_str(exp)
# repl
repl_env = {}
def REP(str, env):
return PRINT(EVAL(READ(str), env))
def plus(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value+b.value)
def minus(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value-b.value)
def multiply(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value*b.value)
def divide(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(int(a.value/b.value))
repl_env[u'+'] = MalFunc(plus)
repl_env[u'-'] = MalFunc(minus)
repl_env[u'*'] = MalFunc(multiply)
repl_env[u'/'] = MalFunc(divide)
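# Example (sketch): with the arithmetic builtins registered above,
#   REP(u"(+ 1 (* 2 3))", repl_env)
# reads the form, evaluates the inner (* 2 3) to 6 via eval_ast/EVAL,
# applies MalFunc(plus) to [1, 6], and returns the printed result u"7".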
def entry_point(argv):
while True:
try:
line = mal_readline.readline("user> ")
if line == "": continue
print(REP(line, repl_env))
except EOFError as e:
break
except reader.Blank:
continue
except types.MalException as e:
print(u"Error: %s" % printer._pr_str(e.object, False))
except Exception as e:
print("Error: %s" % e)
#print("".join(traceback.format_exception(*sys.exc_info())))
return 0
# _____ Define and setup target ___
def target(*args):
return entry_point
# Just run entry_point if not RPython compilation
import sys
if not sys.argv[0].endswith('rpython'):
entry_point(sys.argv)
| mpl-2.0 |
vgan/soiqbot | requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
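# Sketch of how these models are consumed (assumes the sibling
# sbcharsetprober/sbcsgroupprober modules of this package):
#   from .sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(Win1251CyrillicModel)
# A prober maps input bytes through 'charToOrderMap' and scores digram
# frequencies against 'precedenceMatrix' to estimate the charset.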
# flake8: noqa
| mit |
angryrancor/kivy | kivy/core/clipboard/clipboard_pygame.py | 39 | 1446 | '''
Clipboard Pygame: an implementation of the Clipboard using pygame.scrap.
'''
__all__ = ('ClipboardPygame', )
from kivy.utils import platform
from kivy.compat import PY2
from kivy.core.clipboard import ClipboardBase
if platform not in ('win', 'linux', 'macosx'):
raise SystemError('unsupported platform for pygame clipboard')
try:
import pygame
import pygame.scrap
except:
raise
class ClipboardPygame(ClipboardBase):
_is_init = False
_types = None
_aliases = {
'text/plain;charset=utf-8': 'UTF8_STRING'
}
def init(self):
if ClipboardPygame._is_init:
return
pygame.scrap.init()
ClipboardPygame._is_init = True
def get(self, mimetype='text/plain'):
self.init()
mimetype = self._aliases.get(mimetype, mimetype)
text = pygame.scrap.get(mimetype)
return text
def put(self, data, mimetype='text/plain'):
self.init()
mimetype = self._aliases.get(mimetype, mimetype)
pygame.scrap.put(mimetype, data)
def get_types(self):
if not self._types:
self.init()
types = pygame.scrap.get_types()
for mime, pygtype in self._aliases.items()[:]:
if mime in types:
del self._aliases[mime]
if pygtype in types:
types.append(mime)
self._types = types
return self._types
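# Minimal usage sketch (pygame's scrap module needs an initialised display
# window before init() can succeed):
#   cb = ClipboardPygame()
#   cb.put('hello', 'text/plain')
#   assert cb.get('text/plain') == 'hello'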
| mit |
arista-eosplus/ansible | lib/ansible/modules/network/panos/panos_security_policy.py | 39 | 16244 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_security_policy
short_description: Create security rule policy on PanOS devices.
description:
- Security policies allow you to enforce rules and take action, and can be as
general or specific as needed. The policy rules are compared against the
incoming traffic in sequence, and because the first rule that matches the
traffic is applied, the more specific rules must precede the more general ones.
author: "Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPi U(https://pypi.python.org/pypi/pandevice)
notes:
- Checkmode is not supported.
    - Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
rule_name:
description:
- Name of the security rule.
required: true
rule_type:
description:
- Type of security rule (version 6.1 of PanOS and above).
default: "universal"
description:
description:
- Description for the security rule.
default: "None"
tag:
description:
- Administrative tags that can be added to the rule. Note, tags must be already defined.
default: "None"
from_zone:
description:
- List of source zones.
default: "any"
to_zone:
description:
- List of destination zones.
default: "any"
source:
description:
- List of source addresses.
default: "any"
source_user:
description:
- Use users to enforce policy for individual users or a group of users.
default: "any"
hip_profiles:
description: >
If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy
        on information collected by GlobalProtect. For example, the user access level can be determined by the
        HIP, which notifies the firewall about the user's local configuration.
default: "any"
destination:
description:
- List of destination addresses.
default: "any"
application:
description:
- List of applications.
default: "any"
service:
description:
- List of services.
default: "application-default"
log_start:
description:
- Whether to log at session start.
default: false
log_end:
description:
- Whether to log at session end.
default: true
action:
description:
      - Action to apply once the rule matches.
default: "allow"
group_profile:
description: >
Security profile group that is already defined in the system. This property supersedes antivirus,
vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties.
default: None
antivirus:
description:
- Name of the already defined antivirus profile.
default: None
vulnerability:
description:
- Name of the already defined vulnerability profile.
default: None
spyware:
description:
- Name of the already defined spyware profile.
default: None
url_filtering:
description:
- Name of the already defined url_filtering profile.
default: None
file_blocking:
description:
- Name of the already defined file_blocking profile.
default: None
data_filtering:
description:
- Name of the already defined data_filtering profile.
default: None
wildfire_analysis:
description:
- Name of the already defined wildfire_analysis profile.
default: None
devicegroup:
description: >
        Device groups are used for the Panorama interaction with Firewall(s). The group must exist on Panorama.
        If the device group is not defined, we assume that we are contacting a firewall.
default: None
commit:
description:
- Commit configuration if changed.
default: true
'''
EXAMPLES = '''
- name: permit ssh to 1.1.1.1
panos_security_policy:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
rule_name: 'SSH permit'
description: 'SSH rule test'
from_zone: ['public']
to_zone: ['private']
source: ['any']
source_user: ['any']
destination: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
hip_profiles: ['any']
action: 'allow'
commit: false
- name: Allow HTTP multimedia only from CDNs
panos_security_policy:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
rule_name: 'HTTP Multimedia'
description: 'Allow HTTP multimedia only to host at 1.1.1.1'
from_zone: ['public']
to_zone: ['private']
source: ['any']
source_user: ['any']
destination: ['1.1.1.1']
category: ['content-delivery-networks']
application: ['http-video', 'http-audio']
service: ['service-http', 'service-https']
hip_profiles: ['any']
action: 'allow'
commit: false
- name: more complex fictitious rule that uses profiles
panos_security_policy:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
rule_name: 'Allow HTTP w profile'
log_start: false
log_end: true
action: 'allow'
antivirus: 'default'
vulnerability: 'default'
spyware: 'default'
url_filtering: 'default'
wildfire_analysis: 'default'
commit: false
- name: deny all
panos_security_policy:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
rule_name: 'DenyAll'
log_start: true
log_end: true
action: 'deny'
rule_type: 'interzone'
commit: false
# permit ssh to 1.1.1.1 using panorama and pushing the configuration to firewalls
# that are defined in 'DeviceGroupA' device group
- name: permit ssh to 1.1.1.1 through Panorama
panos_security_policy:
ip_address: '10.5.172.92'
password: 'paloalto'
rule_name: 'SSH permit'
description: 'SSH rule test'
from_zone: ['public']
to_zone: ['private']
source: ['any']
source_user: ['any']
destination: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
hip_profiles: ['any']
action: 'allow'
devicegroup: 'DeviceGroupA'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
import pandevice.firewall
import pandevice.panorama
import pandevice.objects
import pandevice.policies
HAS_LIB = True
except ImportError:
HAS_LIB = False
def security_rule_exists(device, rule_name):
if isinstance(device, pandevice.firewall.Firewall):
rule_base = pandevice.policies.Rulebase.refreshall(device)
elif isinstance(device, pandevice.panorama.Panorama):
# look for only pre-rulebase ATM
rule_base = pandevice.policies.PreRulebase.refreshall(device)
if rule_base:
rule_base = rule_base[0]
security_rules = rule_base.findall(pandevice.policies.SecurityRule)
if security_rules:
for r in security_rules:
if r.name == rule_name:
return True
return False
def create_security_rule(**kwargs):
security_rule = pandevice.policies.SecurityRule(
name=kwargs['rule_name'],
description=kwargs['description'],
tozone=kwargs['to_zone'],
fromzone=kwargs['from_zone'],
source=kwargs['source'],
source_user=kwargs['source_user'],
destination=kwargs['destination'],
category=kwargs['category'],
application=kwargs['application'],
service=kwargs['service'],
hip_profiles=kwargs['hip_profiles'],
log_start=kwargs['log_start'],
log_end=kwargs['log_end'],
type=kwargs['rule_type'],
action=kwargs['action'])
if 'tag' in kwargs:
security_rule.tag = kwargs['tag']
# profile settings
if 'group_profile' in kwargs:
security_rule.group = kwargs['group_profile']
else:
if 'antivirus' in kwargs:
security_rule.virus = kwargs['antivirus']
if 'vulnerability' in kwargs:
security_rule.vulnerability = kwargs['vulnerability']
if 'spyware' in kwargs:
security_rule.spyware = kwargs['spyware']
if 'url_filtering' in kwargs:
security_rule.url_filtering = kwargs['url_filtering']
if 'file_blocking' in kwargs:
security_rule.file_blocking = kwargs['file_blocking']
if 'data_filtering' in kwargs:
security_rule.data_filtering = kwargs['data_filtering']
if 'wildfire_analysis' in kwargs:
security_rule.wildfire_analysis = kwargs['wildfire_analysis']
return security_rule
def add_security_rule(device, sec_rule):
if isinstance(device, pandevice.firewall.Firewall):
rule_base = pandevice.policies.Rulebase.refreshall(device)
elif isinstance(device, pandevice.panorama.Panorama):
# look for only pre-rulebase ATM
rule_base = pandevice.policies.PreRulebase.refreshall(device)
if rule_base:
rule_base = rule_base[0]
rule_base.add(sec_rule)
sec_rule.create()
return True
else:
return False
def _commit(device, device_group=None):
"""
:param device: either firewall or panorama
:param device_group: panorama device group or if none then 'all'
    :return: the result of the commit operation
"""
result = device.commit(sync=True)
if isinstance(device, pandevice.panorama.Panorama):
result = device.commit_all(sync=True, sync_all=True, devicegroup=device_group)
return result
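# Sketch of how the helpers above are combined in main() (arguments
# abbreviated; create_security_rule() requires the full kwargs set):
#   sec_rule = create_security_rule(rule_name='SSH permit', ...)
#   changed = add_security_rule(device, sec_rule)
#   if changed and commit:
#       _commit(device, devicegroup)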
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
rule_name=dict(required=True),
description=dict(default=''),
tag=dict(),
to_zone=dict(type='list', default=['any']),
from_zone=dict(type='list', default=['any']),
source=dict(type='list', default=["any"]),
source_user=dict(type='list', default=['any']),
destination=dict(type='list', default=["any"]),
category=dict(type='list', default=['any']),
application=dict(type='list', default=['any']),
service=dict(type='list', default=['application-default']),
hip_profiles=dict(type='list', default=['any']),
group_profile=dict(),
antivirus=dict(),
vulnerability=dict(),
spyware=dict(),
url_filtering=dict(),
file_blocking=dict(),
data_filtering=dict(),
wildfire_analysis=dict(),
log_start=dict(type='bool', default=False),
log_end=dict(type='bool', default=True),
rule_type=dict(default='universal'),
action=dict(default='allow'),
devicegroup=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']])
if not HAS_LIB:
module.fail_json(msg='Missing required pan-python and pandevice modules.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
rule_name = module.params['rule_name']
description = module.params['description']
tag = module.params['tag']
from_zone = module.params['from_zone']
to_zone = module.params['to_zone']
source = module.params['source']
source_user = module.params['source_user']
destination = module.params['destination']
category = module.params['category']
application = module.params['application']
service = module.params['service']
hip_profiles = module.params['hip_profiles']
log_start = module.params['log_start']
log_end = module.params['log_end']
rule_type = module.params['rule_type']
action = module.params['action']
group_profile = module.params['group_profile']
antivirus = module.params['antivirus']
vulnerability = module.params['vulnerability']
spyware = module.params['spyware']
url_filtering = module.params['url_filtering']
file_blocking = module.params['file_blocking']
data_filtering = module.params['data_filtering']
wildfire_analysis = module.params['wildfire_analysis']
devicegroup = module.params['devicegroup']
commit = module.params['commit']
if devicegroup:
device = pandevice.panorama.Panorama(ip_address, username, password, api_key=api_key)
dev_grps = device.refresh_devices()
        for grp in dev_grps:
            if grp.name == devicegroup:
                break
        else:
            module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
else:
device = pandevice.firewall.Firewall(ip_address, username, password, api_key=api_key)
if security_rule_exists(device, rule_name):
module.fail_json(msg='Rule with the same name already exists.')
try:
sec_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag=tag,
from_zone=from_zone,
to_zone=to_zone,
source=source,
source_user=source_user,
destination=destination,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
changed = add_security_rule(device, sec_rule)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
if changed and commit:
result = _commit(device, devicegroup)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
amoikevin/gyp | test/win/gyptest-cl-enable-enhanced-instruction-set.py | 52 | 1432 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test VCCLCompilerTool EnableEnhancedInstructionSet setting.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp()
CHDIR = 'compiler-flags'
test.run_gyp('enable-enhanced-instruction-set.gyp', chdir=CHDIR)
test.build('enable-enhanced-instruction-set.gyp', test.ALL, chdir=CHDIR)
test.run_built_executable('sse_extensions', chdir=CHDIR,
stdout='/arch:SSE\n')
test.run_built_executable('sse2_extensions', chdir=CHDIR,
stdout='/arch:SSE2\n')
# /arch:AVX introduced in VS2010, but MSBuild support lagged until 2012.
if os.path.exists(test.built_file_path('avx_extensions')):
test.run_built_executable('avx_extensions', chdir=CHDIR,
stdout='/arch:AVX\n')
# /arch:IA32 introduced in VS2012.
if os.path.exists(test.built_file_path('no_extensions')):
test.run_built_executable('no_extensions', chdir=CHDIR,
stdout='/arch:IA32\n')
# /arch:AVX2 introduced in VS2013r2.
if os.path.exists(test.built_file_path('avx2_extensions')):
test.run_built_executable('avx2_extensions', chdir=CHDIR,
stdout='/arch:AVX2\n')
test.pass_test()
| bsd-3-clause |
mila-iqia/babyai | scripts/il_perf.py | 1 | 2047 | #!/usr/bin/env python3
import argparse
import pandas
import os
import json
import re
import numpy as np
from scipy import stats
from babyai import plotting as bp
parser = argparse.ArgumentParser("Analyze performance of imitation learning")
parser.add_argument("--path", default='.',
help="path to model logs")
parser.add_argument("--regex", default='.*',
help="filter out some logs")
parser.add_argument("--other", default=None,
help="path to model logs for ttest comparison")
parser.add_argument("--other_regex", default='.*',
help="filter out some logs from comparison")
parser.add_argument("--window", type=int, default=100,
help="size of sliding window average, 10 for GoToRedBallGrey, 100 otherwise")
args = parser.parse_args()
def get_data(path, regex):
df = pandas.concat(bp.load_logs(path), sort=True)
fps = bp.get_fps(df)
models = df['model'].unique()
models = [model for model in df['model'].unique() if re.match(regex, model)]
maxes = []
for model in models:
df_model = df[df['model'] == model]
success_rate = df_model['validation_success_rate']
success_rate = success_rate.rolling(args.window, center=True).mean()
success_rate = max(success_rate[np.logical_not(np.isnan(success_rate))])
print(model, success_rate)
maxes.append(success_rate)
return np.array(maxes), fps
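# Example invocation (paths and regexes are illustrative):
#   ./il_perf.py --path logs/GoToLocal --regex 'il.*' \
#       --other logs/GoToLocal-baseline --window 100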
if args.other is not None:
print("is this architecture better")
print(args.regex)
maxes, fps = get_data(args.path, args.regex)
result = {'samples': len(maxes), 'mean': maxes.mean(), 'std': maxes.std(),
'fps_mean': fps.mean(), 'fps_std': fps.std()}
print(result)
if args.other is not None:
print("\nthan this one")
maxes_ttest, fps = get_data(args.other, args.other_regex)
result = {'samples': len(maxes_ttest),
'mean': maxes_ttest.mean(), 'std': maxes_ttest.std(),
'fps_mean': fps.mean(), 'fps_std': fps.std()}
print(result)
ttest = stats.ttest_ind(maxes, maxes_ttest, equal_var=False)
print(f"\n{ttest}")
| bsd-3-clause |
boundlessgeo/QGIS | python/plugins/db_manager/db_plugins/postgis/plugins/versioning/__init__.py | 32 | 2259 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Versioning plugin for DB Manager
Description : Set up versioning support for a table
Date : Mar 12, 2012
copyright : (C) 2012 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QAction, QApplication
from qgis.PyQt.QtGui import QIcon
# The load function is called when the "db" database or either one of its
# children db objects (table o schema) is selected by the user.
# @param db is the selected database
# @param mainwindow is the DBManager mainwindow
def load(db, mainwindow):
# add the action to the DBManager menu
action = QAction(QIcon(), QApplication.translate("DBManagerPlugin", "&Change Logging…"), db)
mainwindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"), run)
# The run function is called once the user clicks on the action TopoViewer
# (look above at the load function) from the DBManager menu/toolbar.
# @param item is the selected db item (either db, schema or table)
# @param action is the clicked action on the DBManager menu/toolbar
# @param mainwindow is the DBManager mainwindow
def run(item, action, mainwindow):
from .dlg_versioning import DlgVersioning
dlg = DlgVersioning(item, mainwindow)
QApplication.restoreOverrideCursor()
try:
dlg.exec_()
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
| gpl-2.0 |
edum1978/eduengage | boilerplate/lib/basehandler.py | 8 | 12971 | # *-* coding: UTF-8 *-*
# standard library imports
import logging
import re
import traceback
import sys
# related third party imports
import webapp2
from google.appengine.api.users import NotAllowedError
from webapp2_extras import jinja2
from webapp2_extras import auth
from webapp2_extras import sessions
from google.appengine.api import taskqueue
# local application/library specific imports
from boilerplate import models
from boilerplate.lib import utils, i18n
from babel import Locale
def user_required(handler):
"""
Decorator for checking if there's a user associated
with the current session.
Will also fail if there's no session present.
"""
def check_login(self, *args, **kwargs):
"""
If handler has no login_url specified invoke a 403 error
"""
try:
auth = self.auth.get_user_by_session()
if not auth:
try:
self.auth_config['login_url'] = self.uri_for('login', continue_url=self.request.path)
self.redirect(self.auth_config['login_url'], abort=True)
except (AttributeError, KeyError), e:
self.abort(403)
else:
return handler(self, *args, **kwargs)
except AttributeError, e:
            # avoid AttributeError when the session was deleted from the server
logging.error(e)
self.auth.unset_session()
self.redirect_to('home')
return check_login
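# Usage sketch (handler and template names are hypothetical): decorate any
# handler method that must only be reachable by a logged-in user, e.g.
#   class SecureAreaHandler(BaseHandler):
#       @user_required
#       def get(self):
#           self.render_template('secure_area.html')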
def generate_csrf_token():
session = sessions.get_store().get_session()
if '_csrf_token' not in session:
session['_csrf_token'] = utils.random_string()
return session['_csrf_token']
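# The token generated here is exposed to Jinja2 as the 'csrf_token' global
# (see jinja2_factory below) and validated in BaseHandler.dispatch(); a
# template would typically embed it in every POST form, e.g.
#   <input type="hidden" name="_csrf_token" value="{{ csrf_token() }}">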
def jinja2_factory(app):
j = jinja2.Jinja2(app)
j.environment.filters.update({
# Set filters.
# ...
})
j.environment.globals.update({
# Set global variables.
'csrf_token' : generate_csrf_token,
'uri_for': webapp2.uri_for,
'getattr': getattr,
'str': str
})
j.environment.tests.update({
# Set test.
# ...
})
return j
def handle_error(request, response, exception):
exc_type, exc_value, exc_tb = sys.exc_info()
c = {
'exception': str(exception),
'url': request.url,
}
if request.app.config.get('send_mail_developer') is not False:
# send email
subject = "[{}] ERROR {}".format(request.app.config.get('environment').upper(), request.app.config.get('app_name'))
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
message = '<strong>Type:</strong> ' + exc_type.__name__ + "<br />" + \
'<strong>Description:</strong> ' + c['exception'] + "<br />" + \
'<strong>URL:</strong> ' + c['url'] + "<br />" + \
'<strong>Traceback:</strong> <br />' + '<br />'.join(lines)
email_body_path = "emails/error.txt"
        if c['exception'] != 'Error saving Email Log in datastore':
template_val = {
"app_name" : request.app.config.get('app_name'),
"message" : message,
}
email_body = jinja2.get_jinja2(factory=jinja2_factory, app=webapp2.get_app()).render_template(email_body_path, **template_val)
email_url = webapp2.uri_for('taskqueue-send-email')
for dev in request.app.config.get('developers'):
taskqueue.add(url = email_url, params={
'to': dev[1],
'subject' : subject,
'body' : email_body,
'sender' : request.app.config.get('contact_sender'),
})
status_int = hasattr(exception, 'status_int') and exception.status_int or 500
template = request.app.config.get('error_templates')[status_int]
t = jinja2.get_jinja2(factory=jinja2_factory, app=webapp2.get_app()).render_template(template, **c)
logging.error(str(status_int) + " - " + str(exception))
response.write(t)
response.set_status(status_int)
class ViewClass:
"""
ViewClass to insert variables into the template.
ViewClass is used in BaseHandler to promote variables automatically that can be used
in jinja2 templates.
Use case in a BaseHandler Class:
self.view.var1 = "hello"
self.view.array = [1, 2, 3]
self.view.dict = dict(a="abc", b="bcd")
    Can be accessed in the template by just using the variables like {{var1}} or {{dict.b}}
"""
pass
class BaseHandler(webapp2.RequestHandler):
"""
BaseHandler for all requests
Holds the auth and session properties so they
are reachable for all requests
"""
def __init__(self, request, response):
""" Override the initialiser in order to set the language.
"""
self.initialize(request, response)
self.locale = i18n.set_locale(self)
self.view = ViewClass()
def dispatch(self):
"""
Get a session store for this request.
"""
self.session_store = sessions.get_store(request=self.request)
try:
# csrf protection
if self.request.method == "POST" and not self.request.path.startswith('/taskqueue'):
token = self.session.get('_csrf_token')
if not token or token != self.request.get('_csrf_token'):
self.abort(403)
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def auth(self):
return auth.get_auth()
@webapp2.cached_property
def session_store(self):
return sessions.get_store(request=self.request)
@webapp2.cached_property
def session(self):
# Returns a session using the default cookie key.
return self.session_store.get_session()
@webapp2.cached_property
def messages(self):
return self.session.get_flashes(key='_messages')
def add_message(self, message, level=None):
self.session.add_flash(message, level, key='_messages')
@webapp2.cached_property
def auth_config(self):
"""
Dict to hold urls for login/logout
"""
return {
'login_url': self.uri_for('login'),
'logout_url': self.uri_for('logout')
}
@webapp2.cached_property
def language(self):
return str(Locale.parse(self.locale).language)
@webapp2.cached_property
def user(self):
return self.auth.get_user_by_session()
@webapp2.cached_property
def user_id(self):
return str(self.user['user_id']) if self.user else None
@webapp2.cached_property
def user_key(self):
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
return user_info.key
return None
@webapp2.cached_property
def username(self):
if self.user:
try:
user_info = models.User.get_by_id(long(self.user_id))
return str(user_info.username)
except AttributeError, e:
                # avoid AttributeError when the session was deleted from the server
logging.error(e)
self.auth.unset_session()
self.redirect_to('home')
return None
@webapp2.cached_property
def email(self):
if self.user:
try:
user_info = models.User.get_by_id(long(self.user_id))
return user_info.email
except AttributeError, e:
                # avoid AttributeError when the session was deleted from the server
logging.error(e)
self.auth.unset_session()
self.redirect_to('home')
return None
@webapp2.cached_property
def provider_uris(self):
login_urls = {}
continue_url = self.request.get('continue_url')
for provider in self.provider_info:
if continue_url:
login_url = self.uri_for("social-login", provider_name=provider, continue_url=continue_url)
else:
login_url = self.uri_for("social-login", provider_name=provider)
login_urls[provider] = login_url
return login_urls
@webapp2.cached_property
def provider_info(self):
return models.SocialUser.PROVIDERS_INFO
@webapp2.cached_property
def path_for_language(self):
"""
Get the current path + query_string without language parameter (hl=something)
Useful to put it on a template to concatenate with '&hl=NEW_LOCALE'
Example: .../?hl=en_US
"""
path_lang = re.sub(r'(^hl=(\w{5})\&*)|(\&hl=(\w{5})\&*?)', '', str(self.request.query_string))
return self.request.path + "?" if path_lang == "" else str(self.request.path) + "?" + path_lang
@property
def locales(self):
"""
returns a dict of locale codes to locale display names in both the current locale and the localized locale
example: if the current locale is es_ES then locales['en_US'] = 'Ingles (Estados Unidos) - English (United States)'
"""
if not self.app.config.get('locales'):
return None
locales = {}
for l in self.app.config.get('locales'):
current_locale = Locale.parse(self.locale)
language = current_locale.languages[l.split('_')[0]]
territory = current_locale.territories[l.split('_')[1]]
localized_locale_name = Locale.parse(l).display_name.capitalize()
locales[l] = language.capitalize() + " (" + territory.capitalize() + ") - " + localized_locale_name
return locales
@webapp2.cached_property
def is_mobile(self):
return utils.set_device_cookie_and_return_bool(self)
@webapp2.cached_property
def jinja2(self):
return jinja2.get_jinja2(factory=jinja2_factory, app=self.app)
@webapp2.cached_property
def get_base_layout(self):
"""
Get the current base layout template for jinja2 templating. Uses the variable base_layout set in config
or if there is a base_layout defined, use the base_layout.
"""
return self.base_layout if hasattr(self, 'base_layout') else self.app.config.get('base_layout')
def set_base_layout(self, layout):
"""
Set the base_layout variable, thereby overwriting the default layout template name in config.py.
"""
self.base_layout = layout
def render_template(self, filename, **kwargs):
locales = self.app.config.get('locales') or []
locale_iso = None
language = ''
territory = ''
language_id = self.app.config.get('app_lang')
if self.locale and len(locales) > 1:
locale_iso = Locale.parse(self.locale)
language_id = locale_iso.language
territory_id = locale_iso.territory
language = locale_iso.languages[language_id]
territory = locale_iso.territories[territory_id]
# make all self.view variables available in jinja2 templates
if hasattr(self, 'view'):
kwargs.update(self.view.__dict__)
# set or overwrite special vars for jinja templates
kwargs.update({
'google_analytics_domain' : self.app.config.get('google_analytics_domain'),
'google_analytics_code' : self.app.config.get('google_analytics_code'),
'app_name': self.app.config.get('app_name'),
'user_id': self.user_id,
'username': self.username,
'email': self.email,
'url': self.request.url,
'path': self.request.path,
'query_string': self.request.query_string,
'path_for_language': self.path_for_language,
'is_mobile': self.is_mobile,
'locale_iso': locale_iso, # babel locale object
'locale_language': language.capitalize() + " (" + territory.capitalize() + ")", # babel locale object
'locale_language_id': language_id, # babel locale object
'locales': self.locales,
'provider_uris': self.provider_uris,
'provider_info': self.provider_info,
'enable_federated_login': self.app.config.get('enable_federated_login'),
'base_layout': self.get_base_layout
})
kwargs.update(self.auth_config)
if hasattr(self, 'form'):
kwargs['form'] = self.form
if self.messages:
kwargs['messages'] = self.messages
self.response.headers.add_header('X-UA-Compatible', 'IE=Edge,chrome=1')
self.response.write(self.jinja2.render_template(filename, **kwargs)) | lgpl-3.0 |
CloverHealth/airflow | airflow/example_dags/example_passing_params_via_test_command.py | 14 | 2351 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
dag = DAG("example_passing_params_via_test_command",
default_args={"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1)},
schedule_interval='*/1 * * * *',
dagrun_timeout=timedelta(minutes=4)
)
def my_py_command(ds, **kwargs):
# Print out the "foo" param passed in via
# `airflow test example_passing_params_via_test_command run_this <date>
# -tp '{"foo":"bar"}'`
if kwargs["test_mode"]:
print(" 'foo' was passed in via test={} command : kwargs[params][foo] \
= {}".format(kwargs["test_mode"], kwargs["params"]["foo"]))
# Print out the value of "miff", passed in below via the Python Operator
print(" 'miff' was passed in via task params = {}".format(kwargs["params"]["miff"]))
return 1
my_templated_command = """
echo " 'foo was passed in via Airflow CLI Test command with value {{ params.foo }} "
echo " 'miff was passed in via BashOperator with value {{ params.miff }} "
"""
run_this = PythonOperator(
task_id='run_this',
provide_context=True,
python_callable=my_py_command,
params={"miff": "agg"},
dag=dag)
also_run_this = BashOperator(
task_id='also_run_this',
bash_command=my_templated_command,
params={"miff": "agg"},
dag=dag)
also_run_this.set_upstream(run_this)
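# To exercise the "foo" param handled in my_py_command above (date and value
# are illustrative):
#   airflow test example_passing_params_via_test_command run_this 2018-01-01 \
#       -tp '{"foo": "bar"}'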
| apache-2.0 |
Luobiny/bioconda-recipes | recipes/mtnucratio/mtnucratio.py | 20 | 2667 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'MTNucRatioCalculator-0.7.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
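# Example (illustrative arguments):
#   jvm_opts(['-Xmx2g', '-Dsamtools.path=/opt', 'in.bam'])
# returns (['-Xmx2g'], ['-Dsamtools.path=/opt'], ['in.bam']); with no -Xm*
# argument and _JAVA_OPTIONS unset, mem_opts falls back to
# default_jvm_mem_opts.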
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
if '--jar_dir' in sys.argv[1:]:
print(jar_path)
else:
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| mit |
ahamilton55/ansible | lib/ansible/modules/network/cloudengine/ce_netconf.py | 46 | 5922 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_netconf
version_added: "2.4"
short_description: Run an arbitrary netconf command on HUAWEI CloudEngine switches.
description:
- Sends an arbitrary netconf command on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
rpc:
description:
- The type of rpc.
required: true
choices: ['get', 'edit-config', 'execute-action', 'execute-cli']
cfg_xml:
description:
- The config xml string.
required: true
'''
EXAMPLES = '''
- name: CloudEngine netconf test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Netconf get operation"
ce_netconf:
rpc: get
cfg_xml: '<filter type=\"subtree\">
<vlan xmlns=\"http://www.huawei.com/netconf/vrp\" content-version=\"1.0\" format-version=\"1.0\">
<vlans>
<vlan>
<vlanId>10</vlanId>
<vlanif>
<ifName></ifName>
<cfgBand></cfgBand>
<dampTime></dampTime>
</vlanif>
</vlan>
</vlans>
</vlan>
</filter>'
provider: "{{ cli }}"
- name: "Netconf edit-config operation"
ce_netconf:
rpc: edit-config
cfg_xml: '<config>
<aaa xmlns=\"http://www.huawei.com/netconf/vrp\" content-version=\"1.0\" format-version=\"1.0\">
<authenticationSchemes>
<authenticationScheme operation=\"create\">
<authenSchemeName>default_wdz</authenSchemeName>
<firstAuthenMode>local</firstAuthenMode>
<secondAuthenMode>invalid</secondAuthenMode>
</authenticationScheme>
</authenticationSchemes>
</aaa>
</config>'
provider: "{{ cli }}"
- name: "Netconf execute-action operation"
ce_netconf:
rpc: execute-action
cfg_xml: '<action>
<l2mc xmlns=\"http://www.huawei.com/netconf/vrp\" content-version=\"1.0\" format-version=\"1.0\">
<l2McResetAllVlanStatis>
<addrFamily>ipv4unicast</addrFamily>
</l2McResetAllVlanStatis>
</l2mc>
</action>'
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"result": ["ok"]}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config
from ansible.module_utils.ce import execute_nc_action, ce_argument_spec, execute_nc_cli
def main():
""" main """
argument_spec = dict(
rpc=dict(choices=['get', 'edit-config',
'execute-action', 'execute-cli'], required=True),
cfg_xml=dict(required=True)
)
argument_spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
rpc = module.params['rpc']
cfg_xml = module.params['cfg_xml']
changed = False
end_state = dict()
if rpc == "get":
response = get_nc_config(module, cfg_xml)
if "<data/>" in response:
end_state["result"] = "<data/>"
else:
tmp1 = response.split(r"<data>")
tmp2 = tmp1[1].split(r"</data>")
result = tmp2[0].split("\n")
end_state["result"] = result
elif rpc == "edit-config":
response = set_nc_config(module, cfg_xml)
if "<ok/>" not in response:
module.fail_json(msg='rpc edit-config failed.')
changed = True
end_state["result"] = "ok"
elif rpc == "execute-action":
response = execute_nc_action(module, cfg_xml)
if "<ok/>" not in response:
module.fail_json(msg='rpc execute-action failed.')
changed = True
end_state["result"] = "ok"
elif rpc == "execute-cli":
response = execute_nc_cli(module, cfg_xml)
if "<data/>" in response:
end_state["result"] = "<data/>"
else:
            tmp1 = response.split(r"<data>")
tmp2 = tmp1[1].split(r"</data>")
result = tmp2[0].split("\n")
end_state["result"] = result
else:
module.fail_json(msg='please input correct rpc.')
results = dict()
results['changed'] = changed
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
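# --- Hedged sketch (added for explanation; not part of the module) ---
# Mirrors how the branches above peel the payload out of a raw netconf reply
# string; the `response` argument is a hypothetical reply.
def _demo_parse_data(response):
    if "<data/>" in response:
        return "<data/>"
    body = response.split("<data>")[1].split("</data>")[0]
    return body.split("\n")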
| gpl-3.0 |
OpenBfS/dokpool-plone | Plone/src/elan.sitrep/elan/sitrep/content/srmoduleconfig.py | 1 | 3765 | # -*- coding: utf-8 -*-
#
# File: srmoduleconfig.py
#
# Copyright (c) 2017 by Condat AG
# Generator: ConPD2
# http://www.condat.de
#
__author__ = ''
__docformat__ = 'plaintext'
"""Definition of the SRModuleConfig content type. See srmoduleconfig.py for more
explanation on the statements below.
"""
from AccessControl import ClassSecurityInfo
from elan.sitrep import DocpoolMessageFactory as _
from plone.autoform import directives
from plone.dexterity.content import Item
from plone.dexterity.interfaces import IEditFinishedEvent
from plone.supermodel import model
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import log
from z3c.relationfield.schema import RelationChoice
from z3c.relationfield.schema import RelationList
from zope import schema
from zope.component import adapter
from zope.interface import implementer
class ISRModuleConfig(model.Schema):
"""
"""
modType = schema.Choice(
title=_(u'label_srmoduleconfig_modtype', default=u'Module Type'),
description=_(u'description_srmoduleconfig_modtype', default=u''),
required=True,
source="elan.sitrep.vocabularies.ModuleTypes",
)
docSelection = RelationChoice(
title=_(
u'label_srmoduleconfig_docselection',
default=u'Collection for relevant documents',
),
description=_(
u'description_srmoduleconfig_docselection',
default=u'This collection defines a pre-selection of possible documents to reference within this module.',
),
required=False,
source="elan.sitrep.vocabularies.Collections",
)
textBlocks = RelationList(
title=_(u'label_srmoduleconfig_textblocks', default=u'Text Blocks'),
description=_(u'description_srmoduleconfig_textblocks', default=u''),
required=False,
value_type=RelationChoice(
title=_("Text Blocks"), source="elan.sitrep.vocabularies.TextBlocks"
),
)
defaultTextBlocks = RelationList(
title=_(
u'label_srmoduletype_defaulttextblocks',
default=u'Default Text (when freshly created)',
),
description=_(
u'description_srmoduletype_defaulttextblocks',
default=u''),
required=False,
value_type=RelationChoice(
title=_("Default Text"), source="elan.sitrep.vocabularies.TextBlocks"
),
)
directives.widget(docSelection='z3c.form.browser.select.SelectFieldWidget')
directives.widget(
textBlocks='z3c.form.browser.select.CollectionSelectFieldWidget')
directives.widget(
defaultTextBlocks='z3c.form.browser.select.CollectionSelectFieldWidget')
@implementer(ISRModuleConfig)
class SRModuleConfig(Item):
"""
"""
security = ClassSecurityInfo()
def getSRModuleNames(self):
"""
Index Method
"""
return [self.modType]
def getSRModuleRefs(self):
"""
Index Method
"""
return [self.UID()]
def currentDocuments(self):
"""
Return the documents from the referenced collection - if any.
"""
if self.docSelection:
coll = self.docSelection.to_object
return coll.results(batch=False)
else:
return []
def currentTextBlocks(self):
"""
"""
return [tb.to_object for tb in (self.textBlocks or [])]
@adapter(ISRModuleConfig, IEditFinishedEvent)
def updated(obj, event=None):
log("SRModuleConfig updated: %s" % str(obj))
sr_cat = getToolByName(obj, "sr_catalog")
sr_cat._reindexObject(obj)
if obj.textBlocks:
for tb in obj.textBlocks:
sr_cat._reindexObject(tb.to_object)
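# --- Hedged sketch (illustration only; not part of the original add-on) ---
# Equivalent inline form of SRModuleConfig.currentTextBlocks(): dereference
# each z3c.relationfield relation, tolerating an unset field. The `cfg`
# argument is assumed to be an SRModuleConfig instance.
def _demo_current_textblocks(cfg):
    return [rel.to_object for rel in (cfg.textBlocks or [])]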
| gpl-3.0 |
safwanrahman/mozillians | vendor-local/lib/python/unidecode/x0fa.py | 252 | 4406 | data = (
'Chey ', # 0x00
'Thak ', # 0x01
'Thak ', # 0x02
'Thang ', # 0x03
'Thayk ', # 0x04
'Thong ', # 0x05
'Pho ', # 0x06
'Phok ', # 0x07
'Hang ', # 0x08
'Hang ', # 0x09
'Hyen ', # 0x0a
'Hwak ', # 0x0b
'Wu ', # 0x0c
'Huo ', # 0x0d
'[?] ', # 0x0e
'[?] ', # 0x0f
'Zhong ', # 0x10
'[?] ', # 0x11
'Qing ', # 0x12
'[?] ', # 0x13
'[?] ', # 0x14
'Xi ', # 0x15
'Zhu ', # 0x16
'Yi ', # 0x17
'Li ', # 0x18
'Shen ', # 0x19
'Xiang ', # 0x1a
'Fu ', # 0x1b
'Jing ', # 0x1c
'Jing ', # 0x1d
'Yu ', # 0x1e
'[?] ', # 0x1f
'Hagi ', # 0x20
'[?] ', # 0x21
'Zhu ', # 0x22
'[?] ', # 0x23
'[?] ', # 0x24
'Yi ', # 0x25
'Du ', # 0x26
'[?] ', # 0x27
'[?] ', # 0x28
'[?] ', # 0x29
'Fan ', # 0x2a
'Si ', # 0x2b
'Guan ', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| bsd-3-clause |
camptocamp/odoo | addons/crm_claim/__init__.py | 390 | 1078 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ptisserand/docker-registry | tests/test_s3.py | 30 | 6105 | # -*- coding: utf-8 -*-
import StringIO
import sys
import time
from nose import tools
from docker_registry.core import exceptions
import docker_registry.testing as testing
from docker_registry.testing import mock_boto # noqa
from . import mock_s3 # noqa
class StringIOWithError(StringIO.StringIO):
'''Throw IOError after reaching EOF.'''
def read(self, size):
if self.pos == self.len:
raise IOError('Reading beyond EOF')
return StringIO.StringIO.read(self, size)
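# --- Hedged example (illustration only; not part of the original tests) ---
# Demonstrates the failure mode StringIOWithError is built for: a plain
# StringIO returns '' at EOF, whereas this subclass raises IOError.
def _demo_stringio_with_error():
    io = StringIOWithError('ab')
    io.read(2)      # consumes the whole buffer
    try:
        io.read(1)  # reading past EOF now raises
    except IOError:
        return True
    return False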
class TestDriver(testing.Driver):
'''Extra tests for coverage completion.'''
def __init__(self):
self.scheme = 's3'
self.path = ''
self.config = testing.Config({})
def tearDown(self):
self._storage._boto_bucket.delete()
super(TestDriver, self).tearDown()
@tools.raises(exceptions.FileNotFoundError)
def test_list_bucket(self):
# Add a couple of bucket keys
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
content = self.gen_random_string()
self._storage.put_content(filename1, content)
# Check bucket key is stored in normalized form
self._storage.put_content(filename2 + '/', content)
# Check both keys are in the bucket
assert sorted([filename1, filename2]) == sorted(
list(self._storage.list_directory()))
# Check listing bucket raises exception after removing keys
self._storage.remove(filename1)
self._storage.remove(filename2)
s = self._storage.list_directory()
s.next()
def test_stream_write(self):
# Check stream write with buffer bigger than default 5MB
self._storage.buffer_size = 7 * 1024 * 1024
filename = self.gen_random_string()
# Test 8MB
content = self.gen_random_string(8 * 1024 * 1024)
io = StringIOWithError(content)
assert not self._storage.exists(filename)
try:
self._storage.stream_write(filename, io)
except IOError:
pass
assert self._storage.exists(filename)
# Test that EOFed io string throws IOError on lib/storage/s3
try:
self._storage.stream_write(filename, io)
except IOError:
pass
# Cleanup
io.close()
self._storage.remove(filename)
self._storage.buffer_size = 5 * 1024 * 1024
assert not self._storage.exists(filename)
def test_init_path(self):
# s3 storage _init_path result keys are relative (no / at start)
root_path = self._storage._root_path
if root_path.startswith('/'):
self._storage._root_path = root_path[1:]
assert not self._storage._init_path().startswith('/')
self._storage._root_path = root_path
def test_debug_key(self):
# Create a valid s3 key object to debug
filename = self.gen_random_string()
content = self.gen_random_string()
self._storage.put_content(filename, content)
# Get filename key path as stored
key_path = self._storage._init_path(filename)
key = self._storage._boto_bucket.lookup(key_path)
self._storage._debug_key(key)
# Capture debugged output
saved_stdout = sys.stdout
output = StringIO.StringIO()
sys.stdout = output
# As key is mocked for unittest purposes, we call make_request directly
dummy = "################\n('d', 1)\n{'v': 2}\n################\n"
# '{}\n{}\n{}\n{}\n'.format(
# '#' * 16, ('d', 1), {'v': 2}, '#' * 16)
result = self._storage._boto_bucket.connection.make_request(
'd', 1, v=2)
assert output.getvalue() == dummy
assert result == 'request result'
sys.stdout = saved_stdout
# We don't call self._storage.remove(filename) here to ensure tearDown
        # cleans up properly and that other tests keep running as expected.
# Validation test for docker-index#486
def test_get_tags(self):
store = self._storage
store._root_path = 'my/custom/path'
store._init_path()
assert store._root_path == 'my/custom/path'
tag_path = store.tag_path('test', 'test', '0.0.2')
store.put_content(tag_path, 'randomdata')
tags_path = store.tag_path('test', 'test')
for fname in store.list_directory(tags_path):
full_tag_name = fname.split('/').pop()
if not full_tag_name == 'tag_0.0.2':
continue
try:
store.get_content(fname)
except exceptions.FileNotFoundError:
pass
except Exception as e:
raise e
else:
assert False
tag_content = store.get_content(tag_path)
assert tag_content == 'randomdata'
def test_consistency_latency(self):
self.testCount = -1
mockKey = mock_boto.Key()
def mockExists():
self.testCount += 1
return self.testCount == 1
mockKey.exists = mockExists
mockKey.get_contents_as_string = lambda: "Foo bar"
self._storage.makeKey = lambda x: mockKey
startTime = time.time()
content = self._storage.get_content("/FOO")
waitTime = time.time() - startTime
assert waitTime >= 0.1, ("Waiting time was less than %sms "
"(actual : %sms)" %
(0.1 * 1000, waitTime * 1000))
assert content == "Foo bar", ("expected : %s; actual: %s" %
("Foo bar", content))
@tools.raises(exceptions.FileNotFoundError)
def test_too_many_read_retries(self):
self.testCount = -1
mockKey = mock_boto.Key()
def mockExists():
self.testCount += 1
return self.testCount == 5
mockKey.exists = mockExists
mockKey.get_contents_as_string = lambda: "Foo bar"
self._storage.makeKey = lambda x: mockKey
self._storage.get_content("/FOO")
| apache-2.0 |
MoritzS/django | django/utils/archive.py | 52 | 7462 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <[email protected]> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import stat
import tarfile
import zipfile
class ArchiveException(Exception):
"""
Base exception class for all archive errors.
"""
class UnrecognizedArchiveFormat(ArchiveException):
"""
Error raised when passed file is not a recognized archive format.
"""
def extract(path, to_path=''):
"""
Unpack the tar or zip file at the specified path to the directory
specified by to_path.
"""
with Archive(path) as archive:
archive.extract(to_path)
class Archive:
"""
The external API class that encapsulates an archive implementation.
"""
def __init__(self, file):
self._archive = self._archive_cls(file)(file)
@staticmethod
def _archive_cls(file):
cls = None
if isinstance(file, str):
filename = file
else:
try:
filename = file.name
except AttributeError:
raise UnrecognizedArchiveFormat(
"File object not a recognized archive format.")
base, tail_ext = os.path.splitext(filename.lower())
cls = extension_map.get(tail_ext)
if not cls:
base, ext = os.path.splitext(base)
cls = extension_map.get(ext)
if not cls:
raise UnrecognizedArchiveFormat(
"Path not a recognized archive format: %s" % filename)
return cls
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def extract(self, to_path=''):
self._archive.extract(to_path)
def list(self):
self._archive.list()
def close(self):
self._archive.close()
class BaseArchive:
"""
Base Archive class. Implementations should inherit this class.
"""
@staticmethod
def _copy_permissions(mode, filename):
"""
If the file in the archive has some permissions (this assumes a file
won't be writable/executable without being readable), apply those
permissions to the unarchived file.
"""
if mode & stat.S_IROTH:
os.chmod(filename, mode)
def split_leading_dir(self, path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(self, paths):
"""
Return True if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive).
"""
common_prefix = None
for path in paths:
prefix, rest = self.split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def extract(self):
raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')
def list(self):
raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
class TarArchive(BaseArchive):
def __init__(self, file):
self._archive = tarfile.open(file)
def list(self, *args, **kwargs):
self._archive.list(*args, **kwargs)
def extract(self, to_path):
members = self._archive.getmembers()
leading = self.has_leading_dir(x.name for x in members)
for member in members:
name = member.name
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
if member.isdir():
if filename and not os.path.exists(filename):
os.makedirs(filename)
else:
                extracted = None
                try:
extracted = self._archive.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
print("In the tar file %s the member %s is invalid: %s" %
(name, member.name, exc))
else:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(extracted, outfile)
self._copy_permissions(member.mode, filename)
finally:
if extracted:
extracted.close()
def close(self):
self._archive.close()
class ZipArchive(BaseArchive):
def __init__(self, file):
self._archive = zipfile.ZipFile(file)
def list(self, *args, **kwargs):
self._archive.printdir(*args, **kwargs)
def extract(self, to_path):
namelist = self._archive.namelist()
leading = self.has_leading_dir(namelist)
for name in namelist:
data = self._archive.read(name)
info = self._archive.getinfo(name)
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if filename.endswith(('/', '\\')):
# A directory
if not os.path.exists(filename):
os.makedirs(filename)
else:
with open(filename, 'wb') as outfile:
outfile.write(data)
# Convert ZipInfo.external_attr to mode
mode = info.external_attr >> 16
self._copy_permissions(mode, filename)
def close(self):
self._archive.close()
extension_map = {
'.tar': TarArchive,
'.tar.bz2': TarArchive,
'.tar.gz': TarArchive,
'.tgz': TarArchive,
'.tz2': TarArchive,
'.zip': ZipArchive,
}
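# --- Hedged usage sketch (not part of Django; paths are hypothetical) ---
def _demo_extract():
    # extension_map drives dispatch: '.tar.gz' selects TarArchive and '.zip'
    # selects ZipArchive via Archive._archive_cls().
    extract('/tmp/example.tar.gz', to_path='/tmp/unpacked')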
| bsd-3-clause |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/sqlalchemy/util/__init__.py | 10 | 2520 | # util/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .compat import callable, cmp, reduce, \
threading, py3k, py33, py36, py2k, jython, pypy, cpython, win32, \
pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \
raise_from_cause, text_type, safe_kwarg, string_types, int_types, \
binary_type, nested, \
quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\
unquote_plus, unquote, b64decode, b64encode, byte_buffer, itertools_filter,\
iterbytes, StringIO, inspect_getargspec, zip_longest
from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \
Properties, OrderedProperties, ImmutableProperties, OrderedDict, \
OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \
column_dict, ordered_column_set, populate_column_dict, unique_list, \
UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \
to_column_set, update_copy, flatten_iterator, has_intersection, \
LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence, \
coerce_generator_arg, lightweight_named_tuple
from .langhelpers import iterate_attributes, class_hierarchy, \
portable_instancemethod, unbound_method_to_callable, \
getargspec_init, format_argspec_init, format_argspec_plus, \
get_func_kwargs, get_cls_kwargs, decorator, as_interface, \
memoized_property, memoized_instancemethod, md5_hex, \
group_expirable_memoized_property, dependencies, decode_slice, \
monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\
duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\
classproperty, set_creation_order, warn_exception, warn, NoneType,\
constructor_copy, methods_equivalent, chop_traceback, asint,\
generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \
safe_reraise,\
get_callable_argspec, only_once, attrsetter, ellipses_string, \
warn_limited, map_bits, MemoizedSlots, EnsureKWArgType
from .deprecations import warn_deprecated, warn_pending_deprecation, \
deprecated, pending_deprecation, inject_docstring_text
# things that used to be not always available,
# but are now as of current support Python versions
from collections import defaultdict
from functools import partial
from functools import update_wrapper
from contextlib import contextmanager
| mit |
ojii/sandlib | lib/lib-python/2.7/importlib/__init__.py | 456 | 1327 | """Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
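# --- Hedged example (illustration only; not part of the backport) ---
def _demo_import_module():
    # Absolute import works as usual:
    json_mod = import_module('json')
    # A relative name is resolved against the anchor package first, e.g.
    # import_module('.sibling', package='mypkg') yields 'mypkg.sibling'
    # (the 'mypkg' names are hypothetical).
    return json_mod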
| bsd-3-clause |
nelsongoh/tembotsu | libs/future/backports/email/generator.py | 82 | 19520 | # Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Classes to generate plain text from a message object tree."""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import super
from future.builtins import str
__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
import re
import sys
import time
import random
import warnings
from io import StringIO, BytesIO
from future.backports.email._policybase import compat32
from future.backports.email.header import Header
from future.backports.email.utils import _has_surrogates
import future.backports.email.charset as _charset
UNDERSCORE = '_'
NL = '\n' # XXX: no longer used by the code below.
fcre = re.compile(r'^From ', re.MULTILINE)
class Generator(object):
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, **_3to2kwargs):
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
else: policy = None
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The default is 78, as recommended (but not required)
by RFC 2822.
The policy keyword specifies a policy object that controls a number of
aspects of the generator's operation. The default policy maintains
backward compatibility.
"""
self._fp = outfp
self._mangle_from_ = mangle_from_
self.maxheaderlen = maxheaderlen
self.policy = policy
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False, linesep=None):
r"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
linesep specifies the characters used to indicate a new line in
the output. The default value is determined by the policy.
"""
# We use the _XXX constants for operating on data that comes directly
# from the msg, and _encoded_XXX constants for operating on data that
# has already been converted (to bytes in the BytesGenerator) and
# inserted into a temporary buffer.
policy = msg.policy if self.policy is None else self.policy
if linesep is not None:
policy = policy.clone(linesep=linesep)
if self.maxheaderlen is not None:
policy = policy.clone(max_line_length=self.maxheaderlen)
self._NL = policy.linesep
self._encoded_NL = self._encode(self._NL)
self._EMPTY = ''
        self._encoded_EMPTY = self._encode('')
# Because we use clone (below) when we recursively process message
# subparts, and because clone uses the computed policy (not None),
# submessages will automatically get set to the computed policy when
# they are processed by this code.
old_gen_policy = self.policy
old_msg_policy = msg.policy
try:
self.policy = policy
msg.policy = policy
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
self.write(ufrom + self._NL)
self._write(msg)
finally:
self.policy = old_gen_policy
msg.policy = old_msg_policy
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp,
self._mangle_from_,
None, # Use policy setting, which we've adjusted
policy=self.policy)
#
# Protected interface - undocumented ;/
#
# Note that we use 'self.write' when what we are writing is coming from
# the source, and self._fp.write when what we are writing is coming from a
# buffer (because the Bytes subclass has already had a chance to transform
# the data in its write method in that case). This is an entirely
# pragmatic split determined by experiment; we could be more general by
# always using write and having the Bytes subclass write method detect when
# it has already transformed the input; but, since this whole thing is a
# hack anyway this seems good enough.
# Similarly, we have _XXX and _encoded_XXX attributes that are used on
# source and buffer data, respectively.
_encoded_EMPTY = ''
def _new_buffer(self):
# BytesGenerator overrides this to return BytesIO.
return StringIO()
def _encode(self, s):
# BytesGenerator overrides this to encode strings to bytes.
return s
def _write_lines(self, lines):
# We have to transform the line endings.
if not lines:
return
lines = lines.splitlines(True)
for line in lines[:-1]:
self.write(line.rstrip('\r\n'))
self.write(self._NL)
laststripped = lines[-1].rstrip('\r\n')
self.write(laststripped)
if len(lines[-1]) != len(laststripped):
self.write(self._NL)
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a buffer. Then we write the
# headers and the buffer contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._fp = sfp = self._new_buffer()
self._dispatch(msg)
finally:
self._fp = oldfp
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
meth = getattr(self, '_handle_' + specific, None)
if meth is None:
generic = main.replace('-', '_')
meth = getattr(self, '_handle_' + generic, None)
if meth is None:
meth = self._writeBody
meth(msg)
#
# Default handlers
#
def _write_headers(self, msg):
for h, v in msg.raw_items():
self.write(self.policy.fold(h, v))
# A blank line always separates headers from body
self.write(self._NL)
#
# Handlers for writing types and subtypes
#
def _handle_text(self, msg):
payload = msg.get_payload()
if payload is None:
return
if not isinstance(payload, str):
raise TypeError('string payload expected: %s' % type(payload))
if _has_surrogates(msg._payload):
charset = msg.get_param('charset')
if charset is not None:
del msg['content-transfer-encoding']
msg.set_payload(payload, charset)
payload = msg.get_payload()
if self._mangle_from_:
payload = fcre.sub('>From ', payload)
self._write_lines(payload)
# Default body handler
_writeBody = _handle_text
def _handle_multipart(self, msg):
# The trick here is to write out each part separately, merge them all
# together, and then make sure that the boundary we've chosen isn't
# present in the payload.
msgtexts = []
subparts = msg.get_payload()
if subparts is None:
subparts = []
elif isinstance(subparts, str):
# e.g. a non-strict parse of a message with no starting boundary.
self.write(subparts)
return
elif not isinstance(subparts, list):
# Scalar payload
subparts = [subparts]
for part in subparts:
s = self._new_buffer()
g = self.clone(s)
g.flatten(part, unixfrom=False, linesep=self._NL)
msgtexts.append(s.getvalue())
# BAW: What about boundaries that are wrapped in double-quotes?
boundary = msg.get_boundary()
if not boundary:
# Create a boundary that doesn't appear in any of the
# message texts.
alltext = self._encoded_NL.join(msgtexts)
boundary = self._make_boundary(alltext)
msg.set_boundary(boundary)
# If there's a preamble, write it out, with a trailing CRLF
if msg.preamble is not None:
if self._mangle_from_:
preamble = fcre.sub('>From ', msg.preamble)
else:
preamble = msg.preamble
self._write_lines(preamble)
self.write(self._NL)
# dash-boundary transport-padding CRLF
self.write('--' + boundary + self._NL)
# body-part
if msgtexts:
self._fp.write(msgtexts.pop(0))
# *encapsulation
# --> delimiter transport-padding
# --> CRLF body-part
for body_part in msgtexts:
# delimiter transport-padding CRLF
self.write(self._NL + '--' + boundary + self._NL)
# body-part
self._fp.write(body_part)
# close-delimiter transport-padding
self.write(self._NL + '--' + boundary + '--')
if msg.epilogue is not None:
self.write(self._NL)
if self._mangle_from_:
epilogue = fcre.sub('>From ', msg.epilogue)
else:
epilogue = msg.epilogue
self._write_lines(epilogue)
def _handle_multipart_signed(self, msg):
# The contents of signed parts has to stay unmodified in order to keep
# the signature intact per RFC1847 2.1, so we disable header wrapping.
# RDM: This isn't enough to completely preserve the part, but it helps.
p = self.policy
self.policy = p.clone(max_line_length=0)
try:
self._handle_multipart(msg)
finally:
self.policy = p
def _handle_message_delivery_status(self, msg):
# We can't just write the headers directly to self's file object
# because this will leave an extra newline between the last header
# block and the boundary. Sigh.
blocks = []
for part in msg.get_payload():
s = self._new_buffer()
g = self.clone(s)
g.flatten(part, unixfrom=False, linesep=self._NL)
text = s.getvalue()
lines = text.split(self._encoded_NL)
# Strip off the unnecessary trailing empty line
if lines and lines[-1] == self._encoded_EMPTY:
blocks.append(self._encoded_NL.join(lines[:-1]))
else:
blocks.append(text)
# Now join all the blocks with an empty line. This has the lovely
# effect of separating each block with an empty line, but not adding
# an extra one after the last one.
self._fp.write(self._encoded_NL.join(blocks))
def _handle_message(self, msg):
s = self._new_buffer()
g = self.clone(s)
# The payload of a message/rfc822 part should be a multipart sequence
# of length 1. The zeroth element of the list should be the Message
# object for the subpart. Extract that object, stringify it, and
# write it out.
# Except, it turns out, when it's a string instead, which happens when
# and only when HeaderParser is used on a message of mime type
# message/rfc822. Such messages are generated by, for example,
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
# in that case we just emit the string body.
payload = msg._payload
if isinstance(payload, list):
g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL)
payload = s.getvalue()
else:
payload = self._encode(payload)
self._fp.write(payload)
# This used to be a module level function; we use a classmethod for this
# and _compile_re so we can continue to provide the module level function
# for backward compatibility by doing
    #   _make_boundary = Generator._make_boundary
# at the end of the module. It *is* internal, so we could drop that...
@classmethod
def _make_boundary(cls, text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
@classmethod
def _compile_re(cls, s, flags):
return re.compile(s, flags)
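# --- Hedged example (added for illustration; not part of the original module) ---
def _demo_make_boundary():
    # _make_boundary() regenerates/suffixes the candidate until no line of the
    # message text matches it; the sample body below is hypothetical.
    text = "preamble\n--candidate--\nbody"
    return Generator._make_boundary(text)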
class BytesGenerator(Generator):
"""Generates a bytes version of a Message object tree.
Functionally identical to the base Generator except that the output is
bytes and not string. When surrogates were used in the input to encode
bytes, these are decoded back to bytes for output. If the policy has
cte_type set to 7bit, then the message is transformed such that the
non-ASCII bytes are properly content transfer encoded, using the charset
unknown-8bit.
The outfp object must accept bytes in its write method.
"""
# Bytes versions of this constant for use in manipulating data from
# the BytesIO buffer.
_encoded_EMPTY = b''
def write(self, s):
self._fp.write(str(s).encode('ascii', 'surrogateescape'))
def _new_buffer(self):
return BytesIO()
def _encode(self, s):
return s.encode('ascii')
def _write_headers(self, msg):
# This is almost the same as the string version, except for handling
# strings with 8bit bytes.
for h, v in msg.raw_items():
self._fp.write(self.policy.fold_binary(h, v))
# A blank line always separates headers from body
self.write(self._NL)
def _handle_text(self, msg):
# If the string has surrogates the original source was bytes, so
# just write it back out.
if msg._payload is None:
return
if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit':
if self._mangle_from_:
msg._payload = fcre.sub(">From ", msg._payload)
self._write_lines(msg._payload)
else:
super(BytesGenerator,self)._handle_text(msg)
# Default body handler
_writeBody = _handle_text
@classmethod
def _compile_re(cls, s, flags):
return re.compile(s.encode('ascii'), flags)
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
class DecodedGenerator(Generator):
"""Generates a text representation of a message.
Like the Generator base class, except that non-text parts are substituted
with a format string representing the part.
"""
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
"""Like Generator.__init__() except that an additional optional
argument is allowed.
Walks through all subparts of a message. If the subpart is of main
type `text', then it prints the decoded payload of the subpart.
Otherwise, fmt is a format string that is used instead of the message
payload. fmt is expanded with the following keywords (in
%(keyword)s format):
type : Full MIME type of the non-text part
maintype : Main MIME type of the non-text part
subtype : Sub-MIME type of the non-text part
filename : Filename of the non-text part
description: Description associated with the non-text part
encoding : Content transfer encoding of the non-text part
The default value for fmt is None, meaning
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
"""
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
if fmt is None:
self._fmt = _FMT
else:
self._fmt = fmt
def _dispatch(self, msg):
for part in msg.walk():
maintype = part.get_content_maintype()
if maintype == 'text':
print(part.get_payload(decode=False), file=self)
elif maintype == 'multipart':
# Just skip this
pass
else:
print(self._fmt % {
'type' : part.get_content_type(),
'maintype' : part.get_content_maintype(),
'subtype' : part.get_content_subtype(),
'filename' : part.get_filename('[no filename]'),
'description': part.get('Content-Description',
'[no description]'),
'encoding' : part.get('Content-Transfer-Encoding',
'[no encoding]'),
}, file=self)
# Helper used by Generator._make_boundary
_width = len(repr(sys.maxsize-1))
_fmt = '%%0%dd' % _width
# Backward compatibility
_make_boundary = Generator._make_boundary
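# --- Hedged usage sketch (illustration only) ---
def _demo_flatten(msg):
    # Serialize an email.message.Message tree with Generator.flatten(); `msg`
    # is assumed to be an already-parsed Message instance.
    fp = StringIO()
    Generator(fp, mangle_from_=True, maxheaderlen=60).flatten(msg)
    return fp.getvalue()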
| apache-2.0 |
fabaff/ansible | lib/ansible/executor/playbook_executor.py | 1 | 9866 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import locale
import os
import signal
import sys
from ansible.compat.six import string_types
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.unicode import to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self.passwords = passwords
self._unreachable_hosts = dict()
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
signal.signal(signal.SIGINT, self._cleanup)
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv('%d plays in %s' % (len(plays), playbook_path))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(new_play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
break
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
if new_play.any_errors_fatal and failed_hosts_count > 0:
break
elif new_play.max_fail_percentage is not None and \
int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
break
elif len(batch) == failed_hosts_count:
break
                        # clear the failed hosts dictionaries in the TQM for the next batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
self._tqm.clear_failed_hosts()
# if the last result wasn't zero or 3 (some hosts were unreachable),
# break out of the serial batch loop
if result not in (0, 3):
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._cleanup()
if self._options.syntax:
display.display("No issues encountered")
return result
return result
def _cleanup(self, signum=None, framenum=None):
return self._tqm.cleanup()
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts)
# check to see if the serial number was specified as a percentage,
# and convert it to an integer value based on the number of hosts
if isinstance(play.serial, string_types) and play.serial.endswith('%'):
serial_pct = int(play.serial.replace("%",""))
serial = int((serial_pct/100.0) * len(all_hosts))
else:
if play.serial is None:
serial = -1
else:
serial = int(play.serial)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise split the list of hosts into chunks
# which are based on the serial size
if serial <= 0:
return [all_hosts]
else:
serialized_batches = []
while len(all_hosts) > 0:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
return serialized_batches
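# --- Hedged example (not part of Ansible; illustration only) ---
def _demo_serial_batches():
    # Mirrors _get_serialized_batches() for serial=2 over five hypothetical
    # hosts, yielding [['h1', 'h2'], ['h3', 'h4'], ['h5']].
    hosts = ['h1', 'h2', 'h3', 'h4', 'h5']
    serial, batches = 2, []
    while hosts:
        batches.append(hosts[:serial])
        hosts = hosts[serial:]
    return batches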
| gpl-3.0 |
savi-dev/horizon | horizon/dashboards/nova/containers/forms.py | 1 | 7384 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.core import validators
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import exceptions
from horizon import forms
LOG = logging.getLogger(__name__)
no_slash_validator = validators.RegexValidator(r'^(?u)[^/]+$',
_("Slash is not an allowed "
"character."),
code="noslash")
class CreateContainer(forms.SelfHandlingForm):
parent = forms.CharField(max_length=255,
required=False,
widget=forms.HiddenInput)
name = forms.CharField(max_length=255,
label=_("Container Name"),
validators=[no_slash_validator])
def handle(self, request, data):
try:
if not data['parent']:
# Create a container
api.swift_create_container(request, data["name"])
messages.success(request, _("Container created successfully."))
else:
# Create a pseudo-folder
container, slash, remainder = data['parent'].partition("/")
remainder = remainder.rstrip("/")
subfolder_name = "/".join([bit for bit
in (remainder, data['name'])
if bit])
api.swift_create_subfolder(request,
container,
subfolder_name)
messages.success(request, _("Folder created successfully."))
url = "horizon:nova:containers:object_index"
if remainder:
remainder = remainder.rstrip("/")
remainder += "/"
return shortcuts.redirect(url, container, remainder)
except:
exceptions.handle(request, _('Unable to create container.'))
return shortcuts.redirect("horizon:nova:containers:index")
class UploadObject(forms.SelfHandlingForm):
path = forms.CharField(max_length=255,
required=False,
widget=forms.HiddenInput)
name = forms.CharField(max_length=255,
label=_("Object Name"),
validators=[no_slash_validator])
object_file = forms.FileField(label=_("File"))
container_name = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
object_file = self.files['object_file']
if data['path']:
object_path = "/".join([data['path'].rstrip("/"), data['name']])
else:
object_path = data['name']
try:
obj = api.swift_upload_object(request,
data['container_name'],
object_path,
object_file)
obj.metadata['orig-filename'] = object_file.name
obj.sync_metadata()
messages.success(request, _("Object was successfully uploaded."))
except:
exceptions.handle(request, _("Unable to upload object."))
return shortcuts.redirect("horizon:nova:containers:object_index",
data['container_name'], data['path'])
class CopyObject(forms.SelfHandlingForm):
new_container_name = forms.ChoiceField(label=_("Destination container"),
validators=[no_slash_validator])
path = forms.CharField(max_length=255, required=False)
new_object_name = forms.CharField(max_length=255,
label=_("Destination object name"),
validators=[no_slash_validator])
orig_container_name = forms.CharField(widget=forms.HiddenInput())
orig_object_name = forms.CharField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
containers = kwargs.pop('containers')
super(CopyObject, self).__init__(*args, **kwargs)
self.fields['new_container_name'].choices = containers
def handle(self, request, data):
object_index = "horizon:nova:containers:object_index"
orig_container = data['orig_container_name']
orig_object = data['orig_object_name']
new_container = data['new_container_name']
new_object = data['new_object_name']
new_path = "%s%s" % (data['path'], new_object)
# Iteratively make sure all the directory markers exist.
if data['path']:
path_component = ""
for bit in data['path'].split("/"):
path_component += bit
try:
api.swift.swift_create_subfolder(request,
new_container,
path_component)
except:
redirect = reverse(object_index, args=(orig_container,))
exceptions.handle(request,
_("Unable to copy object."),
redirect=redirect)
path_component += "/"
# Now copy the object itself.
try:
api.swift_copy_object(request,
orig_container,
orig_object,
new_container,
new_path)
dest = "%s/%s" % (new_container, data['path'])
vals = {"dest": dest.rstrip("/"),
"orig": orig_object.split("/")[-1],
"new": new_object}
messages.success(request,
_('Copied "%(orig)s" to "%(dest)s" as "%(new)s".')
% vals)
except exceptions.HorizonException, exc:
messages.error(request, exc)
return shortcuts.redirect(object_index, orig_container)
except:
redirect = reverse(object_index, args=(orig_container,))
exceptions.handle(request,
_("Unable to copy object."),
redirect=redirect)
return shortcuts.redirect(object_index, new_container, data['path'])
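# --- Hedged example (illustration only; not part of Horizon) ---
def _demo_subfolder_markers(path):
    # Mirrors the marker loop in CopyObject.handle(): every prefix of the
    # destination path becomes a pseudo-folder marker, e.g.
    # 'a/b/c' -> ['a', 'a/b', 'a/b/c'].
    markers, component = [], ""
    for bit in path.split("/"):
        component += bit
        markers.append(component)
        component += "/"
    return markers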
| apache-2.0 |
mahak/keystone | keystone/auth/plugins/base.py | 2 | 3476 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from keystone.common import provider_api
from keystone import exception
AuthHandlerResponse = collections.namedtuple(
'AuthHandlerResponse', 'status, response_body, response_data')
class AuthMethodHandler(provider_api.ProviderAPIMixin, object,
metaclass=abc.ABCMeta):
"""Abstract base class for an authentication plugin."""
def __init__(self):
pass
@abc.abstractmethod
def authenticate(self, auth_payload):
"""Authenticate user and return an authentication context.
:param auth_payload: the payload content of the authentication request
for a given method
:type auth_payload: dict
If successful, plugin must set ``user_id`` in ``response_data``.
        ``method_names`` is used to convey any additional authentication methods
in case authentication is for re-scoping. For example, if the
authentication is for re-scoping, plugin must append the previous
method names into ``method_names``; NOTE: This behavior is exclusive
to the re-scope type action. Here's an example of ``response_data`` on
successful authentication::
{
"methods": [
"password",
"token"
],
"user_id": "abc123"
}
Plugins are invoked in the order in which they are specified in the
``methods`` attribute of the ``identity`` object. For example,
``custom-plugin`` is invoked before ``password``, which is invoked
before ``token`` in the following authentication request::
{
"auth": {
"identity": {
"custom-plugin": {
"custom-data": "sdfdfsfsfsdfsf"
},
"methods": [
"custom-plugin",
"password",
"token"
],
"password": {
"user": {
"id": "s23sfad1",
"password": "secret"
}
},
"token": {
"id": "sdfafasdfsfasfasdfds"
}
}
}
}
:returns: AuthHandlerResponse with status set to ``True`` if auth was
successful. If `status` is ``False`` and this is a multi-step
auth, the ``response_body`` can be in a form of a dict for
the next step in authentication.
:raises keystone.exception.Unauthorized: for authentication failure
"""
raise exception.Unauthorized()
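# Minimal sketch of a concrete plugin honoring the contract above; the class
# below is hypothetical and not part of keystone itself.
class NoopAuthMethod(AuthMethodHandler):
    """Trivially accepts any payload that already carries a user_id."""

    def authenticate(self, auth_payload):
        user_id = auth_payload.get('user_id')
        if user_id is None:
            raise exception.Unauthorized()
        # On success, user_id must be set in response_data as documented.
        return AuthHandlerResponse(status=True,
                                   response_body=None,
                                   response_data={'user_id': user_id})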
| apache-2.0 |
GaussDing/django | tests/gis_tests/geoapp/test_feeds.py | 33 | 4256 | from __future__ import unicode_literals
from xml.dom import minidom
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.sites.models import Site
from django.test import (
TestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
if HAS_GEOS:
from .models import City
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
@override_settings(ROOT_URLCONF='gis_tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoFeedTest(TestCase):
fixtures = ['initial']
def setUp(self):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def test_geofeed_rss(self):
"Tests geographic feeds using GeoRSS over RSSv2."
# Uses `GEOSGeometry` in `item_geometry`
doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
# Uses a 2-tuple in `item_geometry`
doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
['title', 'link', 'description', 'language',
'lastBuildDate', 'item', 'georss:box', 'atom:link']
)
# Incrementing through the feeds.
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <rss> element.
self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the georss element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
def test_geofeed_atom(self):
"Testing geographic feeds using GeoRSS over Atom."
doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), City.objects.count())
# Ensuring the georss element was added to each entry in the feed.
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
def test_geofeed_w3c(self):
"Testing geographic feeds using W3C Geo."
doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
feed = doc.firstChild
# Ensuring the geo namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the geo:lat and geo:lon element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
# Boxes and Polygons aren't allowed in W3C Geo feeds.
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/') # Box in <channel>
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/') # Polygons in <entry>
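# Standalone sketch (not part of the test class) of the minidom pattern the
# assertions above rely on: parse a GeoRSS fragment, then read the namespace
# declaration and a georss:point element. The sample XML is illustrative.
if __name__ == '__main__':
    sample = ('<rss xmlns:georss="http://www.georss.org/georss"><channel>'
              '<item><georss:point>57.0 -2.0</georss:point></item>'
              '</channel></rss>')
    doc = minidom.parseString(sample)
    rss = doc.firstChild
    assert rss.getAttribute('xmlns:georss') == 'http://www.georss.org/georss'
    point = rss.getElementsByTagName('georss:point')[0]
    assert point.firstChild.data == '57.0 -2.0'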
| bsd-3-clause |
SkillSmart/ConferenceManagementSystem | ApplicationManagement/migrations/0012_auto_20170331_1943.py | 1 | 1448 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-31 17:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ApplicationManagement', '0011_auto_20170331_1649'),
]
operations = [
migrations.RemoveField(
model_name='feedback',
name='category_1',
),
migrations.RemoveField(
model_name='feedback',
name='category_2',
),
migrations.RemoveField(
model_name='feedback',
name='category_3',
),
migrations.RemoveField(
model_name='feedback',
name='category_4',
),
migrations.AddField(
model_name='feedback',
name='feedbackComm',
field=models.IntegerField(blank=True, null=True, verbose_name='How well was the Feedback Communicated?'),
),
migrations.AddField(
model_name='feedback',
name='feedbackQuality',
field=models.IntegerField(blank=True, null=True, verbose_name='How helpfull was his/her Feedback?'),
),
migrations.AddField(
model_name='feedback',
name='feedbackRelated',
field=models.IntegerField(blank=True, null=True, verbose_name='How well did the Feedback relate to the Assessment Criteria?'),
),
]
| mit |
michaelyin/code-for-blog | 2009/plotting_data_monitor/com_monitor.py | 15 | 2988 | import Queue
import threading
import time
import serial
class ComMonitorThread(threading.Thread):
""" A thread for monitoring a COM port. The COM port is
opened when the thread is started.
data_q:
Queue for received data. Items in the queue are
(data, timestamp) pairs, where data is a binary
string representing the received data, and timestamp
is the time elapsed from the thread's start (in
seconds).
error_q:
Queue for error messages. In particular, if the
serial port fails to open for some reason, an error
is placed into this queue.
port:
The COM port to open. Must be recognized by the
system.
port_baud/stopbits/parity:
Serial communication parameters
port_timeout:
The timeout used for reading the COM port. If this
value is low, the thread will return data in finer
grained chunks, with more accurate timestamps, but
it will also consume more CPU.
"""
def __init__( self,
data_q, error_q,
port_num,
port_baud,
port_stopbits=serial.STOPBITS_ONE,
port_parity=serial.PARITY_NONE,
port_timeout=0.01):
threading.Thread.__init__(self)
self.serial_port = None
self.serial_arg = dict( port=port_num,
baudrate=port_baud,
stopbits=port_stopbits,
parity=port_parity,
timeout=port_timeout)
self.data_q = data_q
self.error_q = error_q
self.alive = threading.Event()
self.alive.set()
def run(self):
try:
if self.serial_port:
self.serial_port.close()
self.serial_port = serial.Serial(**self.serial_arg)
except serial.SerialException, e:
self.error_q.put(e.message)
return
# Restart the clock
time.clock()
while self.alive.isSet():
# Reading 1 byte, followed by whatever is left in the
# read buffer, as suggested by the developer of
# PySerial.
#
data = self.serial_port.read(1)
data += self.serial_port.read(self.serial_port.inWaiting())
if len(data) > 0:
timestamp = time.clock()
self.data_q.put((data, timestamp))
# clean up
if self.serial_port:
self.serial_port.close()
def join(self, timeout=None):
self.alive.clear()
threading.Thread.join(self, timeout)
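# Usage sketch; the port name, baud rate and timeouts below are illustrative
# assumptions, not values taken from this module.
if __name__ == '__main__':
    data_q = Queue.Queue()
    error_q = Queue.Queue()
    monitor = ComMonitorThread(data_q, error_q, '/dev/ttyUSB0', 38400)
    monitor.start()
    try:
        # Block until the first chunk arrives (at most 5 seconds).
        data, timestamp = data_q.get(True, 5)
        print 'received %d bytes at t=%.3fs' % (len(data), timestamp)
    except Queue.Empty:
        print 'no data received; errors:', list(error_q.queue)
    finally:
        monitor.join(0.1)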
| unlicense |
systemdaemon/systemd | tools/gdb-sd_dump_hashmaps.py | 112 | 5114 | # -*- Mode: python; coding: utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2014 Michal Schmidt
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import gdb
class sd_dump_hashmaps(gdb.Command):
"dump systemd's hashmaps"
def __init__(self):
super(sd_dump_hashmaps, self).__init__("sd_dump_hashmaps", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
d = gdb.parse_and_eval("hashmap_debug_list")
all_entry_sizes = gdb.parse_and_eval("all_entry_sizes")
all_direct_buckets = gdb.parse_and_eval("all_direct_buckets")
hashmap_base_t = gdb.lookup_type("HashmapBase")
uchar_t = gdb.lookup_type("unsigned char")
ulong_t = gdb.lookup_type("unsigned long")
debug_offset = gdb.parse_and_eval("(unsigned long)&((HashmapBase*)0)->debug")
print "type, hash, indirect, entries, max_entries, buckets, creator"
while d:
h = gdb.parse_and_eval("(HashmapBase*)((char*)%d - %d)" % (int(d.cast(ulong_t)), debug_offset))
if h["has_indirect"]:
storage_ptr = h["indirect"]["storage"].cast(uchar_t.pointer())
n_entries = h["indirect"]["n_entries"]
n_buckets = h["indirect"]["n_buckets"]
else:
storage_ptr = h["direct"]["storage"].cast(uchar_t.pointer())
n_entries = h["n_direct_entries"]
n_buckets = all_direct_buckets[int(h["type"])];
t = ["plain", "ordered", "set"][int(h["type"])]
print "%s, %s, %s, %d, %d, %d, %s (%s:%d)" % (t, h["hash_ops"], bool(h["has_indirect"]), n_entries, d["max_entries"], n_buckets, d["func"], d["file"], d["line"])
if arg != "" and n_entries > 0:
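                # DIB is the "distance from initial bucket" probe length of
                # the hashmap's Robin Hood hashing scheme (an assumption based
                # on the raw values read below); 255 marks a free or overlong
                # bucket, which is why it is reported separately.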
dib_raw_addr = storage_ptr + (all_entry_sizes[h["type"]] * n_buckets)
histogram = {}
for i in xrange(0, n_buckets):
dib = int(dib_raw_addr[i])
histogram[dib] = histogram.get(dib, 0) + 1
for dib in sorted(iter(histogram)):
if dib != 255:
print "%3d %8d %f%% of entries" % (dib, histogram[dib], 100.0*histogram[dib]/n_entries)
else:
print "%3d %8d %f%% of slots" % (dib, histogram[dib], 100.0*histogram[dib]/n_buckets)
print "mean DIB of entries: %f" % (sum([dib*histogram[dib] for dib in iter(histogram) if dib != 255])*1.0/n_entries)
blocks = []
current_len = 1
prev = int(dib_raw_addr[0])
for i in xrange(1, n_buckets):
dib = int(dib_raw_addr[i])
if (dib == 255) != (prev == 255):
if prev != 255:
blocks += [[i, current_len]]
current_len = 1
else:
current_len += 1
prev = dib
if prev != 255:
blocks += [[i, current_len]]
# a block may be wrapped around
if len(blocks) > 1 and blocks[0][0] == blocks[0][1] and blocks[-1][0] == n_buckets - 1:
blocks[0][1] += blocks[-1][1]
blocks = blocks[0:-1]
print "max block: %s" % max(blocks, key=lambda a: a[1])
print "sum block lens: %d" % sum(b[1] for b in blocks)
print "mean block len: %f" % (1.0 * sum(b[1] for b in blocks) / len(blocks))
d = d["debug_list_next"]
sd_dump_hashmaps()
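# Usage sketch (from a gdb session attached to a process built with hashmap
# debug support; the script path below is illustrative):
#
#   (gdb) source tools/gdb-sd_dump_hashmaps.py
#   (gdb) sd_dump_hashmaps
#   (gdb) sd_dump_hashmaps verbose   # any argument adds the DIB histogram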
| gpl-2.0 |
lukeburden/django-allauth | allauth/socialaccount/providers/bitbucket_oauth2/provider.py | 10 | 1122 | from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class BitbucketOAuth2Account(ProviderAccount):
def get_profile_url(self):
return (self.account.extra_data
.get('links', {})
.get('html', {})
.get('href'))
def get_avatar_url(self):
return (self.account.extra_data
.get('links', {})
.get('avatar', {})
.get('href'))
def to_str(self):
dflt = super(BitbucketOAuth2Account, self).to_str()
return self.account.extra_data.get('display_name', dflt)
class BitbucketOAuth2Provider(OAuth2Provider):
id = 'bitbucket_oauth2'
name = 'Bitbucket'
account_class = BitbucketOAuth2Account
def extract_uid(self, data):
return data['username']
def extract_common_fields(self, data):
return dict(email=data.get('email'),
username=data.get('username'),
name=data.get('display_name'))
provider_classes = [BitbucketOAuth2Provider]
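# Illustrative sketch (hypothetical data, not shipped with allauth): the shape
# of ``extra_data`` the accessors above expect from Bitbucket's 2.0 user API.
SAMPLE_EXTRA_DATA = {
    'username': 'jdoe',
    'display_name': 'Jane Doe',
    'links': {
        'html': {'href': 'https://bitbucket.org/jdoe/'},
        'avatar': {'href': 'https://bitbucket.org/account/jdoe/avatar/32/'},
    },
}
# extract_uid() reads 'username'; get_profile_url()/get_avatar_url() walk the
# nested 'links' dicts and fall back to None when a key is missing.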
| mit |
BlueLens/bl-magi | tensorflow/object_detection/protos/optimizer_pb2.py | 4 | 22276 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/optimizer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/optimizer.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n\'object_detection/protos/optimizer.proto\x12\x17object_detection.protos\"\xb5\x02\n\tOptimizer\x12G\n\x12rms_prop_optimizer\x18\x01 \x01(\x0b\x32).object_detection.protos.RMSPropOptimizerH\x00\x12H\n\x12momentum_optimizer\x18\x02 \x01(\x0b\x32*.object_detection.protos.MomentumOptimizerH\x00\x12@\n\x0e\x61\x64\x61m_optimizer\x18\x03 \x01(\x0b\x32&.object_detection.protos.AdamOptimizerH\x00\x12 \n\x12use_moving_average\x18\x04 \x01(\x08:\x04true\x12$\n\x14moving_average_decay\x18\x05 \x01(\x02:\x06\x30.9999B\x0b\n\toptimizer\"\x9f\x01\n\x10RMSPropOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12%\n\x18momentum_optimizer_value\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x05\x64\x65\x63\x61y\x18\x03 \x01(\x02:\x03\x30.9\x12\x12\n\x07\x65psilon\x18\x04 \x01(\x02:\x01\x31\"x\n\x11MomentumOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12%\n\x18momentum_optimizer_value\x18\x02 \x01(\x02:\x03\x30.9\"M\n\rAdamOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\"\xa8\x02\n\x0cLearningRate\x12O\n\x16\x63onstant_learning_rate\x18\x01 \x01(\x0b\x32-.object_detection.protos.ConstantLearningRateH\x00\x12`\n\x1f\x65xponential_decay_learning_rate\x18\x02 \x01(\x0b\x32\x35.object_detection.protos.ExponentialDecayLearningRateH\x00\x12T\n\x19manual_step_learning_rate\x18\x03 \x01(\x0b\x32/.object_detection.protos.ManualStepLearningRateH\x00\x42\x0f\n\rlearning_rate\"4\n\x14\x43onstantLearningRate\x12\x1c\n\rlearning_rate\x18\x01 \x01(\x02:\x05\x30.002\"\x97\x01\n\x1c\x45xponentialDecayLearningRate\x12$\n\x15initial_learning_rate\x18\x01 \x01(\x02:\x05\x30.002\x12\x1c\n\x0b\x64\x65\x63\x61y_steps\x18\x02 \x01(\r:\x07\x34\x30\x30\x30\x30\x30\x30\x12\x1a\n\x0c\x64\x65\x63\x61y_factor\x18\x03 \x01(\x02:\x04\x30.95\x12\x17\n\tstaircase\x18\x04 \x01(\x08:\x04true\"\xda\x01\n\x16ManualStepLearningRate\x12$\n\x15initial_learning_rate\x18\x01 \x01(\x02:\x05\x30.002\x12V\n\x08schedule\x18\x02 \x03(\x0b\x32\x44.object_detection.protos.ManualStepLearningRate.LearningRateSchedule\x1a\x42\n\x14LearningRateSchedule\x12\x0c\n\x04step\x18\x01 \x01(\r\x12\x1c\n\rlearning_rate\x18\x02 \x01(\x02:\x05\x30.002')
)
_OPTIMIZER = _descriptor.Descriptor(
name='Optimizer',
full_name='object_detection.protos.Optimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rms_prop_optimizer', full_name='object_detection.protos.Optimizer.rms_prop_optimizer', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum_optimizer', full_name='object_detection.protos.Optimizer.momentum_optimizer', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='adam_optimizer', full_name='object_detection.protos.Optimizer.adam_optimizer', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_moving_average', full_name='object_detection.protos.Optimizer.use_moving_average', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='moving_average_decay', full_name='object_detection.protos.Optimizer.moving_average_decay', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optimizer', full_name='object_detection.protos.Optimizer.optimizer',
index=0, containing_type=None, fields=[]),
],
serialized_start=69,
serialized_end=378,
)
_RMSPROPOPTIMIZER = _descriptor.Descriptor(
name='RMSPropOptimizer',
full_name='object_detection.protos.RMSPropOptimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.RMSPropOptimizer.learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum_optimizer_value', full_name='object_detection.protos.RMSPropOptimizer.momentum_optimizer_value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay', full_name='object_detection.protos.RMSPropOptimizer.decay', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='epsilon', full_name='object_detection.protos.RMSPropOptimizer.epsilon', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=381,
serialized_end=540,
)
_MOMENTUMOPTIMIZER = _descriptor.Descriptor(
name='MomentumOptimizer',
full_name='object_detection.protos.MomentumOptimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.MomentumOptimizer.learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum_optimizer_value', full_name='object_detection.protos.MomentumOptimizer.momentum_optimizer_value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=542,
serialized_end=662,
)
_ADAMOPTIMIZER = _descriptor.Descriptor(
name='AdamOptimizer',
full_name='object_detection.protos.AdamOptimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.AdamOptimizer.learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=664,
serialized_end=741,
)
_LEARNINGRATE = _descriptor.Descriptor(
name='LearningRate',
full_name='object_detection.protos.LearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='constant_learning_rate', full_name='object_detection.protos.LearningRate.constant_learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exponential_decay_learning_rate', full_name='object_detection.protos.LearningRate.exponential_decay_learning_rate', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='manual_step_learning_rate', full_name='object_detection.protos.LearningRate.manual_step_learning_rate', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='learning_rate', full_name='object_detection.protos.LearningRate.learning_rate',
index=0, containing_type=None, fields=[]),
],
serialized_start=744,
serialized_end=1040,
)
_CONSTANTLEARNINGRATE = _descriptor.Descriptor(
name='ConstantLearningRate',
full_name='object_detection.protos.ConstantLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.ConstantLearningRate.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1042,
serialized_end=1094,
)
_EXPONENTIALDECAYLEARNINGRATE = _descriptor.Descriptor(
name='ExponentialDecayLearningRate',
full_name='object_detection.protos.ExponentialDecayLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initial_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.initial_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay_steps', full_name='object_detection.protos.ExponentialDecayLearningRate.decay_steps', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4000000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay_factor', full_name='object_detection.protos.ExponentialDecayLearningRate.decay_factor', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.95),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='staircase', full_name='object_detection.protos.ExponentialDecayLearningRate.staircase', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1097,
serialized_end=1248,
)
_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE = _descriptor.Descriptor(
name='LearningRateSchedule',
full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step', full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule.step', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule.learning_rate', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1403,
serialized_end=1469,
)
_MANUALSTEPLEARNINGRATE = _descriptor.Descriptor(
name='ManualStepLearningRate',
full_name='object_detection.protos.ManualStepLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initial_learning_rate', full_name='object_detection.protos.ManualStepLearningRate.initial_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='schedule', full_name='object_detection.protos.ManualStepLearningRate.schedule', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1251,
serialized_end=1469,
)
_OPTIMIZER.fields_by_name['rms_prop_optimizer'].message_type = _RMSPROPOPTIMIZER
_OPTIMIZER.fields_by_name['momentum_optimizer'].message_type = _MOMENTUMOPTIMIZER
_OPTIMIZER.fields_by_name['adam_optimizer'].message_type = _ADAMOPTIMIZER
_OPTIMIZER.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZER.fields_by_name['rms_prop_optimizer'])
_OPTIMIZER.fields_by_name['rms_prop_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer']
_OPTIMIZER.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZER.fields_by_name['momentum_optimizer'])
_OPTIMIZER.fields_by_name['momentum_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer']
_OPTIMIZER.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZER.fields_by_name['adam_optimizer'])
_OPTIMIZER.fields_by_name['adam_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer']
_RMSPROPOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE
_MOMENTUMOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE
_ADAMOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE
_LEARNINGRATE.fields_by_name['constant_learning_rate'].message_type = _CONSTANTLEARNINGRATE
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'].message_type = _EXPONENTIALDECAYLEARNINGRATE
_LEARNINGRATE.fields_by_name['manual_step_learning_rate'].message_type = _MANUALSTEPLEARNINGRATE
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['constant_learning_rate'])
_LEARNINGRATE.fields_by_name['constant_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'])
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['manual_step_learning_rate'])
_LEARNINGRATE.fields_by_name['manual_step_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE.containing_type = _MANUALSTEPLEARNINGRATE
_MANUALSTEPLEARNINGRATE.fields_by_name['schedule'].message_type = _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE
DESCRIPTOR.message_types_by_name['Optimizer'] = _OPTIMIZER
DESCRIPTOR.message_types_by_name['RMSPropOptimizer'] = _RMSPROPOPTIMIZER
DESCRIPTOR.message_types_by_name['MomentumOptimizer'] = _MOMENTUMOPTIMIZER
DESCRIPTOR.message_types_by_name['AdamOptimizer'] = _ADAMOPTIMIZER
DESCRIPTOR.message_types_by_name['LearningRate'] = _LEARNINGRATE
DESCRIPTOR.message_types_by_name['ConstantLearningRate'] = _CONSTANTLEARNINGRATE
DESCRIPTOR.message_types_by_name['ExponentialDecayLearningRate'] = _EXPONENTIALDECAYLEARNINGRATE
DESCRIPTOR.message_types_by_name['ManualStepLearningRate'] = _MANUALSTEPLEARNINGRATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Optimizer = _reflection.GeneratedProtocolMessageType('Optimizer', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.Optimizer)
))
_sym_db.RegisterMessage(Optimizer)
RMSPropOptimizer = _reflection.GeneratedProtocolMessageType('RMSPropOptimizer', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RMSPropOptimizer)
))
_sym_db.RegisterMessage(RMSPropOptimizer)
MomentumOptimizer = _reflection.GeneratedProtocolMessageType('MomentumOptimizer', (_message.Message,), dict(
DESCRIPTOR = _MOMENTUMOPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MomentumOptimizer)
))
_sym_db.RegisterMessage(MomentumOptimizer)
AdamOptimizer = _reflection.GeneratedProtocolMessageType('AdamOptimizer', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.AdamOptimizer)
))
_sym_db.RegisterMessage(AdamOptimizer)
LearningRate = _reflection.GeneratedProtocolMessageType('LearningRate', (_message.Message,), dict(
DESCRIPTOR = _LEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.LearningRate)
))
_sym_db.RegisterMessage(LearningRate)
ConstantLearningRate = _reflection.GeneratedProtocolMessageType('ConstantLearningRate', (_message.Message,), dict(
DESCRIPTOR = _CONSTANTLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConstantLearningRate)
))
_sym_db.RegisterMessage(ConstantLearningRate)
ExponentialDecayLearningRate = _reflection.GeneratedProtocolMessageType('ExponentialDecayLearningRate', (_message.Message,), dict(
DESCRIPTOR = _EXPONENTIALDECAYLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ExponentialDecayLearningRate)
))
_sym_db.RegisterMessage(ExponentialDecayLearningRate)
ManualStepLearningRate = _reflection.GeneratedProtocolMessageType('ManualStepLearningRate', (_message.Message,), dict(
LearningRateSchedule = _reflection.GeneratedProtocolMessageType('LearningRateSchedule', (_message.Message,), dict(
DESCRIPTOR = _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ManualStepLearningRate.LearningRateSchedule)
))
,
DESCRIPTOR = _MANUALSTEPLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ManualStepLearningRate)
))
_sym_db.RegisterMessage(ManualStepLearningRate)
_sym_db.RegisterMessage(ManualStepLearningRate.LearningRateSchedule)
# @@protoc_insertion_point(module_scope)
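# Usage sketch (not generated code; the values are illustrative): construct a
# message, exercise the 'optimizer' oneof, and round-trip it.
if __name__ == '__main__':
    optimizer = Optimizer()
    lr = optimizer.rms_prop_optimizer.learning_rate
    lr.exponential_decay_learning_rate.initial_learning_rate = 0.004
    lr.exponential_decay_learning_rate.decay_steps = 800720
    assert optimizer.WhichOneof('optimizer') == 'rms_prop_optimizer'
    data = optimizer.SerializeToString()
    roundtrip = Optimizer.FromString(data)
    assert roundtrip.rms_prop_optimizer.learning_rate.HasField(
        'exponential_decay_learning_rate')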
| apache-2.0 |
rancher/validation-tests | tests/v3_validation/cattlevalidationtest/core/test_network_policy.py | 2 | 33949 | from common_fixtures import * # NOQA
test_network_policy = os.environ.get(
'TEST_NETWORK_POLICY', "False")
np_reason = \
'Intended to not execute this network policy test'
if_network_policy = pytest.mark.skipif(test_network_policy != "ALL",
reason=np_reason)
if_network_policy_none = pytest.mark.skipif(
test_network_policy != "NONE",
reason=np_reason)
if_network_policy_within_stack = pytest.mark.skipif(
test_network_policy != "WITHIN_STACK",
reason=np_reason)
if_network_policy_within_service = pytest.mark.skipif(
test_network_policy != "WITHIN_SERVICE",
reason=np_reason)
if_network_policy_within_linked = pytest.mark.skipif(
test_network_policy != "WITHIN_LINKED",
reason=np_reason)
if_network_policy_groupby = pytest.mark.skipif(
test_network_policy != "WITHIN_GROUPBY",
reason=np_reason)
NETWORKPOLICY_SUBDIR = \
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/networkpolicy')
policy_within_stack = {"within": "stack", "action": "allow"}
policy_groupby = {"between": {"groupBy": "com.rancher.stack.location"},
"action": "allow"}
policy_within_service = {"within": "service", "action": "allow"}
policy_within_linked = {"within": "linked", "action": "allow"}
shared_environment = {"env": []}
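# NOTE: besides "env", more keys are filled in at session setup: the fixture
# below relies on "containers" and "containers_with_label" (populated by
# helpers in common_fixtures), and populate_env_details() adds one handle per
# service under test.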
@pytest.fixture(scope='session', autouse=True)
def create_env_for_network_policy(request, client, socat_containers):
assert check_for_network_policy_manager(client)
env2 = create_stack_with_service(client, "test2", NETWORKPOLICY_SUBDIR,
"stack2.yml", "stack2-rc.yml")
assert len(env2.services()) == 6
env1 = create_stack_with_service(client, "test1", NETWORKPOLICY_SUBDIR,
"stack1.yml", "stack1-rc.yml")
assert len(env1.services()) == 11
create_standalone_containers(client)
time.sleep(sleep_interval)
populate_env_details(client)
def fin():
to_delete = [env1, env2]
delete_all(client, to_delete)
delete_all(client, shared_environment["containers"])
delete_all(client, shared_environment["containers_with_label"])
request.addfinalizer(fin)
def populate_env_details(client):
env = client.list_stack(name="test1")
assert len(env) == 1
env1 = env[0]
env = client.list_stack(name="test2")
assert len(env) == 1
env2 = env[0]
shared_environment["env"].append(env1)
shared_environment["env"].append(env2)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client, env1, "test1allow")
shared_environment["stack1_test2allow"] = \
get_service_by_name(client, env1, "test2allow")
shared_environment["stack1_test3deny"] = \
get_service_by_name(client, env1, "test3deny")
shared_environment["stack1_test4deny"] = \
get_service_by_name(client, env1, "test4deny")
shared_environment["stack1_lbwithinstack"] = \
get_service_by_name(client, env1, "lbwithininstack")
shared_environment["stack1_lbcrossstack"] = \
get_service_by_name(client, env1, "lbcrossstack")
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client, env1, "servicewithlinks")
shared_environment["stack1_servicecrosslinks"] = \
get_service_by_name(client, env1, "servicecrosslinks")
shared_environment["stack1_servicelinktosidekick"] = \
get_service_by_name(client, env1, "servicelinktosidekick")
shared_environment["stack1_linktowebservice"] = \
get_service_by_name(client, env1, "linktowebservice")
shared_environment["stack2_test1allow"] = \
get_service_by_name(client, env2, "test1allow")
shared_environment["stack2_test2allow"] = \
get_service_by_name(client, env2, "test2allow")
shared_environment["stack2_test3deny"] = \
get_service_by_name(client, env2, "test3deny")
shared_environment["stack2_test4deny"] = \
get_service_by_name(client, env2, "test4deny")
service_with_sidekick = {}
service_with_sidekick["p_con1"] = \
get_container_by_name(client, "test2-testp1-1")
service_with_sidekick["p_con2"] = \
get_container_by_name(client, "test2-testp1-2")
service_with_sidekick["s1_con1"] = \
get_container_by_name(client, "test2-testp1-tests1-1")
service_with_sidekick["s1_con2"] = \
get_container_by_name(client, "test2-testp1-tests1-2")
service_with_sidekick["s2_con1"] = \
get_container_by_name(client, "test2-testp1-tests2-1")
service_with_sidekick["s2_con2"] = \
get_container_by_name(client, "test2-testp1-tests2-2")
shared_environment["stack2_sidekick"] = service_with_sidekick
time.sleep(sleep_interval)
def validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client):
    # Validate that standalone containers are not able to reach any
    # service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that connectivity between containers of different
    # services within the same stack is allowed
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"]],
connection="allow")
# Validate that there is no connectivity between containers of different
# services across stacks
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that the LB is able to reach all targets which are in the
    # same stack as the LB
validate_lb_service(client,
shared_environment["stack1_lbwithinstack"],
"9091",
[shared_environment["stack1_test1allow"]])
    # Validate that a service is able to reach the service it links to
    # within the same stack
validate_linked_service(client,
shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]],
"99")
    # Cross-stack access for links should be denied
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
    # Cross-stack access for LBs should be denied
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
def validate_default_network_action_deny_networkpolicy_none(
client):
    # Validate that standalone containers are not able to reach any
    # service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that there is no connectivity between containers of different
# services across stacks and within stacks
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that the LB service is not able to reach targets within the
    # same stack or across stacks
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbwithinstack"], "9091")
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
    # Validate that connectivity between linked services is denied within
    # the same stack and across stacks
validate_linked_service(client,
shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]],
"99", not_reachable=True)
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
def validate_default_network_action_deny_networkpolicy_groupby(
client):
# Validate that containers that do not have the labels defined
# in group by policy are not allowed to communicate with other
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that standalone containers that have the labels defined
    # in the group by policy are allowed to communicate with service
    # containers having the same labels
for container in shared_environment["containers_with_label"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="allow")
# Validate that service containers that have matching labels defined
# in group by policy are allowed to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="allow")
    # Validate that containers of different services that share the same
    # group by label value are able to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test3deny"],
[shared_environment["stack2_test3deny"]],
connection="allow")
# Validate that service containers that do not have matching labels defined
# in group by policy are not allowed to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack1_test3deny"],
[shared_environment["stack1_test1allow"],
shared_environment["stack1_test2allow"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
def validate_default_network_action_deny_networkpolicy_within_service(
client):
    # Validate that standalone containers are not able to reach any
    # service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test1allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that containers belonging to the same service are able to
# communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test1allow"]],
connection="allow")
    # Validate that containers belonging to different services, within the
    # same stack or across stacks, are not able to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="deny")
    # Validate that LB services have no access to targets within the
    # same stack or across stacks
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbwithinstack"], "9091")
    # Validate that connectivity between linked services is denied within
    # the same stack and across stacks
validate_linked_service(
client, shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]], "99", not_reachable=True)
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
def validate_default_network_action_deny_networkpolicy_within_service_for_sk(
client):
# Validate that containers of primary services are able to connect with
# other containers in the same service and containers in other sidekick
# services
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["p_con1"],
[shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
    # Validate that containers of sidekick services are able to connect with
    # other containers in the same service, containers in other sidekick
    # services, and the primary service
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["s1_con1"],
[shared_environment["stack2_sidekick"]["p_con1"],
shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["s2_con1"],
[shared_environment["stack2_sidekick"]["p_con1"],
shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
def validate_default_network_action_deny_networkpolicy_within_linked(
client):
    # Validate that standalone containers are not able to reach any
    # service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that containers belonging to a service are not able to
    # communicate with other containers in the same service or in a
    # different service
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test1allow"],
shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="deny")
    # Validate that LB services have access to targets within the
    # same stack
validate_lb_service(client,
shared_environment["stack1_lbwithinstack"],
"9091",
[shared_environment["stack1_test1allow"]])
    # Validate that LB services have access to targets across stacks
validate_lb_service(client,
shared_environment["stack1_lbcrossstack"],
"9090",
[shared_environment["stack2_test1allow"]])
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
service_with_links = shared_environment["stack1_servicecrosslinks"]
linked_service = [shared_environment["stack2_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "98", "mylink")
def validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, port, linkName=None):
    # Validate that all containers of a service with links have access to
    # the containers of the service that it is linked to
validate_connectivity_between_services(
client,
service_with_links,
linked_service,
connection="allow")
    # Validate that containers of a linked service have no access back to
    # the containers of the service that links to it:
    # for (s1 -> s2), containers of s2 have no access to s1
for l_service in linked_service:
validate_connectivity_between_services(
client,
l_service,
[service_with_links],
connection="deny")
# Validate that containers are reachable using their link name
validate_linked_service(client,
service_with_links,
linked_service,
port,
linkName=linkName)
def validate_default_network_action_deny_networkpolicy_within_linked_for_sk(
client):
containers = get_service_container_list(
client, shared_environment["stack1_servicelinktosidekick"])
    # Validate that containers of the linking service can reach all
    # containers of the linked service, including its sidekicks
for con in containers:
validate_connectivity_between_container_list(
client,
con,
shared_environment["stack2_sidekick"].values(),
"allow")
for linked_con in shared_environment["stack2_sidekick"].values():
for con in containers:
validate_connectivity_between_containers(
client, linked_con, con, "deny")
def validate_dna_deny_np_within_linked_for_servicealias(
client):
    # Validate that a service linked to a web service (service alias) can
    # reach the services behind the alias, but not vice versa
validate_connectivity_between_services(
client, shared_environment["stack1_linktowebservice"],
[shared_environment["stack1_test4deny"],
shared_environment["stack2_test3deny"]],
connection="allow")
validate_connectivity_between_services(
client, shared_environment["stack1_test4deny"],
[shared_environment["stack1_linktowebservice"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack2_tes34deny"],
[shared_environment["stack1_linktowebservice"]],
connection="deny")
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_stacks(
client):
set_network_policy(client, "deny", policy_within_stack)
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_stop_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_delete_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_restart_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_none(client):
set_network_policy(client, "deny")
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_stop_service(
client, socat_containers):
set_network_policy(client, "deny")
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_delete_service(
client, socat_containers):
set_network_policy(client, "deny")
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_restart_service(
client, socat_containers):
set_network_policy(client, "deny")
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_groupby(
client):
set_network_policy(client, "deny", policy_groupby)
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_stop_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_delete_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_restart_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_service(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_delete_service(
client):
set_network_policy(client, "deny", policy_within_service)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbcrossstack"], [1])
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbwithinstack"], [1])
delete_service_instances(
client, shared_environment["env"][0],
shared_environment["stack1_servicewithlinks"], [1])
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_scale_service(
client):
set_network_policy(client, "deny", policy_within_service)
scale_service(shared_environment["stack1_test1allow"], client, 3)
scale_service(shared_environment["stack1_lbcrossstack"], client, 3)
scale_service(shared_environment["stack1_lbwithinstack"], client, 3)
scale_service(shared_environment["stack1_servicewithlinks"], client, 3)
populate_env_details(client)
validate_default_network_action_deny_networkpolicy_within_service(
client)
scale_service(shared_environment["stack1_test1allow"], client, 2)
scale_service(shared_environment["stack1_lbcrossstack"], client, 2)
scale_service(shared_environment["stack1_lbwithinstack"], client, 2)
scale_service(shared_environment["stack1_servicewithlinks"], client, 2)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_stop_service(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service(
client)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbcrossstack"], [1])
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbwithinstack"], [1])
stop_service_instances(
client, shared_environment["env"][0],
shared_environment["stack1_servicewithlinks"], [1])
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy
def test_dna_deny_np_allow_within_service_check_sidekicks(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service_for_sk(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_linked(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_default_network_action_deny_networkpolicy_within_linked(
client)
@if_network_policy
def test_dna_deny_np_allow_within_linked_for_sk(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_default_network_action_deny_networkpolicy_within_linked_for_sk(
client)
@if_network_policy
def test_dna_deny_np_allow_within_linked_for_sa(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_dna_deny_np_within_linked_for_servicealias(
client)
@if_network_policy_within_linked
def test_dna_deny_np_allow_within_linked_after_scaleup(
client):
set_network_policy(client, "deny", policy_within_linked)
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = shared_environment["stack1_test1allow"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(linked_service, client, 3)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client,
shared_environment["env"][0],
"test1allow")
linked_service = shared_environment["stack1_test1allow"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(linked_service, client, 2)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client,
shared_environment["env"][0],
"test1allow")
linked_service = shared_environment["stack1_test1allow"]
scale_service(service_with_links, client, 3)
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client,
shared_environment["env"][0],
"servicewithlinks")
service_with_links = shared_environment["stack1_servicewithlinks"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(service_with_links, client, 2)
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client,
shared_environment["env"][0],
"servicewithlinks")
@if_network_policy_within_linked
def test_dna_deny_np_allow_within_linked_after_adding_removing_links(
client):
set_network_policy(client, "deny", policy_within_linked)
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
# Add another service link
service_with_links.setservicelinks(
serviceLinks=[
{"serviceId": shared_environment["stack1_test1allow"].id},
{"serviceId": shared_environment["stack1_test2allow"].id}])
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links,
[shared_environment["stack1_test1allow"]], "99")
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links,
[shared_environment["stack1_test2allow"]], "99")
# Remove existing service link
service_with_links.setservicelinks(
serviceLinks=[
{"serviceId": shared_environment["stack1_test1allow"].id}])
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
validate_connectivity_between_services(
client, service_with_links,
[shared_environment["stack1_test2allow"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack1_test2allow"],
[service_with_links],
connection="deny")
def scale_service(service, client, final_scale):
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
check_container_in_service(client, service)
def set_network_policy(client, defaultPolicyAction="allow", policy=None):
networks = client.list_network(name='ipsec')
assert len(networks) == 1
network = networks[0]
network = client.update(
network, defaultPolicyAction=defaultPolicyAction, policy=policy)
network = wait_success(client, network)
assert network.defaultPolicyAction == defaultPolicyAction
populate_env_details(client)
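# Note (added; an assumption about the helper, not upstream documentation):
# populate_env_details() appears to refresh the cached service handles in
# shared_environment so later validations observe the updated policy.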
def check_for_network_policy_manager(client):
np_manager = False
env = client.list_stack(name="network-policy-manager")
if len(env) == 1:
service = get_service_by_name(client, env[0],
"network-policy-manager")
if service.state == "active":
np_manager = True
return np_manager
def create_standalone_containers(client):
hosts = client.list_host(kind='docker', removed_null=True)
cons = []
cons_with_label = []
for host in hosts:
con_name = random_str()
con = client.create_container(
name=con_name,
ports=['3001:22'],
image=HEALTH_CHECK_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
requestedHostId=host.id)
con = client.wait_success(con)
assert con.state == "running"
cons.append(con)
shared_environment["containers"] = cons
for host in hosts:
con_name = random_str()
con = client.create_container(
name=con_name,
ports=['3002:22'],
image=HEALTH_CHECK_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
requestedHostId=host.id,
labels={"com.rancher.stack.location": "east"})
con = client.wait_success(con)
assert con.state == "running"
cons_with_label.append(con)
shared_environment["containers_with_label"] = cons_with_label
| apache-2.0 |
michelts/lettuce | tests/integration/test_brocolis.py | 17 | 2167 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import commands
from lettuce.fs import FileSystem
from nose.tools import assert_equals
from tests.util import run_scenario
current_directory = FileSystem.dirname(__file__)
@FileSystem.in_directory(current_directory, 'django', 'brocolis')
def test_harvest_with_debug_mode_enabled():
'python manage.py harvest -d turns settings.DEBUG=True'
for option in ['-d', '--debug-mode']:
status, out = run_scenario('leaves', 'enabled', **{option: None})
assert_equals(status, 0, out)
@FileSystem.in_directory(current_directory, 'django', 'brocolis')
def test_harvest_with_debug_mode_disabled():
'python manage.py harvest without -d turns settings.DEBUG=False'
status, out = run_scenario('leaves', 'disabled')
assert_equals(status, 0, out)
@FileSystem.in_directory(current_directory, 'django', 'brocolis')
def test_harvest_sets_environment_variabled_for_gae():
'harvest sets environment variables SERVER_NAME and SERVER_PORT in order to work with Google App Engine'
status, out = run_scenario('leaves', 'appengine')
assert_equals(status, 0, out)
@FileSystem.in_directory(current_directory, 'django', 'brocolis')
def test_harvest_uses_test_runner():
'harvest uses TEST_RUNNER specified in settings'
status, out = run_scenario('leaves', 'disabled')
assert_equals(status, 0, out)
assert "Custom test runner enabled." in out
| gpl-3.0 |
Chrispassold/ionicons | builder/generate.py | 357 | 9438 | from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
LESS_FOLDER_PATH = os.path.join(ROOT_PATH, 'less')
def main():
generate_font_files()
data = get_build_data()
rename_svg_glyph_names(data)
generate_scss(data)
generate_less(data)
generate_cheatsheet(data)
generate_component_json(data)
generate_composer_json(data)
generate_bower_json(data)
def generate_font_files():
print "Generate Fonts"
cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
call(cmd, shell=True)
def rename_svg_glyph_names(data):
# hacky and slow (but safe) way to rename glyph-name attributes
svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
svg_file = open(svg_path, 'r+')
svg_text = svg_file.read()
svg_file.seek(0)
for ionicon in data['icons']:
# uniF2CA
org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
ion_name = 'ion-%s' % (ionicon['name'])
svg_text = svg_text.replace(org_name, ion_name)
svg_file.write(svg_text)
svg_file.close()
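# Illustrative note (added): the loop above rewrites FontForge's default
# glyph names in place, e.g. a glyph with code 0xf101 (hypothetical value)
# named 'uniF101' becomes 'ion-<name>' inside ionicons.svg.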
def generate_less(data):
print "Generate LESS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-variables.less')
icons_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-icons.less')
d = []
d.append('/*!');
d.append('Ionicons, v%s' % (font_version) );
d.append('Created by Ben Sperry for the Ionic Framework, http://ionicons.com/');
d.append('https://twitter.com/benjsperry https://twitter.com/ionicframework');
d.append('MIT License: https://github.com/driftyco/ionicons');
d.append('*/');
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('@ionicons-font-path: "../fonts";')
d.append('@ionicons-font-family: "%s";' % (font_name) )
d.append('@ionicons-version: "%s";' % (font_version) )
d.append('@ionicons-prefix: %s;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('@ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.@{ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' &:extend(.ion);')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.@{ionicons-prefix}%s:before { content: @ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
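# Illustrative note (added): for a hypothetical icon {'name': 'alert',
# 'code': '0xf101'} the two files written above would contain lines like
#
# @ionicon-var-alert: "\f101";
# .@{ionicons-prefix}alert:before { content: @ionicon-var-alert; }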
def generate_scss(data):
print "Generate SCSS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')
d = []
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('$ionicons-font-path: "../fonts" !default;')
d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
d.append('$ionicons-version: "%s" !default;' % (font_version) )
d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.#{$ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' @extend .ion;')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
generate_css_from_scss(data)
def generate_css_from_scss(data):
print "Generate CSS From SCSS"
scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
call(cmd, shell=True)
print "Generate Minified CSS From SCSS"
cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
escaped_html_code = ionicon['code'].replace('0x', '&amp;#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
print "Generate component.json"
d = {
"name": data['name'],
"repo": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"version": data['version'],
"keywords": [],
"dependencies": {},
"development": {},
"license": "MIT",
"styles": [
"css/%s.css" % (data['name'].lower())
],
"fonts": [
"fonts/%s.eot" % (data['name'].lower()),
"fonts/%s.svg" % (data['name'].lower()),
"fonts/%s.ttf" % (data['name'].lower()),
"fonts/%s.woff" % (data['name'].lower())
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
component_file_path = os.path.join(ROOT_PATH, 'component.json')
f = open(component_file_path, 'w')
f.write(txt)
f.close()
def generate_composer_json(data):
print "Generate composer.json"
d = {
"name": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"homepage": "http://ionicons.com/",
"authors": [
{
"name": "Ben Sperry",
"email": "[email protected]",
"role": "Designer",
"homepage": "https://twitter.com/benjsperry"
},
{
"name": "Adam Bradley",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/adamdbradley"
},
{
"name": "Max Lynch",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/maxlynch"
}
],
"extra": {},
"license": [ "MIT" ]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
f = open(composer_file_path, 'w')
f.write(txt)
f.close()
def generate_bower_json(data):
print "Generate bower.json"
d = {
"name": data['name'],
"version": data['version'],
"homepage": "https://github.com/driftyco/ionicons",
"authors": [
"Ben Sperry <[email protected]>",
"Adam Bradley <[email protected]>",
"Max Lynch <[email protected]>"
],
"description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
"main": [
"css/%s.css" % (data['name'].lower()),
"fonts/*"
],
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"license": "MIT",
"ignore": [
"**/.*",
"builder",
"node_modules",
"bower_components",
"test",
"tests"
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
f = open(bower_file_path, 'w')
f.write(txt)
f.close()
def get_build_data():
build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
f = open(build_data_path, 'r')
data = json.loads(f.read())
f.close()
return data
if __name__ == "__main__":
main()
| mit |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/gis/gdal/datasource.py | 92 | 4724 | """
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value of the field; OFTIntegers return ints,
# OFTReal returns floats, everything else returns strings.
val = field.value
"""
# ctypes prerequisites.
from ctypes import byref
# The GDAL C library, OGR exceptions, and the Layer object.
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
# Getting the ctypes prototypes for the DataSource.
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
"Wraps an OGR Data Source object."
#### Python 'magic' routines ####
def __init__(self, ds_input, ds_driver=False, write=False):
# The write flag.
if write:
self._write = 1
else:
self._write = 0
# Registering all the drivers, this needs to be done
# _before_ we try to open up a data source.
if not capi.get_driver_count():
capi.register_all()
if isinstance(ds_input, basestring):
# The data source driver is a void pointer.
ds_driver = Driver.ptr_type()
try:
# OGROpen will auto-detect the data source type.
ds = capi.open_ds(ds_input, self._write, byref(ds_driver))
except OGRException:
# Making the error message more clear rather than something
# like "Invalid pointer returned from OGROpen".
raise OGRException('Could not open the datasource at "%s"' % ds_input)
elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
ds = ds_input
else:
raise OGRException('Invalid data source input type: %s' % type(ds_input))
if bool(ds):
self.ptr = ds
self.driver = Driver(ds_driver)
else:
# Raise an exception if the returned pointer is NULL
raise OGRException('Invalid data source file "%s"' % ds_input)
def __del__(self):
"Destroys this DataStructure object."
if self._ptr: capi.destroy_ds(self._ptr)
def __iter__(self):
"Allows for iteration over the layers in a data source."
for i in xrange(self.layer_count):
yield self[i]
def __getitem__(self, index):
"Allows use of the index [] operator to get a layer at the index."
if isinstance(index, basestring):
l = capi.get_layer_by_name(self.ptr, index)
if not l: raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
elif isinstance(index, int):
if index < 0 or index >= self.layer_count:
raise OGRIndexError('index out of range')
l = capi.get_layer(self._ptr, index)
else:
raise TypeError('Invalid index type: %s' % type(index))
return Layer(l, self)
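# Illustrative usage (added; 'counties' is a hypothetical layer name):
#
# ds = DataSource('/path/to/file.shp')
# layer = ds[0] # by index; OGRIndexError when out of range
# layer = ds['counties'] # by name; OGRIndexError when missing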
def __len__(self):
"Returns the number of layers within the data source."
return self.layer_count
def __str__(self):
"Returns OGR GetName and Driver for the Data Source."
return '%s (%s)' % (self.name, str(self.driver))
@property
def layer_count(self):
"Returns the number of layers in the data source."
return capi.get_layer_count(self._ptr)
@property
def name(self):
"Returns the name of the data source."
return capi.get_ds_name(self._ptr)
| apache-2.0 |
Rogentos/legacy-anaconda | storage/devicelibs/crypto.py | 2 | 6138 | #
# crypto.py
#
# Copyright (C) 2009 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Dave Lehman <[email protected]>
# Martin Sivak <[email protected]>
#
import os
from pycryptsetup import CryptSetup
import iutil
from ..errors import *
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
# Keep the character set size a power of two to make sure all characters are
# equally likely
GENERATED_PASSPHRASE_CHARSET = ("0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"./")
# 20 chars * 6 bits per char = 120 "bits of security"
GENERATED_PASSPHRASE_LENGTH = 20
def generateBackupPassphrase():
rnd = os.urandom(GENERATED_PASSPHRASE_LENGTH)
cs = GENERATED_PASSPHRASE_CHARSET
raw = "".join([cs[ord(c) % len(cs)] for c in rnd])
# Make the result easier to read
parts = []
for i in xrange(0, len(raw), 5):
parts.append(raw[i : i + 5])
return "-".join(parts)
def askyes(question):
return True
def dolog(priority, text):
pass
def is_luks(device):
cs = CryptSetup(yesDialog = askyes, logFunc = dolog)
return cs.isLuks(device)
def luks_uuid(device):
cs = CryptSetup(yesDialog = askyes, logFunc = dolog)
return cs.luksUUID(device).strip()
def luks_status(name):
"""True means active, False means inactive (or non-existent)"""
cs = CryptSetup(yesDialog = askyes, logFunc = dolog)
return cs.luksStatus(name)!=0
def luks_format(device,
passphrase=None, key_file=None,
cipher=None, key_size=None):
cs = CryptSetup(yesDialog = askyes, logFunc = dolog)
key_file_unlink = False
if passphrase:
key_file = cs.prepare_passphrase_file(passphrase)
key_file_unlink = True
elif key_file and os.path.isfile(key_file):
pass
else:
raise ValueError("luks_format requires either a passphrase or a key file")
# None is not treated as a default value and pycryptsetup doesn't accept it,
# so we need to filter out all Nones.
kwargs = {}
kwargs["device"] = device
if cipher: kwargs["cipher"] = cipher
if key_file: kwargs["keyfile"] = key_file
if key_size: kwargs["keysize"] = key_size
rc = cs.luksFormat(**kwargs)
if key_file_unlink: os.unlink(key_file)
if rc:
raise CryptoError("luks_format failed for '%s'" % device)
def luks_open(device, name, passphrase=None, key_file=None):
cs = CryptSetup(yesDialog = askyes, logFunc = dolog)
key_file_unlink = False
if passphrase:
key_file = cs.prepare_passphrase_file(passphrase)
key_file_unlink = True
elif key_file and os.path.isfile(key_file):
pass
else:
raise ValueError("luks_open requires either a passphrase or a key file")
rc = cs.luksOpen(device = device, name = name, keyfile = key_file)
if key_file_unlink: os.unlink(key_file)
if rc:
raise CryptoError("luks_open failed for %s (%s)" % (device, name))
def luks_close(name):
cs = CryptSetup(yesDialog = askyes, logFunc = dolog)
rc = cs.luksClose(name)
if rc:
raise CryptoError("luks_close failed for %s" % name)
def luks_add_key(device,
new_passphrase=None, new_key_file=None,
passphrase=None, key_file=None):
params = ["-q"]
p = os.pipe()
if passphrase:
os.write(p[1], "%s\n" % passphrase)
elif key_file and os.path.isfile(key_file):
params.extend(["--key-file", key_file])
else:
raise CryptoError("luks_add_key requires either a passphrase or a key file")
params.extend(["luksAddKey", device])
if new_passphrase:
os.write(p[1], "%s\n" % new_passphrase)
elif new_key_file and os.path.isfile(new_key_file):
params.append("%s" % new_key_file)
else:
raise CryptoError("luks_add_key requires either a passphrase or a key file to add")
os.close(p[1])
rc = iutil.execWithRedirect("cryptsetup", params,
stdin = p[0],
stdout = "/dev/tty5",
stderr = "/dev/tty5")
os.close(p[0])
if rc:
raise CryptoError("luks add key failed with errcode %d" % (rc,))
def luks_remove_key(device,
del_passphrase=None, del_key_file=None,
passphrase=None, key_file=None):
params = []
p = os.pipe()
if del_passphrase: #the first question is about the key we want to remove
os.write(p[1], "%s\n" % del_passphrase)
if passphrase:
os.write(p[1], "%s\n" % passphrase)
elif key_file and os.path.isfile(key_file):
params.extend(["--key-file", key_file])
else:
raise CryptoError("luks_remove_key requires either a passphrase or a key file")
params.extend(["luksRemoveKey", device])
if del_passphrase:
pass
elif del_key_file and os.path.isfile(del_key_file):
params.append("%s" % del_key_file)
else:
raise CryptoError("luks_remove_key requires either a passphrase or a key file to remove")
os.close(p[1])
rc = iutil.execWithRedirect("cryptsetup", params,
stdin = p[0],
stdout = "/dev/tty5",
stderr = "/dev/tty5")
os.close(p[0])
if rc:
raise CryptoError("luks_remove_key failed with errcode %d" % (rc,))
| gpl-2.0 |
eunchong/build | scripts/slave/recipe_modules/skia/ios_flavor.py | 1 | 5292 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import default_flavor
"""iOS flavor utils, used for building for and running tests on iOS."""
class iOSFlavorUtils(default_flavor.DefaultFlavorUtils):
def __init__(self, skia_api):
super(iOSFlavorUtils, self).__init__(skia_api)
self.ios_bin = self._skia_api.m.path['slave_build'].join(
'skia', 'platform_tools', 'ios', 'bin')
def step(self, name, cmd, **kwargs):
args = [self.ios_bin.join('ios_run_skia')]
# Convert 'dm' and 'nanobench' from positional arguments
# to flags, which is what iOSShell expects to select which
# one is being run.
cmd = ["--" + c if c in ['dm', 'nanobench'] else c
for c in cmd]
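# e.g. (illustrative): ['dm', '--src', 'tests'] -> ['--dm', '--src', 'tests']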
return self._skia_api.run(self._skia_api.m.step, name=name, cmd=args + cmd,
**kwargs)
def compile(self, target):
"""Build the given target."""
cmd = [self.ios_bin.join('ios_ninja')]
self._skia_api.run(self._skia_api.m.step, 'build iOSShell', cmd=cmd,
cwd=self._skia_api.m.path['checkout'])
def device_path_join(self, *args):
"""Like os.path.join(), but for paths on a connected iOS device."""
return '/'.join(args)
def device_path_exists(self, path):
"""Like os.path.exists(), but for paths on a connected device."""
return self._skia_api.run(
self._skia_api.m.step,
'exists %s' % path,
cmd=[self.ios_bin.join('ios_path_exists'), path],
infra_step=True,
) # pragma: no cover
def _remove_device_dir(self, path):
"""Remove the directory on the device."""
return self._skia_api.run(
self._skia_api.m.step,
'rmdir %s' % path,
cmd=[self.ios_bin.join('ios_rm'), path],
infra_step=True,
)
def _create_device_dir(self, path):
"""Create the directory on the device."""
return self._skia_api.run(
self._skia_api.m.step,
'mkdir %s' % path,
cmd=[self.ios_bin.join('ios_mkdir'), path],
infra_step=True,
)
def copy_directory_contents_to_device(self, host_dir, device_dir):
"""Like shutil.copytree(), but for copying to a connected device."""
return self._skia_api.run(
self._skia_api.m.step,
name='push %s to %s' % (self._skia_api.m.path.basename(host_dir),
self._skia_api.m.path.basename(device_dir)),
cmd=[self.ios_bin.join('ios_push_if_needed'),
host_dir, device_dir],
infra_step=True,
)
def copy_directory_contents_to_host(self, device_dir, host_dir):
"""Like shutil.copytree(), but for copying from a connected device."""
self._skia_api.run(
self._skia_api.m.step,
name='pull %s' % self._skia_api.m.path.basename(device_dir),
cmd=[self.ios_bin.join('ios_pull_if_needed'),
device_dir, host_dir],
infra_step=True,
)
def copy_file_to_device(self, host_path, device_path):
"""Like shutil.copyfile, but for copying to a connected device."""
self._skia_api.run(
self._skia_api.m.step,
name='push %s' % host_path,
cmd=[self.ios_bin.join('ios_push_file'), host_path, device_path],
infra_step=True,
) # pragma: no cover
def create_clean_device_dir(self, path):
"""Like shutil.rmtree() + os.makedirs(), but on a connected device."""
self._remove_device_dir(path)
self._create_device_dir(path)
def install(self):
"""Run device-specific installation steps."""
self._skia_api.run(
self._skia_api.m.step,
name='install iOSShell',
cmd=[self.ios_bin.join('ios_install')],
infra_step=True)
def cleanup_steps(self):
"""Run any device-specific cleanup steps."""
if self._skia_api.do_test_steps or self._skia_api.do_perf_steps:
self._skia_api.run(
self._skia_api.m.step,
name='reboot',
cmd=[self.ios_bin.join('ios_restart')],
infra_step=True)
self._skia_api.run(
self._skia_api.m.step,
name='wait for reboot',
cmd=['sleep', '20'],
infra_step=True)
def read_file_on_device(self, path):
"""Read the given file."""
ret = self._skia_api.run(
self._skia_api.m.step,
name='read %s' % self._skia_api.m.path.basename(path),
cmd=[self.ios_bin.join('ios_cat_file'), path],
stdout=self._skia_api.m.raw_io.output(),
infra_step=True)
return ret.stdout.rstrip() if ret.stdout else ret.stdout
def remove_file_on_device(self, path):
"""Remove the file on the device."""
return self._skia_api.run(
self._skia_api.m.step,
'rm %s' % path,
cmd=[self.ios_bin.join('ios_rm'), path],
infra_step=True,
)
def get_device_dirs(self):
""" Set the directories which will be used by the build steps."""
prefix = self.device_path_join('skiabot', 'skia_')
return default_flavor.DeviceDirs(
dm_dir=prefix + 'dm',
perf_data_dir=prefix + 'perf',
resource_dir=prefix + 'resources',
images_dir=prefix + 'images',
skp_dir=prefix + 'skp/skps',
tmp_dir=prefix + 'tmp_dir')
| bsd-3-clause |
hipnusleo/laserjet | resource/pypi/cryptography-1.7.1/src/_cffi_src/openssl/engine.py | 1 | 5445 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/engine.h>
"""
TYPES = """
static const long Cryptography_HAS_ENGINE_CRYPTODEV;
typedef ... ENGINE;
typedef ... RSA_METHOD;
typedef ... DSA_METHOD;
typedef ... DH_METHOD;
typedef struct {
int (*bytes)(unsigned char *, int);
int (*pseudorand)(unsigned char *, int);
int (*status)();
...;
} RAND_METHOD;
typedef int (*ENGINE_GEN_INT_FUNC_PTR)(ENGINE *);
typedef ... *ENGINE_CTRL_FUNC_PTR;
typedef ... *ENGINE_LOAD_KEY_PTR;
typedef ... *ENGINE_CIPHERS_PTR;
typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
static const unsigned int ENGINE_METHOD_RSA;
static const unsigned int ENGINE_METHOD_DSA;
static const unsigned int ENGINE_METHOD_RAND;
static const unsigned int ENGINE_METHOD_CIPHERS;
static const unsigned int ENGINE_METHOD_DIGESTS;
static const unsigned int ENGINE_METHOD_ALL;
static const unsigned int ENGINE_METHOD_NONE;
static const int ENGINE_R_CONFLICTING_ENGINE_ID;
"""
FUNCTIONS = """
ENGINE *ENGINE_get_first(void);
ENGINE *ENGINE_get_last(void);
ENGINE *ENGINE_get_next(ENGINE *);
ENGINE *ENGINE_get_prev(ENGINE *);
int ENGINE_add(ENGINE *);
int ENGINE_remove(ENGINE *);
ENGINE *ENGINE_by_id(const char *);
int ENGINE_init(ENGINE *);
int ENGINE_finish(ENGINE *);
void ENGINE_load_builtin_engines(void);
ENGINE *ENGINE_get_default_RSA(void);
ENGINE *ENGINE_get_default_DSA(void);
ENGINE *ENGINE_get_default_DH(void);
ENGINE *ENGINE_get_default_RAND(void);
ENGINE *ENGINE_get_cipher_engine(int);
ENGINE *ENGINE_get_digest_engine(int);
int ENGINE_set_default_RSA(ENGINE *);
int ENGINE_set_default_DSA(ENGINE *);
int ENGINE_set_default_DH(ENGINE *);
int ENGINE_set_default_RAND(ENGINE *);
int ENGINE_set_default_ciphers(ENGINE *);
int ENGINE_set_default_digests(ENGINE *);
int ENGINE_set_default_string(ENGINE *, const char *);
int ENGINE_set_default(ENGINE *, unsigned int);
unsigned int ENGINE_get_table_flags(void);
void ENGINE_set_table_flags(unsigned int);
int ENGINE_register_RSA(ENGINE *);
void ENGINE_unregister_RSA(ENGINE *);
void ENGINE_register_all_RSA(void);
int ENGINE_register_DSA(ENGINE *);
void ENGINE_unregister_DSA(ENGINE *);
void ENGINE_register_all_DSA(void);
int ENGINE_register_DH(ENGINE *);
void ENGINE_unregister_DH(ENGINE *);
void ENGINE_register_all_DH(void);
int ENGINE_register_RAND(ENGINE *);
void ENGINE_unregister_RAND(ENGINE *);
void ENGINE_register_all_RAND(void);
int ENGINE_register_ciphers(ENGINE *);
void ENGINE_unregister_ciphers(ENGINE *);
void ENGINE_register_all_ciphers(void);
int ENGINE_register_digests(ENGINE *);
void ENGINE_unregister_digests(ENGINE *);
void ENGINE_register_all_digests(void);
int ENGINE_register_complete(ENGINE *);
int ENGINE_register_all_complete(void);
int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
int ENGINE_cmd_is_executable(ENGINE *, int);
int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
ENGINE *ENGINE_new(void);
int ENGINE_free(ENGINE *);
int ENGINE_up_ref(ENGINE *);
int ENGINE_set_id(ENGINE *, const char *);
int ENGINE_set_name(ENGINE *, const char *);
int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
int ENGINE_set_flags(ENGINE *, int);
int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
const char *ENGINE_get_id(const ENGINE *);
const char *ENGINE_get_name(const ENGINE *);
const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
const DH_METHOD *ENGINE_get_DH(const ENGINE *);
const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
const EVP_MD *ENGINE_get_digest(ENGINE *, int);
int ENGINE_get_flags(const ENGINE *);
const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
void ENGINE_add_conf_module(void);
"""
MACROS = """
/* these became macros in 1.1.0 */
void ENGINE_load_openssl(void);
void ENGINE_load_dynamic(void);
void ENGINE_cleanup(void);
void ENGINE_load_cryptodev(void);
"""
CUSTOMIZATIONS = """
#if defined(LIBRESSL_VERSION_NUMBER)
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 0;
void (*ENGINE_load_cryptodev)(void) = NULL;
#else
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 1;
#endif
"""
| apache-2.0 |
rhinstaller/anaconda | tests/unit_tests/pyanaconda_tests/modules/payloads/payload/test_module_payload_dnf_utils.py | 2 | 15676 | #
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import unittest
from textwrap import dedent
from unittest.mock import patch, Mock
from blivet.size import Size
from pyanaconda.core.constants import GROUP_PACKAGE_TYPES_REQUIRED, GROUP_PACKAGE_TYPES_ALL
from pyanaconda.modules.common.constants.objects import DEVICE_TREE
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.modules.common.structures.packages import PackagesSelectionData
from pyanaconda.modules.payloads.payload.dnf.dnf_manager import DNFManager
from pyanaconda.modules.payloads.payload.dnf.utils import get_kernel_package, \
get_product_release_version, get_installation_specs, get_kernel_version_list, \
pick_download_location, calculate_required_space, get_free_space_map, _pick_mount_points
from tests.unit_tests.pyanaconda_tests import patch_dbus_get_proxy_with_cache
class DNFUtilsPackagesTestCase(unittest.TestCase):
def test_get_kernel_package_excluded(self):
"""Test the get_kernel_package function with kernel excluded."""
kernel = get_kernel_package(Mock(), exclude_list=["kernel"])
self.assertEqual(kernel, None)
def test_get_kernel_package_unavailable(self):
"""Test the get_kernel_package function with unavailable packages."""
dnf_manager = Mock(spec=DNFManager)
dnf_manager.is_package_available.return_value = False
with self.assertLogs(level="ERROR") as cm:
kernel = get_kernel_package(dnf_manager, exclude_list=[])
msg = "Failed to select a kernel"
self.assertIn(msg, "\n".join(cm.output))
self.assertEqual(kernel, None)
@patch("pyanaconda.modules.payloads.payload.dnf.utils.is_lpae_available")
def test_get_kernel_package_lpae(self, is_lpae):
"""Test the get_kernel_package function with LPAE."""
is_lpae.return_value = True
dnf_manager = Mock(spec=DNFManager)
dnf_manager.is_package_available.return_value = True
kernel = get_kernel_package(dnf_manager, exclude_list=[])
self.assertEqual(kernel, "kernel-lpae")
kernel = get_kernel_package(dnf_manager, exclude_list=["kernel-lpae"])
self.assertEqual(kernel, "kernel")
@patch("pyanaconda.modules.payloads.payload.dnf.utils.is_lpae_available")
def test_get_kernel_package(self, is_lpae):
"""Test the get_kernel_package function."""
is_lpae.return_value = False
dnf_manager = Mock(spec=DNFManager)
dnf_manager.is_package_available.return_value = True
kernel = get_kernel_package(dnf_manager, exclude_list=[])
self.assertEqual(kernel, "kernel")
@patch("pyanaconda.modules.payloads.payload.dnf.utils.productVersion", "invalid")
def test_get_product_release_version_invalid(self):
"""Test the get_product_release_version function with an invalid value."""
self.assertEqual(get_product_release_version(), "rawhide")
@patch("pyanaconda.modules.payloads.payload.dnf.utils.productVersion", "28")
def test_get_product_release_version_number(self):
"""Test the get_product_release_version function with a valid number."""
self.assertEqual(get_product_release_version(), "28")
@patch("pyanaconda.modules.payloads.payload.dnf.utils.productVersion", "7.4")
def test_get_product_release_version_dot(self):
"""Test the get_product_release_version function with a dot."""
self.assertEqual(get_product_release_version(), "7.4")
def test_get_installation_specs_default(self):
"""Test the get_installation_specs function with defaults."""
data = PackagesSelectionData()
self.assertEqual(get_installation_specs(data), (["@core"], []))
def test_get_installation_specs_nocore(self):
"""Test the get_installation_specs function without core."""
data = PackagesSelectionData()
data.core_group_enabled = False
self.assertEqual(get_installation_specs(data), ([], ["@core"]))
def test_get_installation_specs_environment(self):
"""Test the get_installation_specs function with environment."""
data = PackagesSelectionData()
data.environment = "environment-1"
self.assertEqual(get_installation_specs(data), (
["@environment-1", "@core"], []
))
env = "environment-2"
self.assertEqual(get_installation_specs(data, default_environment=env), (
["@environment-1", "@core"], []
))
data.default_environment_enabled = True
self.assertEqual(get_installation_specs(data, default_environment=env), (
["@environment-2", "@core"], []
))
def test_get_installation_specs_packages(self):
"""Test the get_installation_specs function with packages."""
data = PackagesSelectionData()
data.packages = ["p1", "p2", "p3"]
data.excluded_packages = ["p4", "p5", "p6"]
self.assertEqual(get_installation_specs(data), (
["@core", "p1", "p2", "p3"], ["p4", "p5", "p6"]
))
def test_get_installation_specs_groups(self):
"""Test the get_installation_specs function with groups."""
data = PackagesSelectionData()
data.groups = ["g1", "g2", "g3"]
data.excluded_groups = ["g4", "g5", "g6"]
data.groups_package_types = {
"g1": GROUP_PACKAGE_TYPES_REQUIRED,
"g3": GROUP_PACKAGE_TYPES_ALL,
"g4": GROUP_PACKAGE_TYPES_REQUIRED,
"g6": GROUP_PACKAGE_TYPES_ALL,
}
self.assertEqual(get_installation_specs(data), (
[
"@core",
"@g1/mandatory,conditional",
"@g2",
"@g3/mandatory,default,conditional,optional"],
[
"@g4",
"@g5",
"@g6"
]
))
@patch("pyanaconda.modules.payloads.payload.dnf.utils.rpm")
def test_get_kernel_version_list(self, mock_rpm):
"""Test the get_kernel_version_list function."""
hdr_1 = Mock(filenames=[
"/boot/vmlinuz-0-rescue-dbe69c1b88f94a67b689e3f44b0550c8"
"/boot/vmlinuz-5.8.15-201.fc32.x86_64",
"/boot/efi/EFI/default/vmlinuz-6.8.15-201.fc32.x86_64",
])
hdr_2 = Mock(filenames=[
"/boot/vmlinuz-5.8.16-200.fc32.x86_64",
"/boot/efi/EFI/default/vmlinuz-7.8.16-200.fc32.x86_64",
"/boot/vmlinuz-5.8.18-200.fc32.x86_64"
"/boot/efi/EFI/default/vmlinuz-8.8.18-200.fc32.x86_64"
])
ts = Mock()
ts.dbMatch.return_value = [hdr_1, hdr_2]
mock_rpm.TransactionSet.return_value = ts
self.assertEqual(get_kernel_version_list(), [
'5.8.15-201.fc32.x86_64',
'5.8.16-200.fc32.x86_64',
'6.8.15-201.fc32.x86_64',
'7.8.16-200.fc32.x86_64',
'8.8.18-200.fc32.x86_64'
])
@patch("pyanaconda.modules.payloads.payload.dnf.utils.execWithCapture")
def test_get_free_space(self, exec_mock):
"""Test the get_free_space function."""
output = """
Mounted on Avail
/dev 100
/dev/shm 200
/run 300
/ 400
/tmp 500
/boot 600
/home 700
/boot/efi 800
"""
exec_mock.return_value = dedent(output).strip()
self.assertEqual(get_free_space_map(), {
'/dev': Size("100 KiB"),
'/dev/shm': Size("200 KiB"),
'/run': Size("300 KiB"),
'/': Size("400 KiB"),
'/tmp': Size("500 KiB"),
'/boot': Size("600 KiB"),
'/home': Size("700 KiB"),
'/boot/efi': Size("800 KiB"),
})
@patch("os.statvfs")
@patch("pyanaconda.modules.payloads.payload.dnf.utils.conf")
@patch("pyanaconda.modules.payloads.payload.dnf.utils.execWithCapture")
def test_get_free_space_image(self, exec_mock, conf_mock, statvfs_mock):
"""Test the get_free_space function."""
output = """
Mounted on Avail
/ 100
/boot 200
"""
exec_mock.return_value = dedent(output).strip()
conf_mock.target.is_hardware = False
statvfs_mock.return_value = Mock(f_frsize=1024, f_bfree=300)
self.assertEqual(get_free_space_map(), {
'/': Size("100 KiB"),
'/boot': Size("200 KiB"),
'/var/tmp': Size("300 KiB"),
})
def test_pick_mount_points(self):
"""Test the _pick_mount_points function."""
mount_points = {
"/": Size("1 G"),
"/home": Size("1 G"),
"/var/tmp": Size("1 G"),
"/mnt/sysroot": Size("1 G"),
"/mnt/sysroot/home": Size("1 G"),
"/mnt/sysroot/tmp": Size("1 G"),
"/mnt/sysroot/var": Size("1 G"),
"/mnt/sysroot/usr": Size("1 G"),
}
# All mount points are big enough.
# Choose all suitable mount points.
sufficient = _pick_mount_points(
mount_points,
download_size=Size("0.5 G"),
install_size=Size("0.5 G")
)
self.assertEqual(sufficient, {
"/var/tmp",
"/mnt/sysroot",
"/mnt/sysroot/home",
"/mnt/sysroot/tmp",
"/mnt/sysroot/var"
})
# No mount point is big enough for installation.
# Choose non-sysroot mount points for download.
sufficient = _pick_mount_points(
mount_points,
download_size=Size("0.5 G"),
install_size=Size("1.5 G")
)
self.assertEqual(sufficient, {
"/var/tmp",
})
# No mount point is big enough for installation or download.
sufficient = _pick_mount_points(
mount_points,
download_size=Size("1.5 G"),
install_size=Size("1.5 G")
)
self.assertEqual(sufficient, set())
@patch("pyanaconda.modules.payloads.payload.dnf.utils.get_free_space_map")
def test_pick_download_location(self, free_space_getter):
"""Test the pick_download_location function."""
download_size = Size(100)
installation_size = Size(200)
total_size = Size(300)
dnf_manager = Mock()
dnf_manager.get_download_size.return_value = download_size
dnf_manager.get_installation_size.return_value = installation_size
# Found mount points for download and install.
# Don't use /mnt/sysroot if possible.
free_space_getter.return_value = {
"/var/tmp": download_size,
"/mnt/sysroot": total_size,
}
path = pick_download_location(dnf_manager)
self.assertEqual(path, "/var/tmp/dnf.package.cache")
# Found mount points only for download.
# Use the biggest mount point.
free_space_getter.return_value = {
"/mnt/sysroot/tmp": download_size + 1,
"/mnt/sysroot/home": download_size,
}
path = pick_download_location(dnf_manager)
self.assertEqual(path, "/mnt/sysroot/tmp/dnf.package.cache")
# No mount point to use.
# Fail with an exception.
free_space_getter.return_value = {}
with self.assertRaises(RuntimeError) as cm:
pick_download_location(dnf_manager)
msg = "Not enough disk space to download the packages; size 100 B."
self.assertEqual(str(cm.exception), msg)
@patch("pyanaconda.modules.payloads.payload.dnf.utils.execWithCapture")
@patch_dbus_get_proxy_with_cache
def test_get_combined_free_space(self, proxy_getter, exec_mock):
"""Test the get_free_space function with the combined options."""
output = """
Mounted on Avail
/ 100
/tmp 200
"""
exec_mock.return_value = dedent(output).strip()
mount_points = {
'/': Size("300 KiB"),
'/boot': Size("400 KiB"),
}
def get_mount_points():
return list(mount_points.keys())
def get_free_space(paths):
return sum(map(mount_points.get, paths))
device_tree = STORAGE.get_proxy(DEVICE_TREE)
device_tree.GetMountPoints.side_effect = get_mount_points
device_tree.GetFileSystemFreeSpace.side_effect = get_free_space
self.assertEqual(get_free_space_map(current=True, scheduled=False), {
'/': Size("100 KiB"),
'/tmp': Size("200 KiB"),
})
self.assertEqual(get_free_space_map(current=False, scheduled=True), {
'/mnt/sysroot': Size("300 KiB"),
'/mnt/sysroot/boot': Size("400 KiB"),
})
self.assertEqual(get_free_space_map(current=True, scheduled=True), {
'/': Size("100 KiB"),
'/tmp': Size("200 KiB"),
'/mnt/sysroot': Size("300 KiB"),
'/mnt/sysroot/boot': Size("400 KiB"),
})
self.assertEqual(get_free_space_map(current=False, scheduled=False), {})
@patch("pyanaconda.modules.payloads.payload.dnf.utils.get_free_space_map")
def test_calculate_required_space(self, free_space_getter):
"""Test the calculate_required_space function."""
download_size = Size(100)
installation_size = Size(200)
total_size = Size(300)
dnf_manager = Mock()
dnf_manager.get_download_size.return_value = download_size
dnf_manager.get_installation_size.return_value = installation_size
# No mount point to use.
# The total size is required.
free_space_getter.return_value = {}
self.assertEqual(calculate_required_space(dnf_manager), total_size)
# Found a mount point for download and install.
# The total size is required.
free_space_getter.return_value = {
"/mnt/sysroot/home": total_size
}
self.assertEqual(calculate_required_space(dnf_manager), total_size)
# Found a mount point for download.
# The installation size is required.
free_space_getter.return_value = {
"/var/tmp": download_size
}
self.assertEqual(calculate_required_space(dnf_manager), installation_size)
# The biggest mount point can be used for download and install.
# The total size is required.
free_space_getter.return_value = {
"/var/tmp": download_size,
"/mnt/sysroot": total_size
}
self.assertEqual(calculate_required_space(dnf_manager), total_size)
| gpl-2.0 |
adfernandes/pcp | src/pcp/pidstat/test/process_stackutil_test.py | 6 | 2213 | #!/usr/bin/env pmpython
#
# Copyright (C) 2016 Sitaram Shelke.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import mock
import unittest
from pcp_pidstat import ProcessStackUtil
class TestProcessStackUtil(unittest.TestCase):
def setUp(self):
self.__metric_repository = mock.Mock()
self.__metric_repository.current_value = mock.Mock(side_effect=self.metric_repo_current_value_side_effect)
def metric_repo_current_value_side_effect(self, metric_name,instance):
if metric_name == 'proc.memory.vmstack' and instance == 1:
return 136
if metric_name == 'proc.psinfo.cmd' and instance == 1:
return "test"
if metric_name == 'proc.id.uid' and instance == 1:
return 1
if metric_name == 'proc.psinfo.pid' and instance == 1:
return 1
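# Note (added): mock passes the patched call's arguments straight to
# side_effect, so current_value('proc.memory.vmstack', 1) falls through the
# branches above and returns 136; unmatched lookups return None implicitly.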
def test_stack_size(self):
process_stack_usage = ProcessStackUtil(1,self.__metric_repository)
stack_size = process_stack_usage.stack_size()
self.assertEquals(stack_size, 136)
def test_stack_referenced_size(self):
self.skipTest(reason="Implement when suitable metric is found")
def test_pid(self):
process_stack_usage = ProcessStackUtil(1,self.__metric_repository)
pid = process_stack_usage.pid()
self.assertEqual(pid,1)
def test_process_name(self):
process_stack_usage = ProcessStackUtil(1,self.__metric_repository)
name = process_stack_usage.process_name()
self.assertEqual(name,'test')
def test_user_id(self):
process_stack_usage = ProcessStackUtil(1,self.__metric_repository)
user_id = process_stack_usage.user_id()
self.assertEqual(user_id,1)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
mou4e/zirconium | build/android/adb_reverse_forwarder.py | 15 | 2519 | #!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command line tool for forwarding ports from a device to the host.
Allows an Android device to connect to services running on the host machine,
i.e., "adb forward" in reverse. Requires |host_forwarder| and |device_forwarder|
to be built.
"""
import optparse
import sys
import time
from pylib import android_commands
from pylib import constants, forwarder
from pylib.device import device_utils
from pylib.utils import run_tests_helper
def main(argv):
parser = optparse.OptionParser(usage='Usage: %prog [options] device_port '
'host_port [device_port_2 host_port_2] ...',
description=__doc__)
parser.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
parser.add_option('--device',
help='Serial number of device we should use.')
parser.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default='Release',
help='Use Debug build of host tools instead of Release.')
options, args = parser.parse_args(argv)
run_tests_helper.SetLogLevel(options.verbose_count)
if len(args) < 2 or not len(args) % 2:
parser.error('Need even number of port pairs')
sys.exit(1)
try:
port_pairs = map(int, args[1:])
port_pairs = zip(port_pairs[::2], port_pairs[1::2])
except ValueError:
parser.error('Bad port number')
sys.exit(1)
devices = android_commands.GetAttachedDevices()
if options.device:
if options.device not in devices:
raise Exception('Error: %s not in attached devices %s' % (options.device,
','.join(devices)))
devices = [options.device]
else:
if not devices:
raise Exception('Error: no connected devices')
print "No device specified. Defaulting to " + devices[0]
device = device_utils.DeviceUtils(devices[0])
constants.SetBuildType(options.build_type)
try:
forwarder.Forwarder.Map(port_pairs, device)
while True:
time.sleep(60)
except KeyboardInterrupt:
sys.exit(0)
finally:
forwarder.Forwarder.UnmapAllDevicePorts(device)
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
zaqwes8811/micro-apps | extern/gmock-1.6.0/gtest/test/gtest_uninitialized_test.py | 2901 | 2480 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
pim89/youtube-dl | youtube_dl/extractor/brightcove.py | 5 | 27756 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_xml_parse_error,
compat_HTTPError,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
js_to_json,
int_or_none,
parse_iso8601,
unescapeHTML,
unsmuggle_url,
update_url_query,
clean_html,
mimetype2ext,
)
class BrightcoveLegacyIE(InfoExtractor):
IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
'timestamp': 1368213670,
'upload_date': '20130510',
'uploader_id': '1589608506001',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
'timestamp': 1344975024,
'upload_date': '20120814',
'uploader_id': '1460825906',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
'timestamp': 1382041798,
'upload_date': '20131017',
'uploader_id': '1130468786001',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
'skip': 'Video gone',
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '3750436379001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'RBTV Old (do not use)',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'timestamp': 1409122195,
'upload_date': '20140827',
'uploader_id': '710858724001',
},
},
{
# playlist with 'videoList'
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
{
# playlist with 'playlistTab' (https://github.com/rg3/youtube-dl/issues/9965)
'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg',
'info_dict': {
'id': '1522758701001',
'title': 'Lesson 08',
},
'playlist_mincount': 10,
},
]
FLV_VCODECS = {
1: 'SORENSON',
2: 'ON2',
3: 'H264',
4: 'VP8',
}
@classmethod
def _build_brighcove_url(cls, object_str):
"""
        Build a Brightcove URL from an XML string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
data_url = object_doc.attrib.get('data', '')
data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return data_url_params.get(name)
params = {}
playerID = find_param('playerID')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# These fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList')
if videoPlayer is not None:
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
            r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
                                                               # in length; however, it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
return update_url_query(cls._FEDERATED_URL, params)
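    # Illustrative sketch (not part of the original extractor): given the
    # params gathered above, _make_brightcove_url() simply appends them to the
    # federated viewer endpoint as a query string, e.g.
    #
    #   BrightcoveLegacyIE._make_brightcove_url(
    #       {'playerID': '1654948606001', '@videoPlayer': '2371591881001'})
    #   -> 'http://c.brightcove.com/services/viewer/htmlFederated'
    #      '?playerID=1654948606001&%40videoPlayer=2371591881001'
    #
    # (parameter order may vary, as the query is urlencoded from a dict)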
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove.com/[^\'"]+)[\'"]',
webpage)
if url_m:
url = unescapeHTML(url_m.group(1))
            # Some sites don't add it and we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
        # Change the 'videoId' and other fields to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query, referer=None):
headers = {}
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
headers['Referer'] = referer
webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' in json_data:
playlist_info = json_data['videoList']
playlist_dto = playlist_info['mediaCollectionDTO']
elif 'playlistTabs' in json_data:
playlist_info = json_data['playlistTabs']
playlist_dto = playlist_info['lineupListDTO']['playlistDTOs'][0]
else:
raise ExtractorError('Empty playlist')
videos = [self._extract_video_info(video_info) for video_info in playlist_dto['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_dto['displayName'])
def _extract_video_info(self, video_info):
video_id = compat_str(video_info['id'])
publisher_id = video_info.get('publisherId')
info = {
'id': video_id,
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
'uploader_id': compat_str(publisher_id) if publisher_id else None,
'duration': float_or_none(video_info.get('length'), 1000),
'timestamp': int_or_none(video_info.get('creationDate'), 1000),
}
renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', [])
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(
url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
continue
elif 'akamaihd.net' in url_comp.netloc:
                    # Renditions of this type are served through
                    # akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
tbr = int_or_none(rend.get('encodingRate'), 1000)
a_format = {
'format_id': 'http%s' % ('-%s' % tbr if tbr else ''),
'url': url,
'ext': ext,
'filesize': int_or_none(rend.get('size')) or None,
'tbr': tbr,
}
if rend.get('audioOnly'):
a_format.update({
'vcodec': 'none',
})
else:
a_format.update({
'height': int_or_none(rend.get('frameHeight')),
'width': int_or_none(rend.get('frameWidth')),
'vcodec': rend.get('videoCodec'),
})
# m3u8 manifests with remote == false are media playlists
# Not calling _extract_m3u8_formats here to save network traffic
if ext == 'm3u8':
a_format.update({
'format_id': 'hls%s' % ('-%s' % tbr if tbr else ''),
'ext': 'mp4',
'protocol': 'm3u8_native',
})
formats.append(a_format)
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
'vcodec': self.FLV_VCODECS.get(video_info.get('FLVFullCodec')),
'filesize': int_or_none(video_info.get('FLVFullSize')),
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % video_id)
return info
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
_VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+|ref:[^&]+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'info_dict': {
'id': '4463358922001',
'ext': 'mp4',
'title': 'Meet the man behind Popcorn Time',
'description': 'md5:eac376a4fe366edc70279bfb681aea16',
'duration': 165.768,
'timestamp': 1441391203,
'upload_date': '20150904',
'uploader_id': '929656772001',
'formats': 'mincount:22',
},
}, {
# with rtmp streams
'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
'info_dict': {
'id': '4279049078001',
'ext': 'mp4',
'title': 'Titansgrave: Chapter 0',
'description': 'Titansgrave: Chapter 0',
'duration': 1242.058,
'timestamp': 1433556729,
'upload_date': '20150606',
'uploader_id': '4036320279001',
'formats': 'mincount:41',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# ref: prefixed video id
'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
'only_matching': True,
}, {
# non numeric ref: prefixed video id
'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356',
'only_matching': True,
}, {
# unavailable video without message but with error_code
'url': 'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
urls = BrightcoveNewIE._extract_urls(webpage)
return urls[0] if urls else None
@staticmethod
def _extract_urls(webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
# 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html
# 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
# Look for iframe embeds [1]
for _, url in re.findall(
r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
entries.append(url if url.startswith('http') else 'http:' + url)
# Look for embed_in_page embeds [2]
for video_id, account_id, player_id, embed in re.findall(
# According to examples from [3] it's unclear whether video id
# may be optional and what to do when it is
# According to [4] data-video-id may be prefixed with ref:
r'''(?sx)
<video[^>]+
data-video-id=["\'](\d+|ref:[^"\']+)["\'][^>]*>.*?
</video>.*?
<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
(\d+)/([^/]+)_([^/]+)/index(?:\.min)?\.js
''', webpage):
entries.append(
'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
% (account_id, player_id, embed, video_id))
return entries
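    # Illustrative note (not from the original extractor): for an
    # embed-in-page snippet such as
    #
    #   <video data-video-id="4463358922001"></video>
    #   <script src="//players.brightcove.net/929656772001/abcd_default/index.min.js"></script>
    #
    # _extract_urls() would synthesize ('abcd' is a made-up player id):
    #
    #   http://players.brightcove.net/929656772001/abcd_default/index.html?videoId=4463358922001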
def _real_extract(self, url):
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(
'http://players.brightcove.net/%s/%s_%s/index.min.js'
% (account_id, player_id, embed), video_id)
policy_key = None
catalog = self._search_regex(
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
if catalog:
catalog = self._parse_json(
js_to_json(catalog), video_id, fatal=False)
if catalog:
policy_key = catalog.get('policyKey')
if not policy_key:
policy_key = self._search_regex(
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
webpage, 'policy key', group='pk')
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
try:
json_data = self._download_json(api_url, video_id, headers={
'Accept': 'application/json;pk=%s' % policy_key
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
raise ExtractorError(
json_data.get('message') or json_data['error_code'], expected=True)
raise
title = json_data['name'].strip()
formats = []
for source in json_data.get('sources', []):
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
if ext == 'ism':
continue
elif ext == 'm3u8' or container == 'M2TS':
if not src:
continue
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
if not src:
continue
formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
if not src and not streaming_src and (not stream_name or not app_name):
continue
tbr = float_or_none(source.get('avg_bitrate'), 1000)
height = int_or_none(source.get('height'))
width = int_or_none(source.get('width'))
f = {
'tbr': tbr,
'filesize': int_or_none(source.get('size')),
'container': container,
'ext': ext or container.lower(),
}
if width == 0 and height == 0:
f.update({
'vcodec': 'none',
})
else:
f.update({
'width': width,
'height': height,
'vcodec': source.get('codec'),
})
def build_format_id(kind):
format_id = kind
if tbr:
format_id += '-%dk' % int(tbr)
if height:
format_id += '-%dp' % height
return format_id
if src or streaming_src:
f.update({
'url': src or streaming_src,
'format_id': build_format_id('http' if src else 'http-streaming'),
'source_preference': 0 if src else -1,
})
else:
f.update({
'url': app_name,
'play_path': stream_name,
'format_id': build_format_id('rtmp'),
})
formats.append(f)
errors = json_data.get('errors')
if not formats and errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
self._sort_formats(formats)
subtitles = {}
for text_track in json_data.get('text_tracks', []):
if text_track.get('src'):
subtitles.setdefault(text_track.get('srclang'), []).append({
'url': text_track['src'],
})
is_live = False
duration = float_or_none(json_data.get('duration'), 1000)
if duration and duration < 0:
is_live = True
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': clean_html(json_data.get('description')),
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
'duration': duration,
'timestamp': parse_iso8601(json_data.get('published_at')),
'uploader_id': account_id,
'formats': formats,
'subtitles': subtitles,
'tags': json_data.get('tags', []),
'is_live': is_live,
}
| unlicense |
BowdoinOrient/bongo | bongo/apps/bongo/tests/model_tests.py | 1 | 13646 | from bongo.apps.bongo.tests import factories
from django.test import TestCase
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.text import slugify
import os
"""
Test content type models and related:
text, video, PDF, photo, HTML, pullquote, post
"""
class TextTestCase(TestCase):
def test_creator(self):
text = factories.TextFactory.create()
creator1 = factories.CreatorFactory.create()
creator2 = factories.CreatorFactory.create()
text.creators.add(creator1)
text.creators.add(creator2)
text.save()
for creator in text.creators.all():
self.assertIn(text, creator.works())
for creator in [creator1, creator2]:
self.assertIn(text, creator.works())
text.delete()
creator1.delete()
creator2.delete()
def test_fields(self):
text = factories.TextFactory.create()
self.assertIsNotNone(text.caption)
def test_excerpt(self):
text = factories.TextFactory.build()
self.assertEquals(text.excerpt, "The excerpt isn't correct until it's saved")
text.body = (
"Quinoa hashtag Kickstarter bespoke. Schlitz PBR&B 3 wolf moon, photo booth swag occupy banh mi PBR " +
"artisan lo-fi nor.bongo. Lomo selvage leggings quinoa, ugh cliche cornhole asymmetrical gluten-free " +
"Echo Park. Tumblr put a bird on it drinking vinegar sriracha, leggings mumbl.bongo actually four " +
"loko twee fixie mustache. Mustache drinking vinegar cliche, meggings before they sold out fap " +
"Kickstarter tofu banjo master cleanse ennui fingerstache kogi you probably haven't heard of them. " +
"Polaroid photo booth chia biodiesel trust fund typewriter locavore, Blue Bottle 90's Neutra umami " +
"flannel. Portland Helvetica umami freegan locavore direct trade, polaroid 3 wolf moon actually."
)
text.save()
self.assertEquals(text.excerpt, (
"Quinoa hashtag Kickstarter bespoke. Schlitz PBR&B 3 wolf moon, photo booth swag occupy banh mi PBR " +
"artisan lo-fi nor.bongo. Lomo selvage leggings quinoa, ugh cliche cornhole asymmetrical gluten-free " +
"Echo Park."
))
text.delete()
class VideoTestCase(TestCase):
def test_creator(self):
video = factories.VideoFactory.create()
creator1 = factories.CreatorFactory.create()
creator2 = factories.CreatorFactory.create()
video.creators.add(creator1)
video.creators.add(creator2)
video.save()
for creator in video.creators.all():
self.assertIn(video, creator.works())
for creator in [creator1, creator2]:
self.assertIn(video, creator.works())
video.delete()
creator1.delete()
creator2.delete()
def test_fields(self):
video = factories.VideoFactory.build()
self.assertIsNotNone(video.caption)
self.assertIsNotNone(video.url())
class PDFTestCase(TestCase):
def test_creator(self):
pdf = factories.PDFFactory.create()
creator1 = factories.CreatorFactory.create()
creator2 = factories.CreatorFactory.create()
pdf.creators.add(creator1)
pdf.creators.add(creator2)
pdf.save()
for creator in pdf.creators.all():
self.assertIn(pdf, creator.works())
for creator in [creator1, creator2]:
self.assertIn(pdf, creator.works())
pdf.delete()
creator1.delete()
creator2.delete()
def test_fields(self):
pdf = factories.PDFFactory.create()
self.assertIsNotNone(pdf.caption)
# @todo: test staticfile
class PhotoTestCase(TestCase):
def test_creator(self):
photo = factories.PhotoFactory.create()
creator1 = factories.CreatorFactory.create()
creator2 = factories.CreatorFactory.create()
photo.creators.add(creator1)
photo.creators.add(creator2)
photo.save()
for creator in photo.creators.all():
self.assertIn(photo, creator.works())
for creator in [creator1, creator2]:
self.assertIn(photo, creator.works())
photo.delete()
creator1.delete()
creator2.delete()
def test_fields(self):
photo = factories.PhotoFactory.create()
self.assertIsNotNone(photo.caption)
# @todo: test staticfile
class HTMLTestCase(TestCase):
def test_creator(self):
html = factories.HTMLFactory.create()
creator1 = factories.CreatorFactory.create()
creator2 = factories.CreatorFactory.create()
html.creators.add(creator1)
html.creators.add(creator2)
html.save()
for creator in html.creators.all():
self.assertIn(html, creator.works())
for creator in [creator1, creator2]:
self.assertIn(html, creator.works())
html.delete()
creator1.delete()
creator2.delete()
def test_fields(self):
html = factories.HTMLFactory.create()
self.assertIsNotNone(html.caption)
self.assertIsNotNone(html.content)
class PullquoteTestCase(TestCase):
def test_creator(self):
pullquote = factories.PullquoteFactory.create()
creator1 = factories.CreatorFactory.create()
creator2 = factories.CreatorFactory.create()
pullquote.creators.add(creator1)
pullquote.creators.add(creator2)
pullquote.save()
for creator in pullquote.creators.all():
self.assertIn(pullquote, creator.works())
for creator in [creator1, creator2]:
self.assertIn(pullquote, creator.works())
pullquote.delete()
creator1.delete()
creator2.delete()
def test_fields(self):
pullquote = factories.PullquoteFactory.create()
self.assertIsNotNone(pullquote.caption)
self.assertIsNotNone(pullquote.quote)
self.assertIsNotNone(pullquote.attribution)
class PostTestCase(TestCase):
def test_similar_tags(self):
# this is a damn good article. one of the best.
with open(
os.path.normpath(
os.path.join(
settings.SITE_ROOT,
"bongo/apps/bongo/tests/naked.txt"
)
),
"r"
) as f_txt:
articlebody = f_txt.read()
post = factories.PostFactory.create()
similar_post = factories.PostFactory.create()
text = factories.TextFactory.create(body=articlebody)
post.text.add(text)
post.save()
similar_post.text.add(text)
similar_post.save()
post.taggit()
similar_post.taggit()
self.assertNotEqual(post.tags.all().count(), 0)
self.assertNotEqual(similar_post.tags.all().count(), 0)
self.assertEqual(post.similar_tags()[0], similar_post)
def test_popularity(self):
post1 = factories.PostFactory.create()
post1.views_global = 1
post2 = factories.PostFactory.create()
post2.views_global = 2
post3 = factories.PostFactory.create()
post3.views_global = 3
self.assertGreater(post2.popularity(), post1.popularity())
self.assertGreater(post3.popularity(), post2.popularity())
def test_primary_section(self):
"""Test that this convenience method works, which, duh"""
post = factories.PostFactory.create()
self.assertEqual(post.primary_section(), post.section.classname())
def test_creators(self):
"""Test the creators() method for finding the authors of post's content"""
post = factories.PostFactory.create()
text = factories.TextFactory.create()
author = factories.CreatorFactory.create()
text.creators.add(author)
post.text.add(text)
text2 = factories.TextFactory.create()
text2.creators.add(author)
post.text.add(text2)
creators = list(post.creators())
self.assertIn(author, creators)
self.assertEqual(len(creators), 1)
def test_slug(self):
"""Test that an article gets assigned a slug when saved"""
post = factories.PostFactory.create()
post.save()
self.assertEqual(post.slug, slugify(post.title))
def test_slug_collision(self):
posts = [factories.PostFactory.create() for x in range(3)]
for post in posts:
post.slug = None
post.title = "Campus Concern Raises Concern"
post.save()
self.assertEqual(posts[0].slug, "campus-concern-raises-concern")
self.assertEqual(posts[1].slug, "campus-concern-raises-concern-2")
self.assertEqual(posts[2].slug, "campus-concern-raises-concern-3")
"""
Test user-related models:
creators, users, jobs
"""
class UserTestCase(TestCase):
def test_password(self):
""" Test that a user gets a password, and it works to log them in """
user = factories.UserFactory.create()
self.assertNotEqual(user.password, u'')
self.assertEqual(user.check_password("defaultpassword"), True)
class CreatorTestCase(TestCase):
def test_foreign_key(self):
""" Test that Creators are properly hooked up to Jobs and Users """
user = factories.UserFactory.create()
creator = factories.CreatorFactory.create()
job = factories.JobFactory.create()
creator.user = user
creator.job = job
creator.save()
self.assertEquals(type(creator.user), User)
from bongo.apps.bongo.models import Job
self.assertEquals(type(creator.job), Job)
creator.delete()
def test_works(self):
""" Test the connection between a creator and the content they've made """
me = factories.CreatorFactory.create()
photo = factories.PhotoFactory.create()
photo.creators.add(me)
photo.save()
video = factories.VideoFactory.create()
video.creators.add(me)
video.save()
self.assertIn(photo, me.works())
self.assertIn(video, me.works())
me.delete()
photo.delete()
video.delete()
def test_primary_section(self):
"""Test that Creators' primary_section method works"""
creator = factories.CreatorFactory.create()
section1 = factories.SectionFactory.create()
section2 = factories.SectionFactory.create()
post1 = factories.PostFactory.create()
post1text = factories.TextFactory.create()
post1text.creators.add(creator)
post1.text.add(post1text)
post1.section = section1
post1.save()
post2 = factories.PostFactory.create()
post2text = factories.TextFactory.create()
post2text.creators.add(creator)
post2.text.add(post2text)
post2.section = section2
post2.save()
post3 = factories.PostFactory.create()
post3text = factories.TextFactory.create()
post3text.creators.add(creator)
post3.text.add(post3text)
post3.section = section2
post3.save()
self.assertEqual(creator.primary_section(), section2.classname())
class JobTestCase(TestCase):
def test_foreign_key(self):
job = factories.JobFactory.create()
creator = factories.CreatorFactory.create()
creator.job = job
creator.save()
self.assertEqual(job, creator.job)
self.assertIn(creator, job.workers())
job.delete()
creator.delete()
"""
Test metadata models:
series, volumes, issues, sections, tags
"""
class SeriesTestCase(TestCase):
def test_m2m(self):
# @TODO
pass
def test_primary_section(self):
"""Test that Series' primary_section method works"""
series = factories.SeriesFactory.create()
section1 = factories.SectionFactory.create()
section2 = factories.SectionFactory.create()
post1 = factories.PostFactory.create()
post1.section = section1
post1.series.add(series)
post1.save()
post2 = factories.PostFactory.create()
post2.section = section2
post2.series.add(series)
post2.save()
post3 = factories.PostFactory.create()
post3.section = section2
post3.series.add(series)
post3.save()
self.assertEqual(series.primary_section(), section2.classname())
class VolumeTestCase(TestCase):
def test_foreign_key(self):
# @TODO
pass
class IssueTestCase(TestCase):
def test_foreign_key(self):
# @TODO
pass
def test_custom_save(self):
issue = factories.IssueFactory.create(
            volume=factories.VolumeFactory.create()
)
self.assertEqual(issue.scribd, None)
self.assertEqual(issue.scribd_image, None)
issue.scribd = 99999999
issue.save()
self.assertEqual(issue.scribd_image, None)
issue.scribd = 201901393
issue.save()
self.assertEqual(issue.scribd_image[:8], "https://")
class SectionTestCase(TestCase):
def test_foreign_key(self):
# @TODO
pass
def test_shortname(self):
section = factories.SectionFactory.create()
self.assertLess(len(section.classname()), 9)
self.assertEqual(section.classname(), section.classname().lower())
class TagTestCase(TestCase):
def test_foreign_key(self):
# @TODO
pass
def test_autogen(self):
# @TODO
pass
| mit |
viridia/coda | third-party/python/ply-3.4/ply/cpp.py | 192 | 33040 | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT(t):
r'(/\*(.|\n)*?\*/)|(//.*?\n)'
t.lexer.lineno += t.value.count("\n")
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
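# Example (illustrative, not part of the original module): trigraph() rewrites
# every "??x" sequence using the table above, so
#
#   trigraph("??=define ARR(i) a??(i??)")
#
# returns "#define ARR(i) a[i]".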
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
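    # Example sketch (assumes the default lexer defined at the top of this
    # file): tokenizing the string "x + 1" yields tokens whose types are
    # roughly [CPP_ID, CPP_WS, '+', CPP_WS, CPP_INTEGER]; whitespace tokens
    # are kept because later stages (e.g. define()) depend on them.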
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
    # function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
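    # Illustrative sketch (not part of the original module): given the tokens
    # of "(a, f(b, c), d) rest", collect_args() consumes everything up to and
    # including the matching ')' and returns (tokencount, args, positions),
    # where args holds three stripped token lists -- a / f(b, c) / d -- since
    # a comma inside nested parentheses does not start a new argument.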
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
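    # Example sketch (assumed behaviour, not part of the original module):
    # for "#define CAT(a, b) a ## b" the prescan records two ('c', argnum, i)
    # entries in macro.patch and removes the '##' token from macro.value,
    # while for "#define STR(x) #x" it records one (argnum, i) entry in
    # macro.str_patch so that the argument is stringized during expansion.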
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
        # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the comma before it.
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
                        if j < len(tokens) and tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
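                        else:
                            # Assumed defensive addition (not in upstream
                            # ply): a function-like macro name that is not
                            # followed by '(' is left alone; step past it so
                            # the scan cannot raise IndexError here or loop
                            # forever on the same identifier.
                            i = j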
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except StandardError:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
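    # Illustrative sketch (not part of the original module): evalexpr() first
    # rewrites each defined(NAME) into 0L/1L, expands the remaining macros,
    # maps any leftover identifier to 0L, translates &&, || and ! into Python
    # operators, and eval()s the result -- so the tokens of
    # "defined(FOO) && VERSION > 2" become roughly "0L and 0L > 2" (with
    # neither name defined), which evaluates to a false value.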
# ----------------------------------------------------------------------
# parsegen()
#
    # Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
for tok in x:
                    if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,(str,unicode)):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
| apache-2.0 |
Chilledheart/chromium | tools/telemetry/third_party/typ/typ/arg_parser.py | 33 | 13928 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import optparse
from typ.host import Host
class _Bailout(Exception):
pass
DEFAULT_COVERAGE_OMIT = ['*/typ/*', '*/site-packages/*']
DEFAULT_STATUS_FORMAT = '[%f/%t] '
DEFAULT_SUFFIXES = ['*_test.py', '*_unittest.py']
class ArgumentParser(argparse.ArgumentParser):
@staticmethod
def add_option_group(parser, title, discovery=False,
running=False, reporting=False, skip=None):
# TODO: Get rid of this when telemetry upgrades to argparse.
ap = ArgumentParser(add_help=False, version=False, discovery=discovery,
running=running, reporting=reporting)
optlist = ap.optparse_options(skip=skip)
group = optparse.OptionGroup(parser, title)
group.add_options(optlist)
parser.add_option_group(group)
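    # Usage sketch (illustrative, not from the original module): a tool that
    # still uses optparse can graft typ's flags onto its own parser, e.g.
    #
    #   parser = optparse.OptionParser()
    #   ArgumentParser.add_option_group(parser, 'typ reporting options',
    #                                   reporting=True)
    #   options, args = parser.parse_args()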
def __init__(self, host=None, add_help=True, version=True, discovery=True,
reporting=True, running=True):
super(ArgumentParser, self).__init__(prog='typ', add_help=add_help)
self._host = host or Host()
self.exit_status = None
self.usage = '%(prog)s [options] [tests...]'
if version:
self.add_argument('-V', '--version', action='store_true',
help='Print the typ version and exit.')
if discovery:
self.add_argument('-f', '--file-list', metavar='FILENAME',
action='store',
help=('Takes the list of tests from the file '
'(use "-" for stdin).'))
self.add_argument('--all', action='store_true',
help=('Run all the tests, including the ones '
'normally skipped.'))
self.add_argument('--isolate', metavar='glob', default=[],
action='append',
help=('Globs of tests to run in isolation '
'(serially).'))
self.add_argument('--skip', metavar='glob', default=[],
action='append',
help=('Globs of test names to skip ('
'defaults to %(default)s).'))
self.add_argument('--suffixes', metavar='glob', default=[],
action='append',
help=('Globs of test filenames to look for ('
'can specify multiple times; defaults '
'to %s).' % DEFAULT_SUFFIXES))
if reporting:
self.add_argument('--builder-name',
help=('Builder name to include in the '
'uploaded data.'))
self.add_argument('-c', '--coverage', action='store_true',
help='Reports coverage information.')
self.add_argument('--coverage-source', action='append',
default=[],
help=('Directories to include when running and '
'reporting coverage (defaults to '
'--top-level-dir plus --path)'))
self.add_argument('--coverage-omit', action='append',
default=[],
help=('Globs to omit when reporting coverage '
'(defaults to %s).' %
DEFAULT_COVERAGE_OMIT))
self.add_argument('--coverage-annotate', action='store_true',
help=('Produce an annotate source report.'))
self.add_argument('--coverage-show-missing', action='store_true',
help=('Show missing line ranges in coverage '
'report.'))
self.add_argument('--master-name',
help=('Buildbot master name to include in the '
'uploaded data.'))
self.add_argument('--metadata', action='append', default=[],
help=('Optional key=value metadata that will '
'be included in the results.'))
self.add_argument('--test-results-server',
help=('If specified, uploads the full results '
'to this server.'))
self.add_argument('--test-type',
help=('Name of test type to include in the '
'uploaded data (e.g., '
'"telemetry_unittests").'))
self.add_argument('--write-full-results-to', metavar='FILENAME',
action='store',
help=('If specified, writes the full results to '
'that path.'))
self.add_argument('--write-trace-to', metavar='FILENAME',
action='store',
help=('If specified, writes the trace to '
'that path.'))
self.add_argument('tests', nargs='*', default=[],
help=argparse.SUPPRESS)
if running:
self.add_argument('-d', '--debugger', action='store_true',
help='Runs the tests under the debugger.')
self.add_argument('-j', '--jobs', metavar='N', type=int,
default=self._host.cpu_count(),
help=('Runs N jobs in parallel '
'(defaults to %(default)s).'))
self.add_argument('-l', '--list-only', action='store_true',
help='Lists all the test names found and exits.')
self.add_argument('-n', '--dry-run', action='store_true',
help=argparse.SUPPRESS)
self.add_argument('-q', '--quiet', action='store_true',
default=False,
help=('Runs as quietly as possible '
'(only prints errors).'))
self.add_argument('-s', '--status-format',
default=self._host.getenv('NINJA_STATUS',
DEFAULT_STATUS_FORMAT),
help=argparse.SUPPRESS)
self.add_argument('-t', '--timing', action='store_true',
help='Prints timing info.')
self.add_argument('-v', '--verbose', action='count', default=0,
help=('Prints more stuff (can specify multiple '
'times for more output).'))
self.add_argument('--passthrough', action='store_true',
default=False,
help='Prints all output while running.')
self.add_argument('--retry-limit', type=int, default=0,
help='Retries each failure up to N times.')
self.add_argument('--terminal-width', type=int,
default=self._host.terminal_width(),
help=argparse.SUPPRESS)
self.add_argument('--overwrite', action='store_true',
default=None,
help=argparse.SUPPRESS)
self.add_argument('--no-overwrite', action='store_false',
dest='overwrite', default=None,
help=argparse.SUPPRESS)
if discovery or running:
self.add_argument('-P', '--path', action='append', default=[],
help=('Adds dir to sys.path (can specify '
'multiple times).'))
self.add_argument('--top-level-dir', default=None,
help=('Sets the top directory of project '
'(used when running subdirs).'))
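
    # Illustrative sketch (not in the original source): the feature-group
    # flags above control which options a given instance accepts, e.g.:
    #
    #   full = ArgumentParser()  # discovery + running + reporting
    #   run_only = ArgumentParser(discovery=False, reporting=False)
    #   # run_only still accepts -j/--jobs but rejects --suffixes and
    #   # --coverage, which belong to the disabled groups.
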
def parse_args(self, args=None, namespace=None):
try:
rargs = super(ArgumentParser, self).parse_args(args=args,
namespace=namespace)
except _Bailout:
return None
for val in rargs.metadata:
if '=' not in val:
self._print_message('Error: malformed --metadata "%s"' % val)
self.exit_status = 2
if rargs.test_results_server:
if not rargs.builder_name:
self._print_message('Error: --builder-name must be specified '
                                    'along with --test-results-server')
self.exit_status = 2
if not rargs.master_name:
self._print_message('Error: --master-name must be specified '
                                    'along with --test-results-server')
self.exit_status = 2
if not rargs.test_type:
self._print_message('Error: --test-type must be specified '
                                    'along with --test-results-server')
self.exit_status = 2
if not rargs.suffixes:
rargs.suffixes = DEFAULT_SUFFIXES
if not rargs.coverage_omit:
rargs.coverage_omit = DEFAULT_COVERAGE_OMIT
if rargs.debugger: # pragma: no cover
rargs.jobs = 1
rargs.passthrough = True
if rargs.overwrite is None:
rargs.overwrite = self._host.stdout.isatty() and not rargs.verbose
return rargs
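
    # Illustrative sketch (not in the original source): because exit()
    # below raises _Bailout instead of calling sys.exit(), callers inspect
    # exit_status rather than catching SystemExit:
    #
    #   parser = ArgumentParser()
    #   args = parser.parse_args(['--jobs', 'two'])  # not a valid int
    #   # -> args is None and parser.exit_status == 2
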
# Redefining built-in 'file' pylint: disable=W0622
def _print_message(self, msg, file=None):
self._host.print_(msg=msg, stream=file, end='\n')

    def print_help(self, file=None):
self._print_message(msg=self.format_help(), file=file)

    def error(self, message, bailout=True):  # pylint: disable=W0221
self.exit(2, '%s: error: %s\n' % (self.prog, message), bailout=bailout)

    def exit(self, status=0, message=None,  # pylint: disable=W0221
bailout=True):
self.exit_status = status
if message:
self._print_message(message, file=self._host.stderr)
if bailout:
raise _Bailout()

    def optparse_options(self, skip=None):
skip = skip or []
options = []
for action in self._actions:
args = [flag for flag in action.option_strings if flag not in skip]
            if not args or action.help == argparse.SUPPRESS:
# must either be a positional argument like 'tests'
# or an option we want to skip altogether.
continue
kwargs = {
'default': action.default,
'dest': action.dest,
'help': action.help,
'metavar': action.metavar,
'type': action.type,
'action': _action_str(action)
}
options.append(optparse.make_option(*args, **kwargs))
return options
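
    # Illustrative sketch (not in the original source): the returned
    # optparse.Option objects can be fed straight to a legacy parser:
    #
    #   ap = ArgumentParser(add_help=False, version=False)
    #   legacy = optparse.OptionParser()
    #   legacy.add_options(ap.optparse_options())
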
def argv_from_args(self, args):
default_parser = ArgumentParser(host=self._host)
default_args = default_parser.parse_args([])
argv = []
tests = []
d = vars(args)
for k in sorted(d.keys()):
v = d[k]
argname = _argname_from_key(k)
action = self._action_for_key(k)
action_str = _action_str(action)
if k == 'tests':
tests = v
continue
if getattr(default_args, k) == v:
# this arg has the default value, so skip it.
continue
assert action_str in ['append', 'count', 'store', 'store_true']
if action_str == 'append':
for el in v:
argv.append(argname)
argv.append(el)
elif action_str == 'count':
for _ in range(v):
argv.append(argname)
elif action_str == 'store':
argv.append(argname)
argv.append(str(v))
else:
# action_str == 'store_true'
argv.append(argname)
return argv + tests
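
    # Illustrative sketch (not in the original source): argv_from_args()
    # inverts parse_args() for non-default values, so an equivalent command
    # line can be handed to a child process:
    #
    #   parser = ArgumentParser()
    #   args = parser.parse_args(['-j', '1', '--timing', 'foo_test.py'])
    #   parser.argv_from_args(args)
    #   # -> ['--jobs', '1', '--timing', 'foo_test.py'], assuming the
    #   #    machine's default job count is not already 1.
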
def _action_for_key(self, key):
for action in self._actions:
if action.dest == key:
return action
assert False, ('Could not find an action for %s' # pragma: no cover
% key)


def _action_str(action):
# Access to a protected member pylint: disable=W0212
assert action.__class__ in (
argparse._AppendAction,
argparse._CountAction,
argparse._StoreAction,
argparse._StoreTrueAction
)
if isinstance(action, argparse._AppendAction):
return 'append'
if isinstance(action, argparse._CountAction):
return 'count'
if isinstance(action, argparse._StoreAction):
return 'store'
if isinstance(action, argparse._StoreTrueAction):
return 'store_true'


def _argname_from_key(key):
return '--' + key.replace('_', '-')
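

def _demo():  # pragma: no cover
    """Illustrative helper, not part of the original typ source.

    Sketches how the module-level helpers behave when handed actions from
    an ArgumentParser instance.
    """
    parser = ArgumentParser()
    action = parser._action_for_key('jobs')  # pylint: disable=W0212
    assert _action_str(action) == 'store'
    assert _argname_from_key('coverage_omit') == '--coverage-omit'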
| bsd-3-clause |