repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
sunlianqiang/kbengine | kbe/src/lib/python/Lib/idlelib/IOBinding.py | 70 | 19745 | import os
import types
import shlex
import sys
import codecs
import tempfile
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import re
from tkinter import *
from tkinter.simpledialog import askstring
from idlelib.configHandler import idleConf
from codecs import BOM_UTF8
# Try setting the locale, so that we can find out
# what encoding to use
try:
import locale
locale.setlocale(locale.LC_CTYPE, "")
except (ImportError, locale.Error):
pass
# Encoding for file names
filesystemencoding = sys.getfilesystemencoding() ### currently unused
locale_encoding = 'ascii'
if sys.platform == 'win32':
# On Windows, we could use "mbcs". However, to give the user
# a portable encoding name, we need to find the code page
try:
locale_encoding = locale.getdefaultlocale()[1]
codecs.lookup(locale_encoding)
except LookupError:
pass
else:
try:
# Different things can fail here: the locale module may not be
# loaded, it may not offer nl_langinfo, or CODESET, or the
# resulting codeset may be unknown to Python. We ignore all
# these problems, falling back to ASCII
locale_encoding = locale.nl_langinfo(locale.CODESET)
if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (NameError, AttributeError, LookupError):
# Try getdefaultlocale: it parses environment variables,
# which may give a clue. Unfortunately, getdefaultlocale has
# bugs that can cause ValueError.
try:
locale_encoding = locale.getdefaultlocale()[1]
if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (ValueError, LookupError):
pass
locale_encoding = locale_encoding.lower()
encoding = locale_encoding ### KBK 07Sep07 This is used all over IDLE, check!
### 'encoding' is used below in encode(), check!
coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
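# coding_re matches a PEP 263 declaration such as "# -*- coding: utf-8 -*-";
# blank_re matches lines that are blank or comment-only, which may legally
# precede the declaration.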
def coding_spec(data):
"""Return the encoding declaration according to PEP 263.
When checking encoded data, only the first two lines should be passed
in to avoid a UnicodeDecodeError if the rest of the data is not unicode.
The first two lines would contain the encoding specification.
Raise a LookupError if the encoding is declared but unknown.
"""
if isinstance(data, bytes):
# This encoding might be wrong. However, the coding
# spec must be ASCII-only, so any non-ASCII characters
# around here will be ignored. Decoding to Latin-1 should
# never fail (except when running out of memory)
lines = data.decode('iso-8859-1')
else:
lines = data
# consider only the first two lines
if '\n' in lines:
lst = lines.split('\n', 2)[:2]
elif '\r' in lines:
lst = lines.split('\r', 2)[:2]
else:
lst = [lines]
for line in lst:
match = coding_re.match(line)
if match is not None:
break
if not blank_re.match(line):
return None
else:
return None
name = match.group(1)
try:
codecs.lookup(name)
except LookupError:
# The standard encoding error does not indicate the encoding
raise LookupError("Unknown encoding: "+name)
return name
class IOBinding:
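# Binds the editor window's open/save/save-as/save-copy/print virtual events
# to the corresponding file operations, and handles decoding on load and
# encoding on save.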
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
self.__id_save = self.text.bind("<<save-window>>", self.save)
self.__id_saveas = self.text.bind("<<save-window-as-file>>",
self.save_as)
self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
self.save_a_copy)
self.fileencoding = None
self.__id_print = self.text.bind("<<print-window>>", self.print_window)
def close(self):
# Undo command bindings
self.text.unbind("<<open-window-from-file>>", self.__id_open)
self.text.unbind("<<save-window>>", self.__id_save)
self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
self.text.unbind("<<print-window>>", self.__id_print)
# Break cycles
self.editwin = None
self.text = None
self.filename_change_hook = None
def get_saved(self):
return self.editwin.get_saved()
def set_saved(self, flag):
self.editwin.set_saved(flag)
def reset_undo(self):
self.editwin.reset_undo()
filename_change_hook = None
def set_filename_change_hook(self, hook):
self.filename_change_hook = hook
filename = None
dirname = None
def set_filename(self, filename):
if filename and os.path.isdir(filename):
self.filename = None
self.dirname = filename
else:
self.filename = filename
self.dirname = None
self.set_saved(1)
if self.filename_change_hook:
self.filename_change_hook()
def open(self, event=None, editFile=None):
flist = self.editwin.flist
# Save in case parent window is closed (ie, during askopenfile()).
if flist:
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
# If editFile is valid and already open, flist.open will
# shift focus to its existing window.
# If the current window exists and is a fresh unnamed,
# unmodified editor window (not an interpreter shell),
# pass self.loadfile to flist.open so it will load the file
# in the current window (if the file is not already open)
# instead of a new window.
if (self.editwin and
not getattr(self.editwin, 'interp', None) and
not self.filename and
self.get_saved()):
flist.open(filename, self.loadfile)
else:
flist.open(filename)
else:
if self.text:
self.text.focus_set()
return "break"
# Code for use outside IDLE:
if self.get_saved():
reply = self.maybesave()
if reply == "cancel":
self.text.focus_set()
return "break"
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
self.loadfile(filename)
else:
self.text.focus_set()
return "break"
eol = r"(\r\n)|\n|\r" # \r\n (Windows), \n (UNIX), or \r (Mac)
eol_re = re.compile(eol)
eol_convention = os.linesep # default
def loadfile(self, filename):
try:
# open the file in binary mode so that we can handle
# end-of-line convention ourselves.
with open(filename, 'rb') as f:
two_lines = f.readline() + f.readline()
f.seek(0)
bytes = f.read()
except OSError as msg:
tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
return False
chars, converted = self._decode(two_lines, bytes)
if chars is None:
tkMessageBox.showerror("Decoding Error",
"File %s\nFailed to Decode" % filename,
parent=self.text)
return False
# We now convert all end-of-lines to '\n's
firsteol = self.eol_re.search(chars)
if firsteol:
self.eol_convention = firsteol.group(0)
chars = self.eol_re.sub(r"\n", chars)
self.text.delete("1.0", "end")
self.set_filename(None)
self.text.insert("1.0", chars)
self.reset_undo()
self.set_filename(filename)
if converted:
# We need to save the conversion results first
# before being able to execute the code
self.set_saved(False)
self.text.mark_set("insert", "1.0")
self.text.yview("insert")
self.updaterecentfileslist(filename)
return True
def _decode(self, two_lines, bytes):
"Create a Unicode string."
chars = None
# Check presence of a UTF-8 signature first
if bytes.startswith(BOM_UTF8):
try:
chars = bytes[3:].decode("utf-8")
except UnicodeDecodeError:
# has UTF-8 signature, but fails to decode...
return None, False
else:
# Indicates that this file originally had a BOM
self.fileencoding = 'BOM'
return chars, False
# Next look for coding specification
try:
enc = coding_spec(two_lines)
except LookupError as name:
tkMessageBox.showerror(
title="Error loading the file",
message="The encoding '%s' is not known to this Python "\
"installation. The file may not display correctly" % name,
master = self.text)
enc = None
except UnicodeDecodeError:
return None, False
if enc:
try:
chars = str(bytes, enc)
self.fileencoding = enc
return chars, False
except UnicodeDecodeError:
pass
# Try ascii:
try:
chars = str(bytes, 'ascii')
self.fileencoding = None
return chars, False
except UnicodeDecodeError:
pass
# Try utf-8:
try:
chars = str(bytes, 'utf-8')
self.fileencoding = 'utf-8'
return chars, False
except UnicodeDecodeError:
pass
# Finally, try the locale's encoding. This is deprecated;
# the user should declare a non-ASCII encoding
try:
# Wait for the editor window to appear
self.editwin.text.update()
enc = askstring(
"Specify file encoding",
"The file's encoding is invalid for Python 3.x.\n"
"IDLE will convert it to UTF-8.\n"
"What is the current encoding of the file?",
initialvalue = locale_encoding,
parent = self.editwin.text)
if enc:
chars = str(bytes, enc)
self.fileencoding = None
return chars, True
except (UnicodeDecodeError, LookupError):
pass
return None, False # None on failure
def maybesave(self):
if self.get_saved():
return "yes"
message = "Do you want to save %s before closing?" % (
self.filename or "this untitled document")
confirm = tkMessageBox.askyesnocancel(
title="Save On Close",
message=message,
default=tkMessageBox.YES,
master=self.text)
if confirm:
reply = "yes"
self.save(None)
if not self.get_saved():
reply = "cancel"
elif confirm is None:
reply = "cancel"
else:
reply = "no"
self.text.focus_set()
return reply
def save(self, event):
if not self.filename:
self.save_as(event)
else:
if self.writefile(self.filename):
self.set_saved(True)
try:
self.editwin.store_file_breaks()
except AttributeError: # may be a PyShell
pass
self.text.focus_set()
return "break"
def save_as(self, event):
filename = self.asksavefile()
if filename:
if self.writefile(filename):
self.set_filename(filename)
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError:
pass
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def save_a_copy(self, event):
filename = self.asksavefile()
if filename:
self.writefile(filename)
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def writefile(self, filename):
self.fixlastline()
text = self.text.get("1.0", "end-1c")
if self.eol_convention != "\n":
text = text.replace("\n", self.eol_convention)
chars = self.encode(text)
try:
with open(filename, "wb") as f:
f.write(chars)
return True
except OSError as msg:
tkMessageBox.showerror("I/O Error", str(msg),
master=self.text)
return False
def encode(self, chars):
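# Encoding order when saving: preserve a UTF-8 BOM if the file had one, use
# plain ASCII when possible, otherwise honour a PEP 263 declaration (defaulting
# to UTF-8 per PEP 3120), and fall back to UTF-8 with a BOM on failure.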
if isinstance(chars, bytes):
# This is either plain ASCII, or Tk was returning mixed-encoding
# text to us. Don't try to guess further.
return chars
# Preserve a BOM that might have been present on opening
if self.fileencoding == 'BOM':
return BOM_UTF8 + chars.encode("utf-8")
# See whether there is anything non-ASCII in it.
# If not, no need to figure out the encoding.
try:
return chars.encode('ascii')
except UnicodeError:
pass
# Check if there is an encoding declared
try:
# a string, let coding_spec slice it to the first two lines
enc = coding_spec(chars)
failed = None
except LookupError as msg:
failed = msg
enc = None
else:
if not enc:
# PEP 3120: default source encoding is UTF-8
enc = 'utf-8'
if enc:
try:
return chars.encode(enc)
except UnicodeError:
failed = "Invalid encoding '%s'" % enc
tkMessageBox.showerror(
"I/O Error",
"%s.\nSaving as UTF-8" % failed,
master = self.text)
# Fallback: save as UTF-8, with BOM - ignoring the incorrect
# declared encoding
return BOM_UTF8 + chars.encode("utf-8")
def fixlastline(self):
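# Ensure the buffer ends with a newline so the last line is written out complete.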
c = self.text.get("end-2c")
if c != '\n':
self.text.insert("end-1c", "\n")
def print_window(self, event):
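# Print the buffer: unsaved or unnamed content is first written to a temporary
# file, which is then piped to the platform print command configured in IDLE
# (posix and nt only).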
confirm = tkMessageBox.askokcancel(
title="Print",
message="Print to Default Printer",
default=tkMessageBox.OK,
master=self.text)
if not confirm:
self.text.focus_set()
return "break"
tempfilename = None
saved = self.get_saved()
if saved:
filename = self.filename
# shell undo is reset after every prompt, looks saved, probably isn't
if not saved or filename is None:
(tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_')
filename = tempfilename
os.close(tfd)
if not self.writefile(tempfilename):
os.unlink(tempfilename)
return "break"
platform = os.name
printPlatform = True
if platform == 'posix': #posix platform
command = idleConf.GetOption('main','General',
'print-command-posix')
command = command + " 2>&1"
elif platform == 'nt': #win32 platform
command = idleConf.GetOption('main','General','print-command-win')
else: #no printing for this platform
printPlatform = False
if printPlatform: #we can try to print for this platform
command = command % shlex.quote(filename)
pipe = os.popen(command, "r")
# things can get ugly on NT if there is no printer available.
output = pipe.read().strip()
status = pipe.close()
if status:
output = "Printing failed (exit status 0x%x)\n" % \
status + output
if output:
output = "Printing command: %s\n" % repr(command) + output
tkMessageBox.showerror("Print status", output, master=self.text)
else: #no printing for this platform
message = "Printing is not enabled for this platform: %s" % platform
tkMessageBox.showinfo("Print status", message, master=self.text)
if tempfilename:
os.unlink(tempfilename)
return "break"
opendialog = None
savedialog = None
filetypes = [
("Python files", "*.py *.pyw", "TEXT"),
("Text files", "*.txt", "TEXT"),
("All files", "*"),
]
defaultextension = '.py' if sys.platform == 'darwin' else ''
def askopenfile(self):
dir, base = self.defaultfilename("open")
if not self.opendialog:
self.opendialog = tkFileDialog.Open(master=self.text,
filetypes=self.filetypes)
filename = self.opendialog.show(initialdir=dir, initialfile=base)
return filename
def defaultfilename(self, mode="open"):
if self.filename:
return os.path.split(self.filename)
elif self.dirname:
return self.dirname, ""
else:
try:
pwd = os.getcwd()
except OSError:
pwd = ""
return pwd, ""
def asksavefile(self):
dir, base = self.defaultfilename("save")
if not self.savedialog:
self.savedialog = tkFileDialog.SaveAs(
master=self.text,
filetypes=self.filetypes,
defaultextension=self.defaultextension)
filename = self.savedialog.show(initialdir=dir, initialfile=base)
return filename
def updaterecentfileslist(self,filename):
"Update recent file list on all editor windows"
if self.editwin.flist:
self.editwin.update_recent_files_list(filename)
def _io_binding(parent):
root = Tk()
root.title("Test IOBinding")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind("<Control-o>", self.open)
self.text.bind("<Control-s>", self.save)
def get_saved(self): return 0
def set_saved(self, flag): pass
def reset_undo(self): pass
def open(self, event):
self.text.event_generate("<<open-window-from-file>>")
def save(self, event):
self.text.event_generate("<<save-window>>")
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = IOBinding(editwin)
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_io_binding)
| lgpl-3.0 |
ilexius/odoo | addons/website_project_issue/tests/test_access_rights.py | 45 | 6654 | # -*- coding: utf-8 -*-
from openerp.addons.project.tests.test_access_rights import TestPortalProjectBase
from openerp.exceptions import AccessError
from openerp.tools import mute_logger
class TestPortalProjectBase(TestPortalProjectBase):
def setUp(self):
super(TestPortalProjectBase, self).setUp()
Issue = self.env['project.issue'].with_context({'mail_create_nolog': True})
self.issue_1 = Issue.create({
'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_2 = Issue.create({
'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_3 = Issue.create({
'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_4 = Issue.create({
'name': 'Test4', 'user_id': self.user_projectuser.id, 'project_id': self.project_pigs.id})
self.issue_5 = Issue.create({
'name': 'Test5', 'user_id': self.user_portal.id, 'project_id': self.project_pigs.id})
self.issue_6 = Issue.create({
'name': 'Test6', 'user_id': self.user_public.id, 'project_id': self.project_pigs.id})
class TestPortalIssue(TestPortalProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_project_access_rights(self):
""" Test basic project access rights, for project and portal_project """
pigs_id = self.project_pigs.id
Issue = self.env['project.issue']
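# The test walks through the three privacy_visibility settings of the project
# ('portal', 'employees', 'followers') and checks which issues each user can
# search in each case.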
# ----------------------------------------
# CASE1: portal project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'portal'})
# Do: Alfred reads project -> ok (employee ok public)
# Test: all project issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
test_issue_ids = set([self.issue_1.id, self.issue_2.id, self.issue_3.id, self.issue_4.id, self.issue_5.id, self.issue_6.id])
self.assertEqual(set(issues.ids), test_issue_ids,
'access rights: project user cannot see all issues of a portal project')
# Do: Bert reads project -> crash, no group
# Test: no project issue searchable
self.assertRaises(AccessError, Issue.sudo(self.user_noone.id).search, [('project_id', '=', pigs_id)])
# Data: issue follower
self.issue_1.sudo(self.user_projectuser.id).message_subscribe_users(user_ids=[self.user_portal.id])
self.issue_3.sudo(self.user_projectuser.id).message_subscribe_users(user_ids=[self.user_portal.id])
# Do: Chell reads project -> ok (portal ok public)
# Test: only followed project issues visible + assigned
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_5.id]),
'access rights: portal user should see the followed issues of a portal project')
# Data: issue follower cleaning
self.issue_1.sudo(self.user_projectuser.id).message_unsubscribe_users(user_ids=[self.user_portal.id])
self.issue_3.sudo(self.user_projectuser.id).message_unsubscribe_users(user_ids=[self.user_portal.id])
# ----------------------------------------
# CASE2: employee project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'employees'})
# Do: Alfred reads project -> ok (employee ok employee)
# Test: all project issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_2.id, self.issue_3.id,
self.issue_4.id, self.issue_5.id, self.issue_6.id]),
'access rights: project user cannot see all issues of an employees project')
# Do: Chell reads project -> ko (portal ko employee)
# Test: no project issue visible + assigned
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertFalse(issues.ids, 'access rights: portal user should not see issues of an employees project, even if assigned')
# ----------------------------------------
# CASE3: followers project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'followers'})
# Do: Alfred reads project -> ko (employee ko followers)
# Test: no project issue visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_4.id]),
'access rights: employee user should not see issues of a not-followed followers project, only assigned')
# Do: Chell reads project -> ko (portal ko employee)
# Test: no project issue visible
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_5.id]),
'access rights: portal user should not see issues of a not-followed followers project, only assigned')
# Data: subscribe Alfred, Chell and Donovan as follower
self.project_pigs.message_subscribe_users(user_ids=[self.user_projectuser.id, self.user_portal.id, self.user_public.id])
self.issue_1.sudo(self.user_projectmanager.id).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
self.issue_3.sudo(self.user_projectmanager.id).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
# Do: Alfred reads project -> ok (follower ok followers)
# Test: followed + assigned issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_4.id]),
'access rights: employee user should see followed + assigned issues of a follower project')
# Do: Chell reads project -> ok (follower ok follower)
# Test: followed + assigned issues visible
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_5.id]),
'access rights: portal user should see followed + assigned issues of a follower project')
| gpl-3.0 |
lightcn/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Expression.py | 384 | 4146 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
database="test"
uid = 3
class Expression(unohelper.Base, XJobExecutor ):
def __init__(self, sExpression="", sName="", bFromModify=False):
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 65, "Expression Builder")
self.win.addFixedText("lblExpression",17 , 10, 35, 15, "Expression :")
self.win.addEdit("txtExpression", -5, 5, 123, 15)
self.win.addFixedText("lblName", 2, 30, 50, 15, "Displayed Name :")
self.win.addEdit("txtName", -5, 25, 123, 15)
self.win.addButton( "btnOK", -5, -5, 40, 15, "OK", actionListenerProc = self.btnOk_clicked )
self.win.addButton( "btnCancel", -5 - 40 -5, -5, 40, 15, "Cancel", actionListenerProc = self.btnCancel_clicked )
self.bModify=bFromModify
if self.bModify==True:
self.win.setEditText("txtExpression",sExpression)
self.win.setEditText("txtName",sName)
self.win.doModalDialog("",None)
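# The OK handler inserts the expression into the document as a DropDown text
# field whose value is wrapped in "[[ ... ]]", the OpenERP report placeholder
# syntax; in modify mode it updates the field under the cursor instead.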
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
text = doc.Text
cursor = doc.getCurrentController().getViewCursor()
if self.bModify==True:
oCurObj=cursor.TextField
sKey=u""+self.win.getEditText("txtName")
sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
oCurObj.Items = (sKey,sValue)
oCurObj.update()
self.win.endExecute()
else:
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if self.win.getEditText("txtName")!="" and self.win.getEditText("txtExpression")!="":
sKey=u""+self.win.getEditText("txtName")
sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
if cursor.TextTable==None:
oInputList.Items = (sKey,sValue)
text.insertTextContent(cursor,oInputList,False)
else:
oTable = cursor.TextTable
oCurCell = cursor.Cell
tableText = oTable.getCellByName( oCurCell.CellName )
oInputList.Items = (sKey,sValue)
tableText.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in Name field or in Expression field.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
Expression()
elif __name__=="package":
g_ImplementationHelper.addImplementation( Expression, "org.openoffice.openerp.report.expression", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ray-project/ray | python/ray/autoscaler/_private/aliyun/node_provider.py | 1 | 12663 | import random
import threading
from collections import defaultdict
import logging
import time
from typing import Any, Dict, List, Optional
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME, \
TAG_RAY_LAUNCH_CONFIG, TAG_RAY_NODE_KIND, \
TAG_RAY_USER_NODE_TYPE, TAG_RAY_NODE_STATUS
from ray.autoscaler._private.constants import BOTO_MAX_RETRIES
from ray.autoscaler._private.log_timer import LogTimer
from ray.autoscaler._private.cli_logger import cli_logger
from ray.autoscaler._private.aliyun.utils import AcsClient
from ray.autoscaler._private.aliyun.config import PENDING, STOPPED, \
STOPPING, RUNNING, bootstrap_aliyun
logger = logging.getLogger(__name__)
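# Both delays are in seconds: TAG_BATCH_DELAY is how long the batching thread
# waits to collect tag updates before flushing them, STOPPING_NODE_DELAY is the
# poll interval used while waiting on instance state (e.g. for a node to stop
# or to report its IP addresses).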
TAG_BATCH_DELAY = 1
STOPPING_NODE_DELAY = 1
class AliyunNodeProvider(NodeProvider):
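# Autoscaler NodeProvider backed by Alibaba Cloud ECS. All API calls go through
# the AcsClient wrapper; node tags are applied in batches to limit API traffic,
# and stopped instances can be cached and reused instead of relaunched.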
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
True)
self.acs = AcsClient(
access_key=provider_config["access_key"],
access_key_secret=provider_config["access_key_secret"],
region_id=provider_config["region"],
max_retries=BOTO_MAX_RETRIES,
)
# Try availability zones round-robin, starting from random offset
self.subnet_idx = random.randint(0, 100)
# Tags that we believe to actually be on the node.
self.tag_cache = {}
# Tags that we will soon upload.
self.tag_cache_pending = defaultdict(dict)
# Number of threads waiting for a batched tag update.
self.batch_thread_count = 0
self.batch_update_done = threading.Event()
self.batch_update_done.set()
self.ready_for_new_batch = threading.Event()
self.ready_for_new_batch.set()
self.tag_cache_lock = threading.Lock()
self.count_lock = threading.Lock()
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]:
tags = [
{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
},
]
for k, v in tag_filters.items():
tags.append({
"Key": k,
"Value": v,
})
instances = self.acs.describe_instances(tags=tags)
non_terminated_instance = []
for instance in instances:
if instance.get("Status") == RUNNING or instance.get(
"Status") == PENDING:
non_terminated_instance.append(instance.get("InstanceId"))
self.cached_nodes[instance.get("InstanceId")] = instance
return non_terminated_instance
def is_running(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
instance = instances[0]
return instance.get("Status") == "Running"
cli_logger.error("Invalid node id: %s", node_id)
return False
def is_terminated(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
return instance.get("Status") == "Stopped"
cli_logger.error("Invalid node id: %s", node_id)
return False
def node_tags(self, node_id: str) -> Dict[str, str]:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("Tags") is not None:
node_tags = dict()
for tag in instance.get("Tags").get("Tag"):
node_tags[tag.get("TagKey")] = tag.get("TagValue")
return node_tags
return dict()
def external_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances)
instance = instances[0]
if instance.get("PublicIpAddress") is not None \
and instance.get(
"PublicIpAddress").get("IpAddress") is not None:
if len(instance.get("PublicIpAddress").get(
"IpAddress")) > 0:
return instance.get("PublicIpAddress").get(
"IpAddress")[0]
cli_logger.error(
"PublicIpAddress attribute is not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def internal_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("VpcAttributes") is not None and instance.get(
"VpcAttributes").get(
"PrivateIpAddress") is not None and len(
instance.get("VpcAttributes").get(
"PrivateIpAddress").get("IpAddress")) > 0:
return instance.get("VpcAttributes").get(
"PrivateIpAddress").get("IpAddress")[0]
cli_logger.error(
"InnerIpAddress attribute is not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def set_node_tags(self, node_id: str, tags: Dict[str, str]) -> None:
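# Tag updates are batched: the first caller in a window becomes the batching
# thread, sleeps TAG_BATCH_DELAY to let other callers queue their tags, then
# flushes everything pending in a single _update_node_tags() pass.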
is_batching_thread = False
with self.tag_cache_lock:
if not self.tag_cache_pending:
is_batching_thread = True
# Wait for threads in the last batch to exit
self.ready_for_new_batch.wait()
self.ready_for_new_batch.clear()
self.batch_update_done.clear()
self.tag_cache_pending[node_id].update(tags)
if is_batching_thread:
time.sleep(TAG_BATCH_DELAY)
with self.tag_cache_lock:
self._update_node_tags()
self.batch_update_done.set()
with self.count_lock:
self.batch_thread_count += 1
self.batch_update_done.wait()
with self.count_lock:
self.batch_thread_count -= 1
if self.batch_thread_count == 0:
self.ready_for_new_batch.set()
def _update_node_tags(self):
batch_updates = defaultdict(list)
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id] = tags
self.tag_cache_pending = defaultdict(dict)
self._create_tags(batch_updates)
def _create_tags(self, batch_updates):
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AliyunNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.acs.tag_resource(node_ids, [{"Key": k, "Value": v}])
def create_node(self, node_config: Dict[str, Any], tags: Dict[str, str],
count: int) -> Optional[Dict[str, Any]]:
filter_tags = [{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
}, {
"Key": TAG_RAY_NODE_KIND,
"Value": tags[TAG_RAY_NODE_KIND]
}, {
"Key": TAG_RAY_USER_NODE_TYPE,
"Value": tags[TAG_RAY_USER_NODE_TYPE]
}, {
"Key": TAG_RAY_LAUNCH_CONFIG,
"Value": tags[TAG_RAY_LAUNCH_CONFIG]
}, {
"Key": TAG_RAY_NODE_NAME,
"Value": tags[TAG_RAY_NODE_NAME]
}]
reused_nodes_dict = {}
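# With cache_stopped_nodes enabled, previously stopped instances matching the
# filter tags are restarted and reused before any new instances are launched.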
if self.cache_stopped_nodes:
reuse_nodes_candidate = self.acs.describe_instances(
tags=filter_tags)
if reuse_nodes_candidate:
with cli_logger.group("Stopping instances to reuse"):
reuse_node_ids = []
for node in reuse_nodes_candidate:
node_id = node.get("InstanceId")
status = node.get("Status")
if status != STOPPING and status != STOPPED:
continue
if status == STOPPING:
# wait for node stopped
while self.acs.describe_instances(
instance_ids=[node_id])[0].get(
"Status") == STOPPING:
logging.info("wait for %s stop" % node_id)
time.sleep(STOPPING_NODE_DELAY)
# logger.info("reuse %s" % node_id)
reuse_node_ids.append(node_id)
reused_nodes_dict[node.get("InstanceId")] = node
self.acs.start_instance(node_id)
self.tag_cache[node_id] = node.get("Tags")
self.set_node_tags(node_id, tags)
if len(reuse_node_ids) == count:
break
count -= len(reuse_node_ids)
created_nodes_dict = {}
if count > 0:
filter_tags.append({
"Key": TAG_RAY_NODE_STATUS,
"Value": tags[TAG_RAY_NODE_STATUS]
})
instance_id_sets = self.acs.run_instances(
instance_type=node_config["InstanceType"],
image_id=node_config["ImageId"],
tags=filter_tags,
amount=count,
vswitch_id=self.provider_config["v_switch_id"],
security_group_id=self.provider_config["security_group_id"],
key_pair_name=self.provider_config["key_name"])
instances = self.acs.describe_instances(
instance_ids=instance_id_sets)
if instances is not None:
for instance in instances:
created_nodes_dict[instance.get("InstanceId")] = instance
all_created_nodes = reused_nodes_dict
all_created_nodes.update(created_nodes_dict)
return all_created_nodes
def terminate_node(self, node_id: str) -> None:
logger.info("terminate node: %s" % node_id)
if self.cache_stopped_nodes:
logger.info(
"Stopping instance {} (to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)").format(
node_id)
self.acs.stop_instance(node_id)
else:
self.acs.delete_instance(node_id)
def terminate_nodes(self, node_ids: List[str]) -> None:
if not node_ids:
return
if self.cache_stopped_nodes:
logger.info(
"Stopping instances {} (to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)".format(
node_ids))
self.acs.stop_instances(node_ids)
else:
self.acs.delete_instances(node_ids)
def _get_node(self, node_id):
"""Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({}) # Side effect: updates cache
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = self.acs.describe_instances(instance_ids=[node_id])
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_aliyun(cluster_config)
| apache-2.0 |
dkubiak789/odoo | addons/payment_ogone/data/ogone.py | 395 | 30321 | # -*- coding: utf-8 -*-
OGONE_ERROR_MAP = {
'0020001001': "Authorization failed, please retry",
'0020001002': "Authorization failed, please retry",
'0020001003': "Authorization failed, please retry",
'0020001004': "Authorization failed, please retry",
'0020001005': "Authorization failed, please retry",
'0020001006': "Authorization failed, please retry",
'0020001007': "Authorization failed, please retry",
'0020001008': "Authorization failed, please retry",
'0020001009': "Authorization failed, please retry",
'0020001010': "Authorization failed, please retry",
'0030001999': "Our payment system is currently under maintenance, please try later",
'0050001005': "Expiry date error",
'0050001007': "Requested Operation code not allowed",
'0050001008': "Invalid delay value",
'0050001010': "Input date in invalid format",
'0050001013': "Unable to parse socket input stream",
'0050001014': "Error in parsing stream content",
'0050001015': "Currency error",
'0050001016': "Transaction still posted at end of wait",
'0050001017': "Sync value not compatible with delay value",
'0050001019': "Transaction duplicate of a pre-existing transaction",
'0050001020': "Acceptation code empty while required for the transaction",
'0050001024': "Maintenance acquirer differs from original transaction acquirer",
'0050001025': "Maintenance merchant differs from original transaction merchant",
'0050001028': "Maintenance operation not accurate for the original transaction",
'0050001031': "Host application unknown for the transaction",
'0050001032': "Unable to perform requested operation with requested currency",
'0050001033': "Maintenance card number differs from original transaction card number",
'0050001034': "Operation code not allowed",
'0050001035': "Exception occurred in socket input stream treatment",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001068': "A technical problem occurred, please contact helpdesk",
'0050001069': "Invalid check for CardID and Brand",
'0050001070': "A technical problem occurred, please contact helpdesk",
'0050001116': "Unknown origin IP",
'0050001117': "No origin IP detected",
'0050001118': "Merchant configuration problem, please contact support",
'10001001': "Communication failure",
'10001002': "Communication failure",
'10001003': "Communication failure",
'10001004': "Communication failure",
'10001005': "Communication failure",
'20001001': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001002': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001003': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001004': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001005': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001006': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001007': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001008': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001009': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001010': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001101': "A technical problem occurred, please contact helpdesk",
'20001105': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001111': "A technical problem occurred, please contact helpdesk",
'20002001': "Origin for the response of the bank can not be checked",
'20002002': "Beneficiary account number has been modified during processing",
'20002003': "Amount has been modified during processing",
'20002004': "Currency has been modified during processing",
'20002005': "No feedback from the bank server has been detected",
'30001001': "Payment refused by the acquirer",
'30001002': "Duplicate request",
'30001010': "A technical problem occurred, please contact helpdesk",
'30001011': "A technical problem occurred, please contact helpdesk",
'30001012': "Card black listed - Contact acquirer",
'30001015': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001051': "A technical problem occurred, please contact helpdesk",
'30001054': "A technical problem occurred, please contact helpdesk",
'30001057': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001058': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001060': "Aquirer indicates that a failure occured during payment processing",
'30001070': "RATEPAY Invalid Response Type (Failure)",
'30001071': "RATEPAY Missing Mandatory status code field (failure)",
'30001072': "RATEPAY Missing Mandatory Result code field (failure)",
'30001073': "RATEPAY Response parsing Failed",
'30001090': "CVC check required by front end and returned invalid by acquirer",
'30001091': "ZIP check required by front end and returned invalid by acquirer",
'30001092': "Address check required by front end and returned as invalid by acquirer.",
'30001100': "Unauthorized buyer's country",
'30001101': "IP country <> card country",
'30001102': "Number of different countries too high",
'30001103': "unauthorized card country",
'30001104': "unauthorized ip address country",
'30001105': "Anonymous proxy",
'30001110': "If the problem persists, please contact Support, or go to paysafecard's card balance page (https://customer.cc.at.paysafecard.com/psccustomer/GetWelcomePanelServlet?language=en) to see when the amount reserved on your card will be available again.",
'30001120': "IP address in merchant's black list",
'30001130': "BIN in merchant's black list",
'30001131': "Wrong BIN for 3xCB",
'30001140': "Card in merchant's card blacklist",
'30001141': "Email in blacklist",
'30001142': "Passenger name in blacklist",
'30001143': "Card holder name in blacklist",
'30001144': "Passenger name different from owner name",
'30001145': "Time to departure too short",
'30001149': "Card Configured in Card Supplier Limit for another relation (CSL)",
'30001150': "Card not configured in the system for this customer (CSL)",
'30001151': "REF1 not allowed for this relationship (Contract number",
'30001152': "Card/Supplier Amount limit reached (CSL)",
'30001153': "Card not allowed for this supplier (Date out of contract bounds)",
'30001154': "You have reached the usage limit allowed",
'30001155': "You have reached the usage limit allowed",
'30001156': "You have reached the usage limit allowed",
'30001157': "Unauthorized IP country for itinerary",
'30001158': "email usage limit reached",
'30001159': "Unauthorized card country/IP country combination",
'30001160': "Postcode in highrisk group",
'30001161': "generic blacklist match",
'30001162': "Billing Address is a PO Box",
'30001180': "maximum scoring reached",
'30001997': "Authorization canceled by simulation",
'30001998': "A technical problem occurred, please try again.",
'30001999': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30002001': "Payment refused by the financial institution",
'30002001': "Payment refused by the financial institution",
'30021001': "Call acquirer support call number.",
'30022001': "Payment must be approved by the acquirer before execution.",
'30031001': "Invalid merchant number.",
'30041001': "Retain card.",
'30051001': "Authorization declined",
'30071001': "Retain card - special conditions.",
'30121001': "Invalid transaction",
'30131001': "Invalid amount",
'30131002': "You have reached the total amount allowed",
'30141001': "Invalid card number",
'30151001': "Unknown acquiring institution.",
'30171001': "Payment method cancelled by the buyer",
'30171002': "The maximum time allowed is elapsed.",
'30191001': "Try again later.",
'30201001': "A technical problem occurred, please contact helpdesk",
'30301001': "Invalid format",
'30311001': "Unknown acquirer ID.",
'30331001': "Card expired.",
'30341001': "Suspicion of fraud.",
'30341002': "Suspicion of fraud (3rdMan)",
'30341003': "Suspicion of fraud (Perseuss)",
'30341004': "Suspicion of fraud (ETHOCA)",
'30381001': "A technical problem occurred, please contact helpdesk",
'30401001': "Invalid function.",
'30411001': "Lost card.",
'30431001': "Stolen card, pick up",
'30511001': "Insufficient funds.",
'30521001': "No Authorization. Contact the issuer of your card.",
'30541001': "Card expired.",
'30551001': "Invalid PIN.",
'30561001': "Card not in authorizer's database.",
'30571001': "Transaction not permitted on card.",
'30581001': "Transaction not allowed on this terminal",
'30591001': "Suspicion of fraud.",
'30601001': "The merchant must contact the acquirer.",
'30611001': "Amount exceeds card ceiling.",
'30621001': "Restricted card.",
'30631001': "Security policy not respected.",
'30641001': "Amount changed from ref. trn.",
'30681001': "Tardy response.",
'30751001': "PIN entered incorrectly too often",
'30761001': "Card holder already contesting.",
'30771001': "PIN entry required.",
'30811001': "Message flow error.",
'30821001': "Authorization center unavailable",
'30831001': "Authorization center unavailable",
'30901001': "Temporary system shutdown.",
'30911001': "Acquirer unavailable.",
'30921001': "Invalid card type for acquirer.",
'30941001': "Duplicate transaction",
'30961001': "Processing temporarily not possible",
'30971001': "A technical problem occurred, please contact helpdesk",
'30981001': "A technical problem occurred, please contact helpdesk",
'31011001': "Unknown acceptance code",
'31021001': "Invalid currency",
'31031001': "Acceptance code missing",
'31041001': "Inactive card",
'31051001': "Merchant not active",
'31061001': "Invalid expiration date",
'31071001': "Interrupted host communication",
'31081001': "Card refused",
'31091001': "Invalid password",
'31101001': "Plafond transaction (majoré du bonus) dépassé",
'31111001': "Plafond mensuel (majoré du bonus) dépassé",
'31121001': "Plafond centre de facturation dépassé",
'31131001': "Plafond entreprise dépassé",
'31141001': "Code MCC du fournisseur non autorisé pour la carte",
'31151001': "Numéro SIRET du fournisseur non autorisé pour la carte",
'31161001': "This is not a valid online banking account",
'32001004': "A technical problem occurred, please try again.",
'34011001': "Bezahlung mit RatePAY nicht möglich.",
'39991001': "A technical problem occurred, please contact the helpdesk of your acquirer",
'40001001': "A technical problem occurred, please try again.",
'40001002': "A technical problem occurred, please try again.",
'40001003': "A technical problem occurred, please try again.",
'40001004': "A technical problem occurred, please try again.",
'40001005': "A technical problem occurred, please try again.",
'40001006': "A technical problem occurred, please try again.",
'40001007': "A technical problem occurred, please try again.",
'40001008': "A technical problem occurred, please try again.",
'40001009': "A technical problem occurred, please try again.",
'40001010': "A technical problem occurred, please try again.",
'40001011': "A technical problem occurred, please contact helpdesk",
'40001012': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'40001013': "A technical problem occurred, please contact helpdesk",
'40001016': "A technical problem occurred, please contact helpdesk",
'40001018': "A technical problem occurred, please try again.",
'40001019': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001020': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001050': "A technical problem occurred, please contact helpdesk",
'40001133': "Authentication failed, the signature of your bank access control server is incorrect",
'40001134': "Authentication failed, please retry or cancel.",
'40001135': "Authentication temporary unavailable, please retry or cancel.",
'40001136': "Technical problem with your browser, please retry or cancel",
'40001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'40001998': "Temporary technical problem. Please retry a little bit later.",
'50001001': "Unknown card type",
'50001002': "Card number format check failed for given card number.",
'50001003': "Merchant data error",
'50001004': "Merchant identification missing",
'50001005': "Expiry date error",
'50001006': "Amount is not a number",
'50001007': "A technical problem occurred, please contact helpdesk",
'50001008': "A technical problem occurred, please contact helpdesk",
'50001009': "A technical problem occurred, please contact helpdesk",
'50001010': "A technical problem occurred, please contact helpdesk",
'50001011': "Brand not supported for that merchant",
'50001012': "A technical problem occurred, please contact helpdesk",
'50001013': "A technical problem occurred, please contact helpdesk",
'50001014': "A technical problem occurred, please contact helpdesk",
'50001015': "Invalid currency code",
'50001016': "A technical problem occurred, please contact helpdesk",
'50001017': "A technical problem occurred, please contact helpdesk",
'50001018': "A technical problem occurred, please contact helpdesk",
'50001019': "A technical problem occurred, please contact helpdesk",
'50001020': "A technical problem occurred, please contact helpdesk",
'50001021': "A technical problem occurred, please contact helpdesk",
'50001022': "A technical problem occurred, please contact helpdesk",
'50001023': "A technical problem occurred, please contact helpdesk",
'50001024': "A technical problem occurred, please contact helpdesk",
'50001025': "A technical problem occurred, please contact helpdesk",
'50001026': "A technical problem occurred, please contact helpdesk",
'50001027': "A technical problem occurred, please contact helpdesk",
'50001028': "A technical problem occurred, please contact helpdesk",
'50001029': "A technical problem occurred, please contact helpdesk",
'50001030': "A technical problem occurred, please contact helpdesk",
'50001031': "A technical problem occurred, please contact helpdesk",
'50001032': "A technical problem occurred, please contact helpdesk",
'50001033': "A technical problem occurred, please contact helpdesk",
'50001034': "A technical problem occurred, please contact helpdesk",
'50001035': "A technical problem occurred, please contact helpdesk",
'50001036': "Card length does not correspond to an acceptable value for the brand",
'50001037': "Purchasing card number for a regular merchant",
'50001038': "Non Purchasing card for a Purchasing card merchant",
'50001039': "Details sent for a non-Purchasing card merchant, please contact helpdesk",
'50001040': "Details not sent for a Purchasing card transaction, please contact helpdesk",
'50001041': "Payment detail validation failed",
'50001042': "Given transactions amounts (tax,discount,shipping,net,etc…) do not compute correctly together",
'50001043': "A technical problem occurred, please contact helpdesk",
'50001044': "No acquirer configured for this operation",
'50001045': "No UID configured for this operation",
'50001046': "Operation not allowed for the merchant",
'50001047': "A technical problem occurred, please contact helpdesk",
'50001048': "A technical problem occurred, please contact helpdesk",
'50001049': "A technical problem occurred, please contact helpdesk",
'50001050': "A technical problem occurred, please contact helpdesk",
'50001051': "A technical problem occurred, please contact helpdesk",
'50001052': "A technical problem occurred, please contact helpdesk",
'50001053': "A technical problem occurred, please contact helpdesk",
'50001054': "Card number incorrect or incompatible",
'50001055': "A technical problem occurred, please contact helpdesk",
'50001056': "A technical problem occurred, please contact helpdesk",
'50001057': "A technical problem occurred, please contact helpdesk",
'50001058': "A technical problem occurred, please contact helpdesk",
'50001059': "A technical problem occurred, please contact helpdesk",
'50001060': "A technical problem occurred, please contact helpdesk",
'50001061': "A technical problem occurred, please contact helpdesk",
'50001062': "A technical problem occurred, please contact helpdesk",
'50001063': "Card Issue Number does not correspond to range or not present",
'50001064': "Start Date not valid or not present",
'50001066': "Format of CVC code invalid",
'50001067': "The merchant is not enrolled for 3D-Secure",
'50001068': "The card number or account number (PAN) is invalid",
'50001069': "Invalid check for CardID and Brand",
'50001070': "The ECI value given is either not supported, or in conflict with other data in the transaction",
'50001071': "Incomplete TRN demat",
'50001072': "Incomplete PAY demat",
'50001073': "No demat APP",
'50001074': "Authorisation too old",
'50001075': "VERRes was an error message",
'50001076': "DCP amount greater than authorisation amount",
'50001077': "Details negative amount",
'50001078': "Details negative quantity",
'50001079': "Could not decode/decompress received PARes (3D-Secure)",
'50001080': "Received PARes was an erereor message from ACS (3D-Secure)",
'50001081': "Received PARes format was invalid according to the 3DS specifications (3D-Secure)",
'50001082': "PAReq/PARes reconciliation failure (3D-Secure)",
'50001084': "Maximum amount reached",
'50001087': "The transaction type requires authentication, please check with your bank.",
'50001090': "CVC missing at input, but CVC check asked",
'50001091': "ZIP missing at input, but ZIP check asked",
'50001092': "Address missing at input, but Address check asked",
'50001095': "Invalid date of birth",
'50001096': "Invalid commodity code",
'50001097': "The requested currency and brand are incompatible.",
'50001111': "Data validation error",
'50001113': "This order has already been processed",
'50001114': "Error pre-payment check page access",
'50001115': "Request not received in secure mode",
'50001116': "Unknown IP address origin",
'50001117': "NO IP address origin",
'50001118': "Pspid not found or not correct",
'50001119': "Password incorrect or disabled due to numbers of errors",
'50001120': "Invalid currency",
'50001121': "Invalid number of decimals for the currency",
'50001122': "Currency not accepted by the merchant",
'50001123': "Card type not active",
'50001124': "Number of lines don't match with number of payments",
'50001125': "Format validation error",
'50001126': "Overflow in data capture requests for the original order",
'50001127': "The original order is not in a correct status",
'50001128': "missing authorization code for unauthorized order",
'50001129': "Overflow in refunds requests",
'50001130': "Error access to original order",
'50001131': "Error access to original history item",
'50001132': "The Selected Catalog is empty",
'50001133': "Duplicate request",
'50001134': "Authentication failed, please retry or cancel.",
'50001135': "Authentication temporary unavailable, please retry or cancel.",
'50001136': "Technical problem with your browser, please retry or cancel",
'50001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'50001150': "Fraud Detection, Technical error (IP not valid)",
'50001151': "Fraud detection : technical error (IPCTY unknown or error)",
'50001152': "Fraud detection : technical error (CCCTY unknown or error)",
'50001153': "Overflow in redo-authorisation requests",
'50001170': "Dynamic BIN check failed",
'50001171': "Dynamic country check failed",
'50001172': "Error in Amadeus signature",
'50001174': "Card Holder Name is too long",
'50001175': "Name contains invalid characters",
'50001176': "Card number is too long",
'50001177': "Card number contains non-numeric info",
'50001178': "Card Number Empty",
'50001179': "CVC too long",
'50001180': "CVC contains non-numeric info",
'50001181': "Expiration date contains non-numeric info",
'50001182': "Invalid expiration month",
'50001183': "Expiration date must be in the future",
'50001184': "SHA Mismatch",
'50001205': "Missing mandatory fields for billing address.",
'50001206': "Missing mandatory field date of birth.",
'50001207': "Missing required shopping basket details.",
'50001208': "Missing social security number",
'50001209': "Invalid country code",
'50001210': "Missing yearly salary",
'50001211': "Missing gender",
'50001212': "Missing email",
'50001213': "Missing IP address",
'50001214': "Missing part payment campaign ID",
'50001215': "Missing invoice number",
'50001216': "The alias must be different than the card number",
'60000001': "account number unknown",
'60000003': "not credited dd-mm-yy",
'60000005': "name/number do not correspond",
'60000007': "account number blocked",
'60000008': "specific direct debit block",
'60000009': "account number WKA",
'60000010': "administrative reason",
'60000011': "account number expired",
'60000012': "no direct debit authorisation given",
'60000013': "debit not approved",
'60000014': "double payment",
'60000018': "name/address/city not entered",
'60001001': "no original direct debit for revocation",
'60001002': "payer’s account number format error",
'60001004': "payer’s account at different bank",
'60001005': "payee’s account at different bank",
'60001006': "payee’s account number format error",
'60001007': "payer’s account number blocked",
'60001008': "payer’s account number expired",
'60001009': "payee’s account number expired",
'60001010': "direct debit not possible",
'60001011': "creditor payment not possible",
'60001012': "payer’s account number unknown WKA-number",
'60001013': "payee’s account number unknown WKA-number",
'60001014': "impermissible WKA transaction",
'60001015': "period for revocation expired",
'60001017': "reason for revocation not correct",
'60001018': "original run number not numeric",
'60001019': "payment ID incorrect",
'60001020': "amount not numeric",
'60001021': "amount zero not permitted",
'60001022': "negative amount not permitted",
'60001023': "payer and payee giro account number",
'60001025': "processing code (verwerkingscode) incorrect",
'60001028': "revocation not permitted",
'60001029': "guaranteed direct debit on giro account number",
'60001030': "NBC transaction type incorrect",
'60001031': "description too large",
'60001032': "book account number not issued",
'60001034': "book account number incorrect",
'60001035': "payer’s account number not numeric",
'60001036': "payer’s account number not eleven-proof",
'60001037': "payer’s account number not issued",
'60001039': "payer’s account number of DNB/BGC/BLA",
'60001040': "payee’s account number not numeric",
'60001041': "payee’s account number not eleven-proof",
'60001042': "payee’s account number not issued",
'60001044': "payee’s account number unknown",
'60001050': "payee’s name missing",
'60001051': "indicate payee’s bank account number instead of 3102",
'60001052': "no direct debit contract",
'60001053': "amount beyond bounds",
'60001054': "selective direct debit block",
'60001055': "original run number unknown",
'60001057': "payer’s name missing",
'60001058': "payee’s account number missing",
'60001059': "restore not permitted",
'60001060': "bank’s reference (navraaggegeven) missing",
'60001061': "BEC/GBK number incorrect",
'60001062': "BEC/GBK code incorrect",
'60001087': "book account number not numeric",
'60001090': "cancelled on request",
'60001091': "cancellation order executed",
'60001092': "cancelled instead of bended",
'60001093': "book account number is a shortened account number",
'60001094': "instructing party account number not identical with payer",
'60001095': "payee unknown GBK acceptor",
'60001097': "instructing party account number not identical with payee",
'60001099': "clearing not permitted",
'60001101': "payer’s account number not spaces",
'60001102': "PAN length not numeric",
'60001103': "PAN length outside limits",
'60001104': "track number not numeric",
'60001105': "track number not valid",
'60001106': "PAN sequence number not numeric",
'60001107': "domestic PAN not numeric",
'60001108': "domestic PAN not eleven-proof",
'60001109': "domestic PAN not issued",
'60001110': "foreign PAN not numeric",
'60001111': "card valid date not numeric",
'60001112': "book period number (boekperiodenr) not numeric",
'60001113': "transaction number not numeric",
'60001114': "transaction time not numeric",
'60001115': "transaction no valid time",
'60001116': "transaction date not numeric",
'60001117': "transaction no valid date",
'60001118': "STAN not numeric",
'60001119': "instructing party’s name missing",
'60001120': "foreign amount (bedrag-vv) not numeric",
'60001122': "rate (verrekenkoers) not numeric",
'60001125': "number of decimals (aantaldecimalen) incorrect",
'60001126': "tariff (tarifering) not B/O/S",
'60001127': "domestic costs (kostenbinnenland) not numeric",
'60001128': "domestic costs (kostenbinnenland) not higher than zero",
'60001129': "foreign costs (kostenbuitenland) not numeric",
'60001130': "foreign costs (kostenbuitenland) not higher than zero",
'60001131': "domestic costs (kostenbinnenland) not zero",
'60001132': "foreign costs (kostenbuitenland) not zero",
'60001134': "Euro record not fully filled in",
'60001135': "Client currency incorrect",
'60001136': "Amount NLG not numeric",
'60001137': "Amount NLG not higher than zero",
'60001138': "Amount NLG not equal to Amount",
'60001139': "Amount NLG incorrectly converted",
'60001140': "Amount EUR not numeric",
'60001141': "Amount EUR not greater than zero",
'60001142': "Amount EUR not equal to Amount",
'60001143': "Amount EUR incorrectly converted",
'60001144': "Client currency not NLG",
'60001145': "rate euro-vv (Koerseuro-vv) not numeric",
'60001146': "comma rate euro-vv (Kommakoerseuro-vv) incorrect",
'60001147': "acceptgiro distributor not valid",
'60001148': "Original run number and/or BRN are missing",
'60001149': "Amount/Account number/ BRN different",
'60001150': "Direct debit already revoked/restored",
'60001151': "Direct debit already reversed/revoked/restored",
'60001153': "Payer’s account number not known",
}
DATA_VALIDATION_ERROR = '50001111'
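# Error codes that the retryable() helper below treats as transient, i.e. the payment may be retried.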
def retryable(error):
return error in [
'0020001001', '0020001002', '0020001003', '0020001004', '0020001005',
'0020001006', '0020001007', '0020001008', '0020001009', '0020001010',
'30001010', '30001011', '30001015',
'30001057', '30001058',
'30001998', '30001999',
#'30611001', # amount exceeds card limit
'30961001',
'40001001', '40001002', '40001003', '40001004', '40001005',
'40001006', '40001007', '40001008', '40001009', '40001010',
'40001012',
'40001018', '40001019', '40001020',
'40001134', '40001135', '40001136', '40001137',
#'50001174', # cardholder name too long
]
| agpl-3.0 |
louisswarren/hieretikz | drinkerclass.py | 1 | 6686 | import subprocess
from hierarchyclass import *
from tikzify import *
formulae = 'tt lem wlem dgp glpoa gmp dp he dnsu dnse ud'.split()
globals().update({f: f for f in formulae})
efq = 'efq'
globals().update({future: future for future in
'dpn glpon mgmp glpon'.split()})
# These are actually equivalent.
ip = he
glpo = lem
hen = dpn
wgmp = dnsu
formula_layout = '''\
glpoa
lem
dp he
ud gmp dgp
dnsu dnse
wlem
'''
formula_strs = {f: f.upper() for f in formulae}
formula_strs[dnse] = R'DNS$\exists$'
formula_strs[glpoa] = "GLPO$'$"
formula_strs[glpon] = R'GLPO$_\neg$'
formula_strs[dnsu] = R'DNS$\forall$,WGMP'
formula_strs[lem] = R'LEM,GLPO'
formula_strs[dpn] = R'DP$_\lnot$,HE$_\lnot$'
unnamed_proofs = {
# (he, ip), (ip, he),
# (lem, glpo), (glpo, lem),
# (dpn, hen), (hen, dpn),
# (dnsu, wgmp), (wgmp, dnsu),
(lem, wlem),
(dp, dpn),
(he, hen),
(gmp, wgmp),
(dgp, wlem),
(glpoa, lem),
(glpoa, gmp),
(dp, ud),
(dp, gmp),
# (dp, dnsu),
(glpo, dpn),
(he, dnse),
(glpo, dnse),
(gmp, dnse),
(dpn, dnse),
# (glpoa, wgmp),
(dp, efq, tt, dgp),
(he, efq, tt, dgp),
# (dp, tt, wlem),
(he, tt, wlem),
(gmp, tt, wlem),
(dp, lem, glpoa),
# (gmp, lem, glpoa), # Not in tome
(dnse, tt, wlem),
(gmp, mgmp), (glpo, glpon), (glpon, wlem), (glpon, dnse), # Speculation
}
# EFQ isn't on the diagram, so these won't be plotted
unnamed_proofs.update({(efq, lem, f) for f in formulae if f not in (efq, lem, tt)})
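# Each proof is named "premise1,...,premiseN-conclusion", using the last formula of the tuple as the conclusion.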
proofs = {p: '{}-{}'.format(','.join(p[:-1]), p[-1]) for p in unnamed_proofs}
named_models = {
'dp-cm': (
{tt, efq, he, dgp, wlem, glpon, ud},
{dp, lem, dnsu, wgmp, mgmp},
),
'dp-cm-lobot': (
{tt, he, lem, dpn, hen, dgp, wlem, dnsu, dnse, glpo, glpoa, glpon, gmp, ud},
{dp},
),
'he-cm': (
{tt, efq, dp, dgp, wlem, glpon, ud},
{he, lem},
),
'he-cm-lobot': (
{tt, dp, lem, dpn, hen, dgp, wlem, dnsu, dnse, glpo, glpoa, glpon, gmp, ud},
{he},
),
'linear-growing-terms': (
{tt, efq, wlem, dgp},
{dp, he, lem, dnse, glpoa, ud},
),
'two-world-constant-terms': (
{tt, efq, dp, he, wlem, dgp, ud},
{lem},
),
'two-world-growing-terms': (
{tt, efq, wlem, dgp, wgmp},
{glpoa, dp, he, dpn, hen, gmp, dnse, glpon, ud},
),
'two-world-growing-terms-lobot': (
{tt, gmp, glpoa},
{ud},
),
'two-world-growing-terms-with-bot': (
{tt, lem, wlem, dgp},
{glpoa, dp, he, gmp, wgmp, ud, mgmp},
),
'v-const-term': (
{tt, efq, dnsu, ud},
{wlem, dgp, dnse},
),
'v-const-term-lobot': (
{tt, glpoa, lem, dpn, hen, gmp, dnse, glpon, ud},
{dgp},
),
'diamond-constant-terms': (
{tt, efq, wlem, gmp, ud},
{dgp, lem},
),
'beth-width-two': (
{lem, he, dp},
set(),
),
'one-term-v': (
{efq, dp, he},
{wlem, dgp},
),
'one-term-v-lobot': (
{tt, dp, he},
{dgp},
),
'one-term-v-lem': (
{dp, he, lem, ud, glpoa},
{dgp},
),
'trivial-lobot': (
{f for f in formulae if f is not efq},
{efq},
),
'one-world-one-term': (
{f for f in formulae if f is not tt} | {efq},
{tt},
),
'non-full-dp-cm-with-single-term-root': (
{he, efq},
{ud},
),
'non-full-dp-cm-with-single-term-root-lem': (
{tt, he, lem},
{ud},
),
}
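# Pair each model name with frozen copies of its two formula sets.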
models = [(k, *map(frozenset, v)) for k, v in named_models.items()]
if __name__ == '__main__':
h = Hierarchy((Arrow(tails, head) for *tails, head in unnamed_proofs),
(Tier(low, high, name) for name, (low, high) in named_models.items()))
qarrows = h.find_qarrows(set(formulae))
ev_qarrows = {arrow.edge: h.evaluate_qarrow(arrow, set(formulae), 1) for arrow in qarrows}
minimal_diagram = TikzHierarchy(name_dict=formula_strs)
minimal_diagram.add_string_node_layout(formula_layout)
minimal_diagram.add_edges((set(proofs)), color=False)
minimal_diagram.add_edges(set(arrow.edge for arrow in qarrows), 'dashed')
qarrows2 = h.find_qarrows(set(formulae), 2)
ev_qarrows2 = {arrow.edge: h.evaluate_qarrow(arrow, set(formulae), 2) for arrow in qarrows2}
minimal_diagram2 = TikzHierarchy(name_dict=formula_strs)
minimal_diagram2.add_string_node_layout(formula_layout)
minimal_diagram2.add_edges((set(proofs)), color=False)
minimal_diagram2.add_edges(set(arrow.edge for arrow in qarrows2), 'dashed')
inth = h.under_quotient(efq)
int_qarrows = inth.find_qarrows(set(formulae) - {efq})
int_ev_qarrows = {arrow.edge: inth.evaluate_qarrow(arrow, set(formulae), 1) for arrow in int_qarrows}
int_diagram = TikzHierarchy(name_dict=formula_strs)
int_diagram.add_string_node_layout(formula_layout)
int_diagram.add_edges(set(arrow.edge for arrow in inth.arrows), color=False)
int_diagram.add_edges(set(arrow.edge for arrow in int_qarrows), 'dashed')
tth = h.under_quotient(tt)
tt_qarrows = tth.find_qarrows(set(formulae) - {tt})
tt_ev_qarrows = {arrow.edge: tth.evaluate_qarrow(arrow, set(formulae), 1) for arrow in tt_qarrows}
tt_diagram = TikzHierarchy(name_dict=formula_strs)
tt_diagram.add_string_node_layout(formula_layout)
tt_diagram.add_edges(set(arrow.edge for arrow in tth.arrows), color=False)
tt_diagram.add_edges(set(arrow.edge for arrow in tt_qarrows), 'dashed')
tex = make_sections(
('Minimal Logic', minimal_diagram),
('Investigations ({})'.format(len(qarrows)),
make_columns(make_connections_list(ev_qarrows)), 1),
('Minimal Logic 2', minimal_diagram2),
('Investigations ({})'.format(len(qarrows2)),
make_columns(make_connections_list(ev_qarrows2)), 1),
('Intuitionistic Logic', int_diagram),
('Investigations ({})'.format(len(int_qarrows)),
make_columns(make_connections_list(int_ev_qarrows)), 1),
('Two-termed semantics', tt_diagram),
('Investigations ({})'.format(len(tt_qarrows)),
make_columns(make_connections_list(tt_ev_qarrows)), 1),
)
document = make_latex_document(tex)
with open('drinker.tex', 'w') as f:
f.write(document)
subprocess.call(['pdflatex', 'drinker.tex'], stdout=subprocess.DEVNULL)
#with open('backdrinker.tex', 'r') as f:
# assert(f.read() == document)
| mit |
waynegm/OpendTect-External-Attributes | Python_3/Filtering/ex_prewitt.py | 3 | 1093 | # Prewitt External Attribute
import sys,os
import numpy as np
from scipy.ndimage import prewitt
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
xa.params = {
'Inputs': ['Input'],
'Output' : ['Average Gradient', 'In-line gradient', 'Cross-line gradient', 'Z gradient'],
'ZSampMargin' : {'Value': [-1,1], 'Hidden': True},
'StepOut' : {'Value': [1,1], 'Hidden': True},
'Parallel' : False,
'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
def doCompute():
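	# For every trace, apply the Prewitt operator along the in-line, cross-line
	# and Z axes, keep the centre trace of each result, and also output the
	# average of the three gradients.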
inlpos = xa.SI['nrinl']//2
crlpos = xa.SI['nrcrl']//2
while True:
xa.doInput()
indata = xa.Input['Input']
xa.Output['In-line gradient'] = prewitt(indata, axis=0)[inlpos,crlpos,:]
xa.Output['Cross-line gradient'] = prewitt(indata, axis=1)[inlpos,crlpos,:]
xa.Output['Z gradient'] = prewitt(indata, axis=2)[inlpos,crlpos,:]
xa.Output['Average Gradient'] = ( xa.Output['In-line gradient']
+ xa.Output['Cross-line gradient']
+ xa.Output['Z gradient'] )/3
xa.doOutput()
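# Register doCompute with the External Attribute framework and start processing.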
xa.doCompute = doCompute
xa.run(sys.argv[1:])
| mit |
josenavas/QiiTa | qiita_pet/test/test_download.py | 1 | 13165 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from mock import Mock
from os.path import exists, isdir, join, basename
from os import remove, makedirs, close
from shutil import rmtree
from tempfile import mkdtemp, mkstemp
from biom.util import biom_open
from biom import example_table as et
from qiita_pet.test.tornado_test_base import TestHandlerBase
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.user import User
from qiita_db.study import Study
from qiita_db.artifact import Artifact
from qiita_db.software import Parameters, Command
class TestDownloadHandler(TestHandlerBase):
def setUp(self):
super(TestDownloadHandler, self).setUp()
self._clean_up_files = []
def tearDown(self):
super(TestDownloadHandler, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_download(self):
# check success
response = self.get('/download/1')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, (
"This installation of Qiita was not equipped with nginx, so it "
"is incapable of serving files. The file you attempted to "
"download is located at raw_data/1_s_G1_L001_sequences.fastq.gz"))
# failure
response = self.get('/download/1000')
self.assertEqual(response.code, 403)
# directory
a = Artifact(1)
fd, fp = mkstemp(suffix='.html')
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self._clean_up_files.append(fp)
dirpath = mkdtemp()
fd, fp2 = mkstemp(suffix='.txt', dir=dirpath)
close(fd)
with open(fp2, 'w') as f:
f.write('\n')
self._clean_up_files.append(dirpath)
a.set_html_summary(fp, support_dir=dirpath)
for fp_id, _, fp_type in a.filepaths:
if fp_type == 'html_summary_dir':
break
response = self.get('/download/%d' % fp_id)
self.assertEqual(response.code, 200)
fp_name = basename(fp2)
dirname = basename(dirpath)
self.assertEqual(
response.body, "- 1 /protected/FASTQ/1/%s/%s FASTQ/1/%s/%s\n"
% (dirname, fp_name, dirname, fp_name))
class TestDownloadStudyBIOMSHandler(TestHandlerBase):
def setUp(self):
super(TestDownloadStudyBIOMSHandler, self).setUp()
self._clean_up_files = []
def tearDown(self):
super(TestDownloadStudyBIOMSHandler, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_download_study(self):
tmp_dir = mkdtemp()
self._clean_up_files.append(tmp_dir)
biom_fp = join(tmp_dir, 'otu_table.biom')
smr_dir = join(tmp_dir, 'sortmerna_picked_otus')
log_dir = join(smr_dir, 'seqs_otus.log')
tgz = join(tmp_dir, 'sortmerna_picked_otus.tgz')
with biom_open(biom_fp, 'w') as f:
et.to_hdf5(f, "test")
makedirs(smr_dir)
with open(log_dir, 'w') as f:
f.write('\n')
with open(tgz, 'w') as f:
f.write('\n')
files_biom = [(biom_fp, 'biom'), (smr_dir, 'directory'), (tgz, 'tgz')]
params = Parameters.from_default_params(
Command(3).default_parameter_sets.next(), {'input_data': 1})
a = Artifact.create(files_biom, "BIOM", parents=[Artifact(2)],
processing_parameters=params)
for _, fp, _ in a.filepaths:
self._clean_up_files.append(fp)
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 200)
exp = (
'- 1256812 /protected/processed_data/1_study_1001_closed_'
'reference_otu_table.biom processed_data/1_study_1001_closed_'
'reference_otu_table.biom\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/4_mapping_file.txt\n'
'- 1256812 /protected/processed_data/'
'1_study_1001_closed_reference_otu_table.biom processed_data/'
'1_study_1001_closed_reference_otu_table.biom\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/5_mapping_file.txt\n'
'- 1256812 /protected/processed_data/'
'1_study_1001_closed_reference_otu_table_Silva.biom processed_data'
'/1_study_1001_closed_reference_otu_table_Silva.biom\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/6_mapping_file.txt\n'
'- 36615 /protected/templates/1_prep_2_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/7_mapping_file.txt\n'
'- [0-9]* /protected/BIOM/{0}/otu_table.biom '
'BIOM/{0}/otu_table.biom\n'
'- 1 /protected/BIOM/{0}/sortmerna_picked_otus/seqs_otus.log '
'BIOM/{0}/sortmerna_picked_otus/seqs_otus.log\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-[0-9]*.'
'txt mapping_files/{0}_mapping_file.txt\n'.format(a.id))
self.assertRegexpMatches(response.body, exp)
response = self.get('/download_study_bioms/200')
self.assertEqual(response.code, 405)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 405)
a.visibility = 'public'
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 200)
exp = (
'- [0-9]* /protected/BIOM/{0}/otu_table.biom '
'BIOM/{0}/otu_table.biom\n'
'- 1 /protected/BIOM/{0}/sortmerna_picked_otus/seqs_otus.log '
'BIOM/{0}/sortmerna_picked_otus/seqs_otus.log\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-[0-9]*.'
'txt mapping_files/{0}_mapping_file.txt\n'.format(a.id))
self.assertRegexpMatches(response.body, exp)
class TestDownloadRelease(TestHandlerBase):
def setUp(self):
super(TestDownloadRelease, self).setUp()
def tearDown(self):
super(TestDownloadRelease, self).tearDown()
def test_download(self):
# check success
response = self.get('/release/download/1')
self.assertEqual(response.code, 200)
self.assertIn(
"This installation of Qiita was not equipped with nginx, so it is "
"incapable of serving files. The file you attempted to download "
"is located at", response.body)
class TestDownloadRawData(TestHandlerBase):
def setUp(self):
super(TestDownloadRawData, self).setUp()
self._clean_up_files = []
def tearDown(self):
super(TestDownloadRawData, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_download_raw_data(self):
# it's possible that one of the tests is deleting the raw data
        # so we will make sure that the files exist so this test passes
all_files = [fp for a in Study(1).artifacts()
for _, fp, _ in a.filepaths]
for fp in all_files:
if not exists(fp):
with open(fp, 'w') as f:
f.write('')
response = self.get('/download_raw_data/1')
self.assertEqual(response.code, 200)
exp = (
'- 58 /protected/raw_data/1_s_G1_L001_sequences.fastq.gz '
'raw_data/1_s_G1_L001_sequences.fastq.gz\n'
'- 58 /protected/raw_data/1_s_G1_L001_sequences_barcodes.fastq.gz '
'raw_data/1_s_G1_L001_sequences_barcodes.fastq.gz\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-[0-9]*.txt '
'mapping_files/1_mapping_file.txt\n'
'- 36615 /protected/templates/1_prep_2_qiime_[0-9]*-[0-9]*.txt '
'mapping_files/7_mapping_file.txt\n')
self.assertRegexpMatches(response.body, exp)
response = self.get('/download_study_bioms/200')
self.assertEqual(response.code, 405)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 405)
class TestDownloadEBISampleAccessions(TestHandlerBase):
def setUp(self):
super(TestDownloadEBISampleAccessions, self).setUp()
def tearDown(self):
super(TestDownloadEBISampleAccessions, self).tearDown()
def test_download(self):
# check success
response = self.get('/download_ebi_accessions/samples/1')
exp = ("sample_name\tsample_accession\n1.SKB2.640194\tERS000008\n"
"1.SKM4.640180\tERS000004\n1.SKB3.640195\tERS000024\n"
"1.SKB6.640176\tERS000025\n1.SKD6.640190\tERS000007\n"
"1.SKM6.640187\tERS000022\n1.SKD9.640182\tERS000019\n"
"1.SKM8.640201\tERS000014\n1.SKM2.640199\tERS000015\n"
"1.SKD2.640178\tERS000009\n1.SKB7.640196\tERS000002\n"
"1.SKD4.640185\tERS000023\n1.SKB8.640193\tERS000000\n"
"1.SKM3.640197\tERS000018\n1.SKD5.640186\tERS000017\n"
"1.SKB1.640202\tERS000011\n1.SKM1.640183\tERS000025\n"
"1.SKD1.640179\tERS000012\n1.SKD3.640198\tERS000013\n"
"1.SKB5.640181\tERS000006\n1.SKB4.640189\tERS000020\n"
"1.SKB9.640200\tERS000016\n1.SKM9.640192\tERS000003\n"
"1.SKD8.640184\tERS000001\n1.SKM5.640177\tERS000005\n"
"1.SKM7.640188\tERS000010\n1.SKD7.640191\tERS000021")
self.assertEqual(response.code, 200)
self.assertRegexpMatches(response.body, exp)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_ebi_accessions/samples/1')
self.assertEqual(response.code, 405)
class TestDownloadEBIPrepAccessions(TestHandlerBase):
def setUp(self):
super(TestDownloadEBIPrepAccessions, self).setUp()
def tearDown(self):
super(TestDownloadEBIPrepAccessions, self).tearDown()
def test_download(self):
# check success
response = self.get('/download_ebi_accessions/experiments/1')
exp = ("sample_name\texperiment_accession\n1.SKB2.640194\tERX0000008\n"
"1.SKM4.640180\tERX0000004\n1.SKB3.640195\tERX0000024\n"
"1.SKB6.640176\tERX0000025\n1.SKD6.640190\tERX0000007\n"
"1.SKM6.640187\tERX0000022\n1.SKD9.640182\tERX0000019\n"
"1.SKM8.640201\tERX0000014\n1.SKM2.640199\tERX0000015\n"
"1.SKD2.640178\tERX0000009\n1.SKB7.640196\tERX0000002\n"
"1.SKD4.640185\tERX0000023\n1.SKB8.640193\tERX0000000\n"
"1.SKM3.640197\tERX0000018\n1.SKD5.640186\tERX0000017\n"
"1.SKB1.640202\tERX0000011\n1.SKM1.640183\tERX0000026\n"
"1.SKD1.640179\tERX0000012\n1.SKD3.640198\tERX0000013\n"
"1.SKB5.640181\tERX0000006\n1.SKB4.640189\tERX0000020\n"
"1.SKB9.640200\tERX0000016\n1.SKM9.640192\tERX0000003\n"
"1.SKD8.640184\tERX0000001\n1.SKM5.640177\tERX0000005\n"
"1.SKM7.640188\tERX0000010\n1.SKD7.640191\tERX0000021")
self.assertEqual(response.code, 200)
self.assertRegexpMatches(response.body, exp)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_ebi_accessions/experiments/1')
self.assertEqual(response.code, 405)
class TestDownloadUpload(TestHandlerBase):
def setUp(self):
super(TestDownloadUpload, self).setUp()
def tearDown(self):
super(TestDownloadUpload, self).tearDown()
def test_download(self):
# check failure
response = self.get('/download_upload/1/uploaded_file.txt')
self.assertEqual(response.code, 403)
# check success
BaseHandler.get_current_user = Mock(return_value=User("[email protected]"))
response = self.get('/download_upload/1/uploaded_file.txt')
self.assertEqual(response.code, 200)
if __name__ == '__main__':
main()
| bsd-3-clause |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/urbansim_parcel/job_x_building/same_sector_employment_in_building.py | 2 | 2096 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_number_of_agents_with_same_attribute_value import abstract_number_of_agents_with_same_attribute_value
class same_sector_employment_in_building(abstract_number_of_agents_with_same_attribute_value):
""""""
agent_attribute_name = "job.sector_id"
agent_dependencies = []
choice_set_dependencies = []
#unique_agent_attribute_value = range(1, 20)
geography_dataset_name = 'building'
## use default
#expression_agents_of_attribute_by_geography = "'agents_of_attribute_%(agent_attribute_value)s = %(geography_dataset_name)s.aggregate(%(agent_attribute_name)s==%(agent_attribute_value)s)'"
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import arange, array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
def test_my_inputs(self):
tester = VariableTester(
__file__,
package_order=['urbansim_parcel', 'urbansim', 'opus_core'],
test_data={
"job":{
'job_id': array([1, 2, 3, 4, 5, 6]),
'building_id':array([1, 1, 5, 3, 3, 3]),
'sector_id': array([1, 1, 2, 1, 3, 3]),
},
"building":{
'building_id': array([1, 2, 3, 4, 5,]),
},
})
        ## mind the mirroring of gridcells in the walking_distance calculation
should_be = array([[2, 0, 1, 0, 0],
[2, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[2, 0, 1, 0, 0],
[0, 0, 2, 0, 0],
[0, 0, 2, 0, 0]])
tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main()
| gpl-2.0 |
mrf345/FQM | migrations/versions/b41c62db00a1_.py | 1 | 1488 | """ Convert printer `vendor` and `product` to int type. And add `name`.
Revision ID: b41c62db00a1
Revises: d37b1524c3fc
Create Date: 2020-06-06 16:49:00.859545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b41c62db00a1'
down_revision = 'd37b1524c3fc'
branch_labels = None
depends_on = None
def upgrade():
try:
with op.batch_alter_table('printers') as batch:
batch.alter_column('vendor',
existing_type=sa.VARCHAR(length=100),
type_=sa.Integer(),
existing_nullable=True)
batch.alter_column('product',
existing_type=sa.VARCHAR(length=100),
type_=sa.Integer(),
existing_nullable=True)
batch.add_column(sa.Column('name', sa.String(100), nullable=True))
except Exception:
pass
def downgrade():
with op.batch_alter_table('printers') as batch:
batch.alter_column('vendor',
existing_type=sa.Integer(),
type_=sa.VARCHAR(length=100),
existing_nullable=True)
batch.alter_column('product',
existing_type=sa.Integer(),
type_=sa.VARCHAR(length=100),
existing_nullable=True)
batch.drop_column('name')
| mpl-2.0 |
AOKP/external_chromium_org | ppapi/c/documentation/doxy_cleanup.py | 177 | 4451 | #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This utility cleans up the html files as emitted by doxygen so
that they are suitable for publication on a Google documentation site.
'''
import optparse
import os
import re
import shutil
import string
import sys
try:
from BeautifulSoup import BeautifulSoup, Tag
except (ImportError, NotImplementedError):
print ("This tool requires the BeautifulSoup package "
"(see http://www.crummy.com/software/BeautifulSoup/).\n"
"Make sure that the file BeautifulSoup.py is either in this directory "
"or is available in your PYTHON_PATH")
raise
class HTMLFixer(object):
'''This class cleans up the html strings as produced by Doxygen
'''
def __init__(self, html):
self.soup = BeautifulSoup(html)
def FixTableHeadings(self):
'''Fixes the doxygen table headings.
This includes:
- Using bare <h2> title row instead of row embedded in <tr><td> in table
- Putting the "name" attribute into the "id" attribute of the <tr> tag.
- Splitting up tables into multiple separate tables if a table
heading appears in the middle of a table.
For example, this html:
<table>
<tr><td colspan="2"><h2><a name="pub-attribs"></a>
Data Fields List</h2></td></tr>
...
</table>
would be converted to this:
<h2>Data Fields List</h2>
<table>
...
</table>
'''
table_headers = []
for tag in self.soup.findAll('tr'):
if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
#tag['id'] = tag.td.h2.a['name']
tag.string = tag.td.h2.a.next
tag.name = 'h2'
table_headers.append(tag)
# reverse the list so that earlier tags don't delete later tags
table_headers.reverse()
# Split up tables that have multiple table header (th) rows
for tag in table_headers:
print "Header tag: %s is %s" % (tag.name, tag.string.strip())
# Is this a heading in the middle of a table?
if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
print "Splitting Table named %s" % tag.string.strip()
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
new_table = Tag(self.soup, name='table', attrs=table.attrs)
table_parent.insert(table_index + 1, new_table)
tag_index = table.contents.index(tag)
for index, row in enumerate(table.contents[tag_index:]):
new_table.insert(index, row)
# Now move the <h2> tag to be in front of the <table> tag
assert tag.parent.name == 'table'
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
table_parent.insert(table_index, tag)
def RemoveTopHeadings(self):
'''Removes <div> sections with a header, tabs, or navpath class attribute'''
header_tags = self.soup.findAll(
name='div',
attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')})
[tag.extract() for tag in header_tags]
def FixAll(self):
self.FixTableHeadings()
self.RemoveTopHeadings()
def __str__(self):
return str(self.soup)
def main():
'''Main entry for the doxy_cleanup utility
doxy_cleanup takes a list of html files and modifies them in place.'''
parser = optparse.OptionParser(usage='Usage: %prog [options] files...')
parser.add_option('-m', '--move', dest='move', action='store_true',
default=False, help='move html files to "original_html"')
options, files = parser.parse_args()
if not files:
parser.print_usage()
return 1
for filename in files:
try:
with open(filename, 'r') as file:
html = file.read()
print "Processing %s" % filename
fixer = HTMLFixer(html)
fixer.FixAll()
with open(filename, 'w') as file:
file.write(str(fixer))
if options.move:
new_directory = os.path.join(
os.path.dirname(os.path.dirname(filename)), 'original_html')
if not os.path.exists(new_directory):
os.mkdir(new_directory)
shutil.move(filename, new_directory)
except:
print "Error while processing %s" % filename
raise
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
valmynd/MediaFetcher | src/plugins/youtube_dl/test/test_compat.py | 1 | 5072 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.compat import (
compat_getenv,
compat_setenv,
compat_etree_fromstring,
compat_expanduser,
compat_shlex_split,
compat_str,
compat_struct_unpack,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
)
class TestCompat(unittest.TestCase):
def test_compat_getenv(self):
test_str = 'тест'
compat_setenv('YOUTUBE_DL_COMPAT_GETENV', test_str)
self.assertEqual(compat_getenv('YOUTUBE_DL_COMPAT_GETENV'), test_str)
def test_compat_setenv(self):
test_var = 'YOUTUBE_DL_COMPAT_SETENV'
test_str = 'тест'
compat_setenv(test_var, test_str)
compat_getenv(test_var)
self.assertEqual(compat_getenv(test_var), test_str)
def test_compat_expanduser(self):
old_home = os.environ.get('HOME')
test_str = r'C:\Documents and Settings\тест\Application Data'
compat_setenv('HOME', test_str)
self.assertEqual(compat_expanduser('~'), test_str)
compat_setenv('HOME', old_home or '')
def test_all_present(self):
import youtube_dl.compat
all_names = youtube_dl.compat.__all__
present_names = set(filter(
lambda c: '_' in c and not c.startswith('_'),
dir(youtube_dl.compat))) - set(['unicode_literals'])
self.assertEqual(all_names, sorted(present_names))
def test_compat_urllib_parse_unquote(self):
self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
self.assertEqual(compat_urllib_parse_unquote(''), '')
self.assertEqual(compat_urllib_parse_unquote('%'), '%')
self.assertEqual(compat_urllib_parse_unquote('%%'), '%%')
self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%')
self.assertEqual(compat_urllib_parse_unquote('%2F'), '/')
self.assertEqual(compat_urllib_parse_unquote('%2f'), '/')
self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波')
self.assertEqual(
compat_urllib_parse_unquote('''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%25%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a'''),
'''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''')
self.assertEqual(
compat_urllib_parse_unquote(
'''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''),
'''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
def test_compat_urllib_parse_unquote_plus(self):
self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
def test_compat_urllib_parse_urlencode(self):
self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')
def test_compat_shlex_split(self):
self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
self.assertEqual(compat_shlex_split('-option "one\ntwo" \n -flag'), ['-option', 'one\ntwo', '-flag'])
self.assertEqual(compat_shlex_split('-val 中文'), ['-val', '中文'])
def test_compat_etree_fromstring(self):
xml = '''
<root foo="bar" spam="中文">
<normal>foo</normal>
<chinese>中文</chinese>
<foo><bar>spam</bar></foo>
</root>
'''
doc = compat_etree_fromstring(xml.encode('utf-8'))
self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
self.assertTrue(isinstance(doc.find('normal').text, compat_str))
self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
def test_compat_etree_fromstring_doctype(self):
xml = '''<?xml version="1.0"?>
<!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd">
<smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>'''
compat_etree_fromstring(xml)
def test_struct_unpack(self):
self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Bitl/RBXLegacy-src | Cut/RBXLegacyDiscordBot/lib/youtube_dl/extractor/porn91.py | 40 | 1888 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
ExtractorError,
)
class Porn91IE(InfoExtractor):
IE_NAME = '91porn'
_VALID_URL = r'(?:https?://)(?:www\.|)91porn\.com/.+?\?viewkey=(?P<id>[\w\d]+)'
_TEST = {
'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
'md5': '7fcdb5349354f40d41689bd0fa8db05a',
'info_dict': {
'id': '7e42283b4f5ab36da134',
'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!',
'ext': 'mp4',
'duration': 431,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
self._set_cookie('91porn.com', 'language', 'cn_CN')
webpage = self._download_webpage(
'http://91porn.com/view_video.php?viewkey=%s' % video_id, video_id)
if '作为游客,你每天只可观看10个视频' in webpage:
raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True)
title = self._search_regex(
r'<div id="viewvideo-title">([^<]+)</div>', webpage, 'title')
title = title.replace('\n', '')
info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
duration = parse_duration(self._search_regex(
r'时长:\s*</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False))
comment_count = int_or_none(self._search_regex(
r'留言:\s*</span>\s*(\d+)', webpage, 'comment count', fatal=False))
info_dict.update({
'id': video_id,
'title': title,
'duration': duration,
'comment_count': comment_count,
'age_limit': self._rta_search(webpage),
})
return info_dict
| gpl-3.0 |
jshum/dd-agent | tests/checks/mock/test_supervisord.py | 37 | 18752 | # stdlib
from socket import socket
import unittest
import xmlrpclib
# 3p
from mock import patch
# project
from checks import AgentCheck
from tests.checks.common import get_check
class TestSupervisordCheck(unittest.TestCase):
TEST_CASES = [{
'yaml': """
init_config:
instances:
- name: server1
host: localhost
port: 9001""",
'expected_instances': [{
'host': 'localhost',
'name': 'server1',
'port': 9001
}],
'expected_metrics': {
'server1': [
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:python']}),
('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:mysql']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:java']})
]
},
'expected_service_checks': {
'server1': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1', 'supervisord_process:mysql'],
'check': 'supervisord.process.status'
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server1', 'supervisord_process:java'],
'check': 'supervisord.process.status'
}, {
'status': AgentCheck.UNKNOWN,
'tags': ['supervisord_server:server1', 'supervisord_process:python'],
'check': 'supervisord.process.status'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
user: user
pass: pass
proc_names:
- apache2
- webapp
- name: server1
host: 10.60.130.82""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'user': 'user',
'pass': 'pass',
'proc_names': ['apache2', 'webapp'],
}, {
'host': '10.60.130.82',
'name': 'server1'
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:apache2']}),
('supervisord.process.uptime', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:webapp']}),
],
'server1': [
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:ruby']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server0', 'supervisord_process:apache2'],
'check': 'supervisord.process.status'
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server0', 'supervisord_process:webapp'],
'check': 'supervisord.process.status'
}],
'server1': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server1', 'supervisord_process:ruby'],
'check': 'supervisord.process.status'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: invalid_host
port: 9009""",
'expected_instances': [{
'name': 'server0',
'host': 'invalid_host',
'port': 9009
}],
'error_message': """Cannot connect to http://invalid_host:9009. Make sure supervisor is running and XML-RPC inet interface is enabled."""
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9010
user: invalid_user
pass: invalid_pass""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9010,
'user': 'invalid_user',
'pass': 'invalid_pass'
}],
'error_message': """Username or password to server0 are incorrect."""
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
proc_names:
- mysql
- invalid_process""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'proc_names': ['mysql', 'invalid_process']
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:mysql']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
'check': 'supervisord.process.status'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
proc_regex:
- '^mysq.$'
- invalid_process""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'proc_regex': ['^mysq.$', 'invalid_process']
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 1,
{'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 0,
{'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0,
{'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 125, {'type': 'gauge',
'tags': ['supervisord_server:server0',
'supervisord_process:mysql']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
'check': 'supervisord.process.status'
}]
}
}]
def setUp(self):
self.patcher = patch('xmlrpclib.Server', self.mock_server)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
# Integration Test #####################################################
def test_check(self):
"""Integration test for supervisord check. Using a mocked supervisord."""
for tc in self.TEST_CASES:
check, instances = get_check('supervisord', tc['yaml'])
self.assertTrue(check is not None, msg=check)
self.assertEquals(tc['expected_instances'], instances)
for instance in instances:
name = instance['name']
try:
# Run the check
check.check(instance)
except Exception, e:
                    if 'error_message' in tc: # expected error
self.assertEquals(str(e), tc['error_message'])
else:
self.assertTrue(False, msg=str(e))
else:
# Assert that the check collected the right metrics
expected_metrics = tc['expected_metrics'][name]
self.assert_metrics(expected_metrics, check.get_metrics())
# Assert that the check generated the right service checks
expected_service_checks = tc['expected_service_checks'][name]
self.assert_service_checks(expected_service_checks,
check.get_service_checks())
# Unit Tests ###########################################################
def test_build_message(self):
"""Unit test supervisord build service check message."""
process = {
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}
expected_message = """Current time: 2014-11-01 04:18:33
Process name: mysql
Process group: mysql
Description: pid 787, uptime 0:02:05
Error log file: /var/log/supervisor/mysql-stderr---supervisor-3ATI82.log
Stdout log file: /var/log/mysql/mysql.log
Log file: /var/log/mysql/mysql.log
State: RUNNING
Start time: 2014-11-01 04:16:28
Stop time: \nExit Status: 0"""
check, _ = get_check('supervisord', self.TEST_CASES[0]['yaml'])
self.assertEquals(expected_message, check._build_message(process))
# Helper Methods #######################################################
@staticmethod
def mock_server(url):
return MockXmlRcpServer(url)
def assert_metrics(self, expected, actual):
actual = [TestSupervisordCheck.norm_metric(metric) for metric in actual]
self.assertEquals(len(actual), len(expected), msg='Invalid # metrics reported.\n'
'Expected: {0}. Found: {1}'.format(len(expected), len(actual)))
self.assertTrue(all([expected_metric in actual for expected_metric in expected]),
msg='Reported metrics are incorrect.\nExpected: {0}.\n'
'Found: {1}'.format(expected, actual))
def assert_service_checks(self, expected, actual):
actual = [TestSupervisordCheck.norm_service_check(service_check)
for service_check in actual]
self.assertEquals(len(actual), len(expected), msg='Invalid # service checks reported.'
'\nExpected: {0}. Found: {1}.'.format(expected, actual))
self.assertTrue(all([expected_service_check in actual
for expected_service_check in expected]),
msg='Reported service checks are incorrect.\nExpected:{0}\n'
'Found:{1}'.format(expected, actual))
@staticmethod
def norm_metric(metric):
'''Removes hostname and timestamp'''
metric[3].pop('hostname')
return (metric[0], metric[2], metric[3])
@staticmethod
def norm_service_check(service_check):
'''Removes timestamp, host_name, message and id'''
for field in ['timestamp', 'host_name', 'message', 'id']:
service_check.pop(field)
return service_check
class MockXmlRcpServer:
"""Class that mocks an XML RPC server. Initialized using a mocked
supervisord server url, which is used to initialize the supervisord
server.
"""
def __init__(self, url):
self.supervisor = MockSupervisor(url)
class MockSupervisor:
"""Class that mocks a supervisord sever. Initialized using the server url
and mocks process methods providing mocked process information for testing
purposes.
"""
MOCK_PROCESSES = {
'http://localhost:9001/RPC2': [{
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}, {
'now': 1414815738,
'group': 'java',
'description': 'Nov 01 04:22 AM',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/java-stderr---supervisor-lSdcKZ.log',
'stop': 1414815722,
'statename': 'STOPPED',
'start': 1414815388,
'state': 0,
'stdout_logfile': '/var/log/java/java.log',
'logfile': '/var/log/java/java.log',
'exitstatus': 21,
'spawnerr': '',
'name': 'java'
}, {
'now': 1414815738,
'group': 'python',
'description': '',
'pid': 2765,
'stderr_logfile': '/var/log/supervisor/python-stderr---supervisor-vFzxIg.log',
'stop': 1414815737,
'statename': 'STARTING',
'start': 1414815737,
'state': 10,
'stdout_logfile': '/var/log/python/python.log',
'logfile': '/var/log/python/python.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'python'
}],
'http://user:pass@localhost:9001/RPC2': [{
'now': 1414869824,
'group': 'apache2',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/apache2-stderr---supervisor-0PkXWd.log',
'stop': 1414867047,
'statename': 'FATAL',
'start': 1414867047,
'state': 200,
'stdout_logfile': '/var/log/apache2/apache2.log',
'logfile': '/var/log/apache2/apache2.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'apache2'
}, {
'now': 1414871104,
'group': 'webapp',
'description': '',
'pid': 17600,
'stderr_logfile': '/var/log/supervisor/webapp-stderr---supervisor-onZK__.log',
'stop': 1414871101,
'statename': 'STOPPING',
'start': 1414871102,
'state': 40,
'stdout_logfile': '/var/log/company/webapp.log',
'logfile': '/var/log/company/webapp.log',
'exitstatus': 1,
'spawnerr': '',
'name': 'webapp'
}],
'http://10.60.130.82:9001/RPC2': [{
'now': 1414871588,
'group': 'ruby',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/ruby-stderr---supervisor-BU7Wat.log',
'stop': 1414871588,
'statename': 'BACKOFF',
'start': 1414871588,
'state': 30,
'stdout_logfile': '/var/log/ruby/ruby.log',
'logfile': '/var/log/ruby/ruby.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'ruby'
}]
}
def __init__(self, url):
self.url = url
def getAllProcessInfo(self):
self._validate_request()
return self.MOCK_PROCESSES[self.url]
def getProcessInfo(self, proc_name):
self._validate_request(proc=proc_name)
for proc in self.MOCK_PROCESSES[self.url]:
if proc['name'] == proc_name:
return proc
raise Exception('Process not found: %s' % proc_name)
def _validate_request(self, proc=None):
'''Validates request and simulates errors when not valid'''
if 'invalid_host' in self.url:
# Simulate connecting to an invalid host/port in order to
# raise `socket.error: [Errno 111] Connection refused`
socket().connect(('localhost', 38837))
elif 'invalid_pass' in self.url:
# Simulate xmlrpc exception for invalid credentials
raise xmlrpclib.ProtocolError(self.url[7:], 401,
'Unauthorized', None)
elif proc is not None and 'invalid' in proc:
# Simulate xmlrpc exception for process not found
raise xmlrpclib.Fault(10, 'BAD_NAME')
| bsd-3-clause |
Scalr/libcloud | libcloud/storage/drivers/atmos.py | 12 | 17006 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import base64
import hashlib
import hmac
import time
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import urlunquote
if PY3:
from io import FileIO as file
from libcloud.utils.files import read_in_chunks, guess_file_mime_type
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE
from libcloud.storage.types import ContainerAlreadyExistsError, \
ContainerDoesNotExistError, ContainerIsNotEmptyError, \
ObjectDoesNotExistError
def collapse(s):
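    # Collapse runs of spaces to single spaces; used when canonicalizing x-emc header values.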
return ' '.join([x for x in s.split(' ') if x])
class AtmosError(LibcloudError):
def __init__(self, code, message, driver=None):
super(AtmosError, self).__init__(value=message, driver=driver)
self.code = code
class AtmosResponse(XmlResponse):
def success(self):
return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT,
httplib.PARTIAL_CONTENT)
def parse_error(self):
tree = self.parse_body()
if tree is None:
return None
code = int(tree.find('Code').text)
message = tree.find('Message').text
raise AtmosError(code=code, message=message,
driver=self.connection.driver)
class AtmosConnection(ConnectionUserAndKey):
responseCls = AtmosResponse
def add_default_headers(self, headers):
headers['x-emc-uid'] = self.user_id
headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime())
headers['x-emc-date'] = headers['Date']
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/octet-stream'
if 'Accept' not in headers:
headers['Accept'] = '*/*'
return headers
def pre_connect_hook(self, params, headers):
headers['x-emc-signature'] = self._calculate_signature(params, headers)
return params, headers
def _calculate_signature(self, params, headers):
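        # Canonicalize the request (method, content type, range, date, the
        # lowercased path plus query string, and the sorted x-emc-* headers),
        # then sign it with HMAC-SHA1 keyed by the base64-decoded secret.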
pathstring = urlunquote(self.action)
if pathstring.startswith(self.driver.path):
pathstring = pathstring[len(self.driver.path):]
if params:
if type(params) is dict:
params = list(params.items())
pathstring += '?' + urlencode(params)
pathstring = pathstring.lower()
xhdrs = [(k, v) for k, v in list(headers.items()) if
k.startswith('x-emc-')]
xhdrs.sort(key=lambda x: x[0])
signature = [
self.method,
headers.get('Content-Type', ''),
headers.get('Range', ''),
headers.get('Date', ''),
pathstring,
]
signature.extend([k + ':' + collapse(v) for k, v in xhdrs])
signature = '\n'.join(signature)
key = base64.b64decode(self.key)
signature = hmac.new(b(key), b(signature), hashlib.sha1).digest()
return base64.b64encode(b(signature)).decode('utf-8')
class AtmosDriver(StorageDriver):
connectionCls = AtmosConnection
host = None
path = None
api_name = 'atmos'
supports_chunked_encoding = True
website = 'http://atmosonline.com/'
name = 'atmos'
DEFAULT_CDN_TTL = 60 * 60 * 24 * 7 # 1 week
def __init__(self, key, secret=None, secure=True, host=None, port=None):
host = host or self.host
super(AtmosDriver, self).__init__(key, secret, secure, host, port)
def iterate_containers(self):
result = self.connection.request(self._namespace_path(''))
entries = self._list_objects(result.object, object_type='directory')
for entry in entries:
extra = {
'object_id': entry['id']
}
yield Container(entry['name'], extra, self)
def get_container(self, container_name):
path = self._namespace_path(container_name) + '/?metadata/system'
try:
result = self.connection.request(path)
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
raise ContainerDoesNotExistError(e, self, container_name)
meta = self._emc_meta(result)
extra = {
'object_id': meta['objectid']
}
return Container(container_name, extra, self)
def create_container(self, container_name):
path = self._namespace_path(container_name) + '/'
try:
self.connection.request(path, method='POST')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1016:
raise
raise ContainerAlreadyExistsError(e, self, container_name)
return self.get_container(container_name)
def delete_container(self, container):
try:
self.connection.request(self._namespace_path(container.name) + '/',
method='DELETE')
except AtmosError:
e = sys.exc_info()[1]
if e.code == 1003:
raise ContainerDoesNotExistError(e, self, container.name)
elif e.code == 1023:
raise ContainerIsNotEmptyError(e, self, container.name)
return True
def get_object(self, container_name, object_name):
container = self.get_container(container_name)
object_name_cleaned = self._clean_object_name(object_name)
path = self._namespace_path(container_name) + '/' + object_name_cleaned
try:
result = self.connection.request(path + '?metadata/system')
system_meta = self._emc_meta(result)
result = self.connection.request(path + '?metadata/user')
user_meta = self._emc_meta(result)
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
raise ObjectDoesNotExistError(e, self, object_name)
last_modified = time.strptime(system_meta['mtime'],
'%Y-%m-%dT%H:%M:%SZ')
last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
last_modified)
extra = {
'object_id': system_meta['objectid'],
'last_modified': last_modified
}
data_hash = user_meta.pop('md5', '')
return Object(object_name, int(system_meta['size']), data_hash, extra,
user_meta, container, self)
def upload_object(self, file_path, container, object_name, extra=None,
verify_hash=True):
method = 'PUT'
extra = extra or {}
object_name_cleaned = self._clean_object_name(object_name)
request_path = self._namespace_path(container.name) + '/' +\
object_name_cleaned
content_type = extra.get('content_type', None)
try:
self.connection.request(request_path + '?metadata/system')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
method = 'POST'
result_dict = self._upload_object(
object_name=object_name,
content_type=content_type,
request_path=request_path,
request_method=method,
headers={}, file_path=file_path)
bytes_transferred = result_dict['bytes_transferred']
if extra is None:
meta_data = {}
else:
meta_data = extra.get('meta_data', {})
meta_data['md5'] = result_dict['data_hash']
user_meta = ', '.join([k + '=' + str(v) for k, v in
list(meta_data.items())])
self.connection.request(request_path + '?metadata/user', method='POST',
headers={'x-emc-meta': user_meta})
result = self.connection.request(request_path + '?metadata/system')
meta = self._emc_meta(result)
del meta_data['md5']
extra = {
'object_id': meta['objectid'],
'meta_data': meta_data,
}
return Object(object_name, bytes_transferred, result_dict['data_hash'],
extra, meta_data, container, self)
def upload_object_via_stream(self, iterator, container, object_name,
extra=None):
if isinstance(iterator, file):
iterator = iter(iterator)
data_hash = hashlib.md5()
generator = read_in_chunks(iterator, CHUNK_SIZE, True)
bytes_transferred = 0
try:
chunk = next(generator)
except StopIteration:
chunk = ''
path = self._namespace_path(container.name + '/' + object_name)
method = 'PUT'
if extra is not None:
content_type = extra.get('content_type', None)
else:
content_type = None
if not content_type:
content_type, _ = guess_file_mime_type(object_name)
if not content_type:
raise AttributeError(
'File content-type could not be guessed and' +
' no content_type value provided')
try:
self.connection.request(path + '?metadata/system')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
method = 'POST'
while True:
end = bytes_transferred + len(chunk) - 1
data_hash.update(b(chunk))
headers = {
'x-emc-meta': 'md5=' + data_hash.hexdigest(),
'Content-Type': content_type,
}
if len(chunk) > 0 and bytes_transferred > 0:
headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end)
method = 'PUT'
result = self.connection.request(path, method=method, data=chunk,
headers=headers)
bytes_transferred += len(chunk)
try:
chunk = next(generator)
except StopIteration:
break
if len(chunk) == 0:
break
data_hash = data_hash.hexdigest()
if extra is None:
meta_data = {}
else:
meta_data = extra.get('meta_data', {})
meta_data['md5'] = data_hash
user_meta = ', '.join([k + '=' + str(v) for k, v in
list(meta_data.items())])
self.connection.request(path + '?metadata/user', method='POST',
headers={'x-emc-meta': user_meta})
result = self.connection.request(path + '?metadata/system')
meta = self._emc_meta(result)
extra = {
'object_id': meta['objectid'],
'meta_data': meta_data,
}
return Object(object_name, bytes_transferred, data_hash, extra,
meta_data, container, self)
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
path = self._namespace_path(obj.container.name + '/' + obj.name)
response = self.connection.request(path, method='GET', raw=True)
return self._get_object(obj=obj, callback=self._save_object,
response=response,
callback_kwargs={
'obj': obj,
'response': response.response,
'destination_path': destination_path,
'overwrite_existing': overwrite_existing,
'delete_on_failure': delete_on_failure
},
success_status_code=httplib.OK)
def download_object_as_stream(self, obj, chunk_size=None):
path = self._namespace_path(obj.container.name + '/' + obj.name)
response = self.connection.request(path, method='GET', raw=True)
return self._get_object(obj=obj, callback=read_in_chunks,
response=response,
callback_kwargs={
'iterator': response.response,
'chunk_size': chunk_size
},
success_status_code=httplib.OK)
def delete_object(self, obj):
path = self._namespace_path(obj.container.name) + '/' +\
self._clean_object_name(obj.name)
try:
self.connection.request(path, method='DELETE')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
raise ObjectDoesNotExistError(e, self, obj.name)
return True
def enable_object_cdn(self, obj):
return True
def get_object_cdn_url(self, obj, expiry=None, use_object=False):
"""
Return an object CDN URL.
:param obj: Object instance
:type obj: :class:`Object`
:param expiry: Expiry
:type expiry: ``str``
:param use_object: Use object
:type use_object: ``bool``
:rtype: ``str``
"""
if use_object:
path = '/rest/objects' + obj.meta_data['object_id']
else:
path = '/rest/namespace/' + obj.container.name + '/' + obj.name
if self.secure:
protocol = 'https'
else:
protocol = 'http'
expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL)
params = [
('uid', self.key),
('expires', expiry),
]
params.append(('signature', self._cdn_signature(path, params, expiry)))
params = urlencode(params)
path = self.path + path
return urlparse.urlunparse((protocol, self.host, path, '', params, ''))
def _cdn_signature(self, path, params, expiry):
key = base64.b64decode(self.secret)
signature = '\n'.join(['GET', path.lower(), self.key, expiry])
signature = hmac.new(key, signature, hashlib.sha1).digest()
return base64.b64encode(signature)
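        # Illustrative sketch only -- uid, expiry and path below are made-up
        # placeholders.  With uid='user@tenant', expiry='1577836800' and
        # path='/rest/namespace/backups/report.csv', the string signed above is:
        #
        #   GET
        #   /rest/namespace/backups/report.csv
        #   user@tenant
        #   1577836800
        #
        # i.e. exactly the four newline-joined fields, HMAC-SHA1 signed with
        # the base64-decoded secret.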
def _list_objects(self, tree, object_type=None):
listing = tree.find(self._emc_tag('DirectoryList'))
entries = []
for entry in listing.findall(self._emc_tag('DirectoryEntry')):
file_type = entry.find(self._emc_tag('FileType')).text
if object_type is not None and object_type != file_type:
continue
entries.append({
'id': entry.find(self._emc_tag('ObjectID')).text,
'type': file_type,
'name': entry.find(self._emc_tag('Filename')).text
})
return entries
def _clean_object_name(self, name):
return urlquote(name.encode('ascii'))
def _namespace_path(self, path):
return self.path + '/rest/namespace/' + urlquote(path.encode('ascii'))
def _object_path(self, object_id):
return self.path + '/rest/objects/' + object_id.encode('ascii')
@staticmethod
def _emc_tag(tag):
return '{http://www.emc.com/cos/}' + tag
def _emc_meta(self, response):
meta = response.headers.get('x-emc-meta', '')
if len(meta) == 0:
return {}
meta = meta.split(', ')
return dict([x.split('=', 1) for x in meta])
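        # Illustrative sketch only -- the header values are made-up placeholders.
        # A response header such as
        #   x-emc-meta: md5=b1946ac92492d2347c6235b4d2611184, mtime=2012-03-04T10:30:00Z
        # is parsed by the method above into
        #   {'md5': 'b1946ac92492d2347c6235b4d2611184',
        #    'mtime': '2012-03-04T10:30:00Z'}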
def iterate_container_objects(self, container):
headers = {'x-emc-include-meta': '1'}
path = self._namespace_path(container.name) + '/'
result = self.connection.request(path, headers=headers)
entries = self._list_objects(result.object, object_type='regular')
for entry in entries:
metadata = {'object_id': entry['id']}
yield Object(entry['name'], 0, '', {}, metadata, container, self)
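# Illustrative usage sketch, not part of the original driver; the host,
# credentials and object names below are hypothetical placeholders:
#
#   driver = AtmosDriver('uid/tenant', secret='c2VjcmV0LWtleQ==',
#                        host='atmos.example.com')
#   container = driver.create_container('backups')
#   obj = driver.upload_object('/tmp/report.csv', container, 'report.csv')
#   url = driver.get_object_cdn_url(obj)  # signed URL valid for DEFAULT_CDN_TTL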
| apache-2.0 |
haeusser/tensorflow | tensorflow/contrib/linalg/python/kernel_tests/linear_operator_composition_test.py | 13 | 8641 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
rng = np.random.RandomState(0)
class SquareLinearOperatorCompositionTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(shape)
    # Use either 1 or 2 matrices, chosen at random.
num_operators = rng.randint(low=1, high=3)
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
for _ in range(num_operators)
]
if use_placeholder:
matrices_ph = [
array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
]
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrices = sess.run(matrices)
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m_ph) for m_ph in matrices_ph])
feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
else:
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m) for m in matrices])
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated each matrix to a numpy array.
apply_order_list = list(reversed(matrices))
mat = ops.convert_to_tensor(apply_order_list[0])
for other_mat in apply_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
return operator, mat, feed_dict
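    # Illustrative note (an addition, with a made-up example): for operators
    # [A1, A2], LinearOperatorComposition acts as x -> A1 @ (A2 @ x), so the
    # equivalent dense matrix is A1 @ A2; multiplying in reversed(matrices)
    # order above builds exactly that product.  For example, with
    #   A1 = [[1., 0.], [0., 2.]] and A2 = [[0., 1.], [1., 0.]]
    # the composition is equivalent to the matrix [[0., 1.], [2., 0.]].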
def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1 and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorMatrix(matrix, is_non_singular=True)
operator = linalg.LinearOperatorComposition(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
linalg.LinearOperatorComposition(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorMatrix(matrix, name="right")
operator = linalg.LinearOperatorComposition([operator_1, operator_2])
self.assertEqual("left_o_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
linalg.LinearOperatorComposition(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegexp(ValueError, "non-empty"):
linalg.LinearOperatorComposition([])
class NonSquareLinearOperatorCompositionTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(shape)
# Test only the case of 2 matrices.
# The Square test uses either 1 or 2, so we have tested the case of 1 matrix
# sufficiently.
num_operators = 2
    # Create 2 matrices/operators, A1 and A2, whose product is A = A1 A2.
# Use inner dimension of 2.
k = 2
batch_shape = shape[:-2]
shape_1 = batch_shape + [shape[-2], k]
shape_2 = batch_shape + [k, shape[-1]]
matrices = [
linear_operator_test_util.random_normal(
shape_1, dtype=dtype), linear_operator_test_util.random_normal(
shape_2, dtype=dtype)
]
if use_placeholder:
matrices_ph = [
array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
]
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrices = sess.run(matrices)
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m_ph) for m_ph in matrices_ph])
feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
else:
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m) for m in matrices])
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated each matrix to a numpy array.
apply_order_list = list(reversed(matrices))
mat = ops.convert_to_tensor(apply_order_list[0])
for other_mat in apply_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
return operator, mat, feed_dict
def test_static_shapes(self):
operators = [
linalg.LinearOperatorMatrix(rng.rand(2, 3, 4)),
linalg.LinearOperatorMatrix(rng.rand(2, 4, 5))
]
operator = linalg.LinearOperatorComposition(operators)
self.assertAllEqual((2, 3, 5), operator.shape)
def test_shape_tensors_when_statically_available(self):
operators = [
linalg.LinearOperatorMatrix(rng.rand(2, 3, 4)),
linalg.LinearOperatorMatrix(rng.rand(2, 4, 5))
]
operator = linalg.LinearOperatorComposition(operators)
with self.test_session():
self.assertAllEqual((2, 3, 5), operator.shape_tensor().eval())
def test_shape_tensors_when_only_dynamically_available(self):
mat_1 = rng.rand(1, 2, 3, 4)
mat_2 = rng.rand(1, 2, 4, 5)
mat_ph_1 = array_ops.placeholder(dtypes.float64)
mat_ph_2 = array_ops.placeholder(dtypes.float64)
feed_dict = {mat_ph_1: mat_1, mat_ph_2: mat_2}
operators = [
linalg.LinearOperatorMatrix(mat_ph_1),
linalg.LinearOperatorMatrix(mat_ph_2)
]
operator = linalg.LinearOperatorComposition(operators)
with self.test_session():
self.assertAllEqual(
(1, 2, 3, 5), operator.shape_tensor().eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
| apache-2.0 |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/encodings/cp866.py | 272 | 34396 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp866',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
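# Illustrative example (an addition; not emitted by gencodec.py): once this
# codec is registered by the encodings package, byte 0x80 round-trips with
# CYRILLIC CAPITAL LETTER A, per the tables below:
#   b'\x80'.decode('cp866')   ->  '\u0410'
#   '\u0410'.encode('cp866')  ->  b'\x80'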
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u2116' # 0x00fc -> NUMERO SIGN
'\xa4' # 0x00fd -> CURRENCY SIGN
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00fd, # CURRENCY SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
0x2116: 0x00fc, # NUMERO SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-2.0 |
oblitum/ycmd | cpp/ycm/tests/gmock/scripts/upload_gmock.py | 770 | 2833 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Mock discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GMOCK_GROUP not in cc_list:
cc_list.append(GMOCK_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GMOCK_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| gpl-3.0 |
BT-ojossen/stock-logistics-workflow | __unported__/stock_sale_filters/stock.py | 33 | 2395 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alexandre Fayolle
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model
from openerp.osv import fields
class stock_picking(Model):
_inherit = 'stock.picking'
_columns = {
'carrier_partner_id': fields.related('carrier_id', 'partner_id',
type='many2one',
relation='res.partner',
string='Carrier Name',
readonly=True,
help="Name of the carrier partner"),
'sale_shop_id': fields.related('sale_id', 'shop_id',
type='many2one',
relation='sale.shop',
string='Shop',
readonly=True,
help='The shop from which the sale order for the picking was issued')
}
class sale_order(Model):
_inherit = 'sale.order'
_columns = {
'carrier_partner_id': fields.related('carrier_id', 'partner_id',
type='many2one',
relation='res.partner',
string='Carrier Name',
readonly=True,
help="Name of the carrier partner")
}
| agpl-3.0 |
richardnpaul/FWL-Website | lib/python2.7/site-packages/django/contrib/auth/tests/management.py | 97 | 9156 | from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import models, management
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import changepassword
from django.contrib.auth.models import User
from django.contrib.auth.tests import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.management.validation import get_validation_errors
from django.db.models.loading import get_app
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils.six import StringIO
@skipIfCustomUser
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self.old_get_system_username = management.get_system_username
def tearDown(self):
management.get_system_username = self.old_get_system_username
def test_actual_implementation(self):
self.assertIsInstance(management.get_system_username(), six.text_type)
def test_simple(self):
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
models.User.objects.create(username='joe')
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: 'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
@skipIfCustomUser
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create_user(username='joe', password='qwerty')
self.stdout = StringIO()
self.stderr = StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
def test_that_changepassword_command_changes_joes_password(self):
"Executing the changepassword management command should change joe's password"
self.assertTrue(self.user.check_password('qwerty'))
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute("joe", stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEqual(command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))
def test_that_max_tries_exits_1(self):
"""
A CommandError should be thrown by handle() if the user enters in
mismatched passwords three times.
"""
command = changepassword.Command()
command._get_pass = lambda *args: args or 'foo'
with self.assertRaises(CommandError):
command.execute("joe", stdout=self.stdout, stderr=self.stderr)
@skipIfCustomUser
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_createsuperuser(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe",
email="[email protected]",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, '[email protected]')
# created password should be unusable
self.assertFalse(u.has_usable_password())
def test_verbosity_zero(self):
        # We can suppress output on the management command
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe2",
email="[email protected]",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="[email protected]",
email="[email protected]",
stdout=new_io
)
u = User._default_manager.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom User model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
email="[email protected]",
date_of_birth="1976-04-01",
stdout=new_io,
skip_validation=True
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="[email protected]")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
with self.assertRaises(CommandError):
call_command("createsuperuser",
interactive=False,
username="[email protected]",
stdout=new_io,
stderr=new_io,
skip_validation=True
)
self.assertEqual(CustomUser._default_manager.count(), 0)
class CustomUserModelValidationTestCase(TestCase):
@override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
def test_username_not_in_required_fields(self):
"USERNAME_FIELD should not appear in REQUIRED_FIELDS."
new_io = StringIO()
get_validation_errors(new_io, get_app('auth'))
self.assertIn("The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.", new_io.getvalue())
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
def test_username_non_unique(self):
"A non-unique USERNAME_FIELD should raise a model validation error."
new_io = StringIO()
get_validation_errors(new_io, get_app('auth'))
self.assertIn("The USERNAME_FIELD must be unique. Add unique=True to the field parameters.", new_io.getvalue())
class PermissionDuplicationTestCase(TestCase):
def setUp(self):
self._original_permissions = models.Permission._meta.permissions[:]
def tearDown(self):
models.Permission._meta.permissions = self._original_permissions
def test_duplicated_permissions(self):
"""
Test that we show proper error message if we are trying to create
duplicate permissions.
"""
# check duplicated default permission
models.Permission._meta.permissions = [
('change_permission', 'Can edit permission (duplicate)')]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'change_permission' clashes with a "
"builtin permission for model 'auth.Permission'.",
create_permissions, models, [], verbosity=0)
# check duplicated custom permissions
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
('my_custom_permission', 'Some permission with duplicate permission code'),
]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'my_custom_permission' is duplicated for model "
"'auth.Permission'.",
create_permissions, models, [], verbosity=0)
# should not raise anything
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
]
create_permissions(models, [], verbosity=0)
| gpl-3.0 |
aventuri/opencaster | code/libs/dvbobjects/dvbobjects/PSI/MGT.py | 5 | 2405 | #! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright 2010-2013 Lorenzo Pallara [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import string
from dvbobjects.MPEG.Section import Section
from dvbobjects.utils import *
######################################################################
class master_guide_section(Section):
table_id = 0xC7
section_max_size = 4096
def pack_section_body(self):
# pack tables_loop
tl_bytes = string.join(
map(lambda x: x.pack(),
self.tables_loop),
"")
# pack descriptors_loop
dl_bytes = string.join(
map(lambda x: x.pack(),
self.descriptors_loop),
"")
self.table_id_extension = 0
self.private_indicator = 1
fmt = "!BH%dsH%ds" % (len(tl_bytes), len(dl_bytes))
return pack(fmt,
self.ATSC_protocol_version,
len(self.tables_loop),
tl_bytes,
0xF000 | (len(dl_bytes) & 0x0FFF),
dl_bytes,
)
######################################################################
class table_loop_item(DVBobject):
def pack(self):
# pack transport_descriptor_loop
dl_bytes = string.join(
map(lambda x: x.pack(),
self.descriptors_loop),
"")
fmt = "!HHBLH%ds" % len(dl_bytes)
return pack(fmt,
self.table_type,
0xE000 | (self.table_type_pid & 0x1FFF),
0xE0 | (self.table_type_version_number & 0x1F),
self.number_bytes,
0xF000 | (len(dl_bytes) & 0x0FFF),
dl_bytes,
)
| gpl-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.5/tests/regressiontests/generic_views/dates.py | 50 | 31477 | from __future__ import absolute_import
import time
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.unittest import skipUnless
from .models import Book, BookSigning
TZ_SUPPORT = hasattr(time, 'tzset')
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
def _make_books(n, base_date):
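# Create n Book fixtures whose pubdates count back one day at a time from base_date.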
for i in range(n):
b = Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100+i,
pubdate=base_date - datetime.timedelta(days=i))
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
b = Book.objects.create(name="The New New Testament", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testament", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0].date(), b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month,day in ((9,1), (10,2), (11,3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,9,1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testament", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testament", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testament", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Ensure that custom querysets are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
| mit |
hpi-xnor/BMXNet | python/mxnet/ndarray/_internal.py | 34 | 1986 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, unused-import
"""NDArray namespace used to register internal functions."""
import os as _os
import sys as _sys
import numpy as np
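# Prefer the Cython-generated ndarray bindings unless MXNET_ENABLE_CYTHON=0;
# fall back to the ctypes implementation when the Cython modules are missing.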
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from .._ctypes.ndarray import NDArrayBase, CachedOp
from .._ctypes.ndarray import _set_ndarray_class, _imperative_invoke
elif _sys.version_info >= (3, 0):
from .._cy3.ndarray import NDArrayBase, CachedOp
from .._cy3.ndarray import _set_ndarray_class, _imperative_invoke
else:
from .._cy2.ndarray import NDArrayBase, CachedOp
from .._cy2.ndarray import _set_ndarray_class, _imperative_invoke
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from .._ctypes.ndarray import NDArrayBase, CachedOp
from .._ctypes.ndarray import _set_ndarray_class, _imperative_invoke
from ..base import _Null
try:
from .gen__internal import * # pylint: disable=unused-wildcard-import
except ImportError:
pass
__all__ = ['NDArrayBase', 'CachedOp', '_imperative_invoke', '_set_ndarray_class']
| apache-2.0 |
ilexius/odoo | openerp/addons/test_impex/tests/test_load.py | 7 | 44231 | # -*- coding: utf-8 -*-
import json
import pkgutil
import unittest
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
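# Helpers that build the dicts load() reports in result['messages'], so the
# tests below can assert on them directly.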
def message(msg, type='error', from_=0, to_=0, record=0, field='value', **kwargs):
return dict(kwargs,
type=type, rows={'from': from_, 'to': to_}, record=record,
field=field, message=msg)
def moreaction(**kwargs):
return dict(kwargs,
type='ir.actions.act_window',
target='new',
view_mode='tree,form',
view_type='form',
views=[(False, 'tree'), (False, 'form')],
help=u"See all possible values")
def values(seq, field='value'):
return [item[field] for item in seq]
class ImporterCase(common.TransactionCase):
model_name = False
def __init__(self, *args, **kwargs):
super(ImporterCase, self).__init__(*args, **kwargs)
self.model = None
def setUp(self):
super(ImporterCase, self).setUp()
self.model = self.registry(self.model_name)
self.registry('ir.model.data').clear_caches()
def import_(self, fields, rows, context=None):
return self.model.load(
self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
def read(self, fields=('value',), domain=(), context=None):
return self.model.read(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
fields=fields, context=context)
def browse(self, domain=(), context=None):
return self.model.browse(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
context=context)
def xid(self, record):
ModelData = self.registry('ir.model.data')
ids = ModelData.search(
self.cr, openerp.SUPERUSER_ID,
[('model', '=', record._name), ('res_id', '=', record.id)])
if ids:
d = ModelData.read(
self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
if d['module']:
return '%s.%s' % (d['module'], d['name'])
return d['name']
name = record.name_get()[0][1]
# fix dotted name_get results, otherwise xid lookups blow up
name = name.replace('.', '-')
ModelData.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'model': record._name,
'res_id': record.id,
'module': '__test__'
})
return '__test__.' + name
def add_translations(self, name, type, code, *tnx):
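# Load a language and register translations so imports can be exercised with a non-English context.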
self.registry('res.lang').load_lang(self.cr, openerp.SUPERUSER_ID, code)
Translations = self.registry('ir.translation')
for source, value in tnx:
Translations.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'lang': code,
'type': type,
'src': source,
'value': value,
'state': 'translated',
})
class test_ids_stuff(ImporterCase):
model_name = 'export.integer'
def test_create_with_id(self):
result = self.import_(['.id', 'value'], [['42', '36']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': '.id',
'message': u"Unknown database identifier '42'",
}])
def test_create_with_xid(self):
result = self.import_(['id', 'value'], [['somexmlid', '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
'somexmlid',
self.xid(self.browse()[0]))
def test_update_with_id(self):
id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
self.assertEqual(
36,
self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
result = self.import_(['.id', 'value'], [[str(id), '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[42], # updated value to imported
values(self.read()))
def test_update_with_xid(self):
self.import_(['id', 'value'], [['somexmlid', '36']])
self.assertEqual([36], values(self.read()))
self.import_(['id', 'value'], [['somexmlid', '1234567']])
self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
model_name = 'export.boolean'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_exported(self):
result = self.import_(['value'], [['False'], ['True'], ])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
records = self.read()
self.assertEqual([
False,
True,
], values(records))
def test_falses(self):
for lang, source, value in [('fr_FR', 'no', u'non'),
('de_DE', 'no', u'nein'),
('ru_RU', 'no', u'нет'),
('nl_BE', 'false', u'vals'),
('lt_LT', 'false', u'klaidingas')]:
self.add_translations('test_import.py', 'code', lang, (source, value))
falses = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u''],
[u'non'], # no, fr
[u'nein'], # no, de
[u'нет'], # no, ru
[u'vals'], # false, nl
[u'klaidingas'], # false, lt,
]
result = self.import_(['value'], falses)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(falses))
self.assertEqual([False] * len(falses), values(self.read()))
def test_trues(self):
trues = [['None'], ['nil'], ['()'], ['f'], ['#f'],
# Problem: OpenOffice (and probably Excel) outputs localized booleans
['VRAI'], ['ok'], ['true'], ['yes'], ['1'], ]
result = self.import_(['value'], trues)
self.assertEqual(len(result['ids']), 10)
self.assertEqual(result['messages'], [
message(u"Unknown value '%s' for boolean field 'unknown', assuming 'yes'" % v[0],
moreinfo=u"Use '1' for yes and '0' for no",
type='warning', from_=i, to_=i, record=i)
for i, v in enumerate(trues)
if v[0] not in ('true', 'yes', '1')
])
self.assertEqual(
[True] * 10,
values(self.read()))
class test_integer_field(ImporterCase):
model_name = 'export.integer'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678']
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-(2**31 - 1))],
[str(-(2**31))],
['-12345678']
])
self.assertEqual(len(result['ids']), 5)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678
], values(self.read()))
@mute_logger('openerp.sql_db', 'openerp.models')
def test_out_of_range(self):
result = self.import_(['value'], [[str(2**31)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
result = self.import_(['value'], [[str(-2**32)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
def test_nonsense(self):
result = self.import_(['value'], [['zorglub']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': 'value',
'message': u"'zorglub' does not seem to be an integer for field 'unknown'",
}])
class test_float_field(ImporterCase):
model_name = 'export.float'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678'],
[str(2**33)],
['0.000001'],
])
self.assertEqual(len(result['ids']), 6)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678, 2.0**33, .000001
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-2**31 + 1)],
[str(-2**31)],
['-12345678'],
[str(-2**33)],
['-0.000001'],
])
self.assertEqual(len(result['ids']), 7)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
], values(self.read()))
def test_nonsense(self):
result = self.import_(['value'], [['foobar']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [
message(u"'foobar' does not seem to be a number for field 'unknown'")])
class test_string_field(ImporterCase):
model_name = 'export.string.bounded'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
result = self.import_(['value'], [
[u'foobar'],
[u'foobarbaz'],
[u'Með suð í eyrum við spilum endalaust'],
[u"People 'get' types. They use them all the time. Telling "
u"someone he can't pound a nail with a banana doesn't much "
u"surprise him."]
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
u"foobar",
u"foobarbaz",
u"Með suð í eyrum ",
u"People 'get' typ",
], values(self.read()))
class test_unbound_string_field(ImporterCase):
model_name = 'export.string'
def test_imported(self):
result = self.import_(['value'], [
[u'í dag viðrar vel til loftárása'],
# ackbar.jpg
[u"If they ask you about fun, you tell them – fun is a filthy"
u" parasite"]
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual([
u"í dag viðrar vel til loftárása",
u"If they ask you about fun, you tell them – fun is a filthy parasite"
], values(self.read()))
class test_required_string_field(ImporterCase):
model_name = 'export.string.required'
@mute_logger('openerp.sql_db', 'openerp.models')
def test_empty(self):
result = self.import_(['value'], [[]])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'unknown' (value)")])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db', 'openerp.models')
def test_not_provided(self):
result = self.import_(['const'], [['12']])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'unknown' (value)")])
self.assertIs(result['ids'], False)
class test_text(ImporterCase):
model_name = 'export.text'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
result = self.import_(['value'], [[s]])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
model_name = 'export.selection'
translations_fr = [
("Foo", "tete"),
("Bar", "titi"),
("Qux", "toto"),
]
def test_imported(self):
result = self.import_(['value'], [
['Qux'],
['Bar'],
['Foo'],
['2'],
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([3, 2, 1, 2], values(self.read()))
def test_imported_translated(self):
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['toto'],
['tete'],
['titi'],
], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 3)
self.assertFalse(result['messages'])
self.assertEqual([3, 1, 2], values(self.read()))
result = self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
def test_invalid(self):
result = self.import_(['value'], [['Baz']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value 'Baz' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
result = self.import_(['value'], [[42]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value '42' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
class test_selection_with_default(ImporterCase):
model_name = 'export.selection.withdefault'
def test_empty(self):
""" Empty cells should set corresponding field to False
"""
result = self.import_(['value'], [['']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[False])
def test_default(self):
""" Non-provided cells should set corresponding field to default
"""
result = self.import_(['const'], [['42']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[2])
class test_selection_function(ImporterCase):
model_name = 'export.selection.function'
translations_fr = [
("Corge", "toto"),
("Grault", "titi"),
("Wheee", "tete"),
("Moog", "tutu"),
]
def test_imported(self):
""" import uses fields_get, so translates import label (may or may not
be good news) *and* serializes the selection function to reverse it:
import does not actually know that the selection field uses a function
"""
# NOTE: conflict between a value and a label => pick first
result = self.import_(['value'], [
['3'],
["Grault"],
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual(
[3, 1],
values(self.read()))
def test_translated(self):
""" Expects output of selection function returns translated labels
"""
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['titi'],
['tete'],
], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
self.assertEqual(values(self.read()), [1, 2])
result = self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
class test_m2o(ImporterCase):
model_name = 'export.many2one'
def test_by_name(self):
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# get its name
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
result = self.import_(['value'], [
# import by name_get
[name1],
[name1],
[name2],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
# correct ids assigned to corresponding records
self.assertEqual([
(integer_id1, name1),
(integer_id1, name1),
(integer_id2, name2),],
values(self.read()))
def test_by_xid(self):
ExportInteger = self.registry('export.integer')
integer_id = ExportInteger.create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
xid = self.xid(ExportInteger.browse(
self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
result = self.import_(['value/id'], [[xid]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
result = self.import_(['value/.id'], [[integer_id]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_names(self):
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
# names should be the same
self.assertEqual(name1, name2)
result = self.import_(['value'], [[name2]])
self.assertEqual(
result['messages'],
[message(u"Found multiple matches for field 'unknown' (2 matches)",
type='warning')])
self.assertEqual(len(result['ids']), 1)
self.assertEqual([
(integer_id1, name1)
], values(self.read()))
def test_fail_by_implicit_id(self):
""" Can't implicitly import records by id
"""
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# Values are resolved through name_search, so a bare database id is treated as a name and fails to match
result = self.import_(['value'], [
# import by id, without specifying it
[integer_id1],
[integer_id2],
[integer_id1],
])
self.assertEqual(result['messages'], [
message(u"No matching record found for name '%s' in field 'unknown'" % id,
from_=index, to_=index, record=index,
moreinfo=moreaction(res_model='export.integer'))
for index, id in enumerate([integer_id1, integer_id2, integer_id1])])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db')
def test_fail_id_mistype(self):
result = self.import_(['value/.id'], [["foo"]])
self.assertEqual(result['messages'], [
message(u"Invalid database id 'foo' for the field 'unknown'",
moreinfo=moreaction(res_model='ir.model.data',
domain=[('model','=','export.integer')]))
])
self.assertIs(result['ids'], False)
def test_sub_field(self):
""" Does not implicitly create the record, does not warn that you can't
import m2o subfields (at all)...
"""
result = self.import_(['value/value'], [['42']])
self.assertEqual(result['messages'], [
message(u"Can not create Many-To-One records indirectly, import "
u"the field separately")])
self.assertIs(result['ids'], False)
def test_fail_noids(self):
result = self.import_(['value'], [['nameisnoexist:3']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'nameisnoexist:3' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='export.integer'))])
self.assertIs(result['ids'], False)
result = self.import_(['value/id'], [['noxidhere']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidhere' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
result = self.import_(['value/.id'], [['66']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '66' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
def test_fail_multiple(self):
result = self.import_(
['value', 'value/id'],
[['somename', 'somexid']])
self.assertEqual(result['messages'], [message(
u"Ambiguous specification for field 'unknown', only provide one of "
u"name, external id or database id")])
self.assertIs(result['ids'], False)
class test_m2m(ImporterCase):
model_name = 'export.many2many'
# Apparently the only thing that works is a csv_internal_sep-separated
# list of ids, xids, or names (depending on whether the column is
# m2m/.id, m2m/id or m2m[/anythingelse])
def test_ids(self):
id1 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
id5 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
result = self.import_(['value/.id'], [
['%d,%d' % (id1, id2)],
['%d,%d,%d' % (id1, id3, id4)],
['%d,%d,%d' % (id1, id2, id3)],
['%d' % id5]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 4)
ids = lambda records: [record.id for record in records]
b = self.browse()
self.assertEqual(ids(b[0].value), [id1, id2])
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(ids(b[2].value), [id1, id2, id3])
self.assertEqual(values(b[2].value), [3, 44, 84])
def test_noids(self):
result = self.import_(['value/.id'], [['42']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '42' in field "
u"'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_xids(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
result = self.import_(['value/id'], [
['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
['%s' % self.xid(records[3])],
['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(values(b[2].value), [44, 84])
def test_noxids(self):
result = self.import_(['value/id'], [['noxidforthat']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidforthat' in field"
u" 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_names(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
name = lambda record: record.name_get()[0][1]
result = self.import_(['value'], [
['%s,%s' % (name(records[1]), name(records[2]))],
['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
['%s,%s' % (name(records[0]), name(records[3]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[1].value), [3, 44, 84])
self.assertEqual(values(b[2].value), [3, 9])
def test_nonames(self):
result = self.import_(['value'], [['wherethem2mhavenonames']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'wherethem2mhavenonames' in "
u"field 'unknown'", moreinfo=moreaction(
res_model='export.many2many.other'))])
self.assertIs(result['ids'], False)
def test_import_to_existing(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
xid = 'myxid'
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(len(b), 1)
# TODO: replacement of existing m2m values is correct?
self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
model_name = 'export.one2many'
def test_name_get(self):
s = u'Java is a DSL for taking large XML files and converting them ' \
u'to stack traces'
result = self.import_(
['const', 'value'],
[['5', s]])
self.assertEqual(result['messages'], [message(
u"No matching record found for name '%s' in field 'unknown'" % s,
moreinfo=moreaction(res_model='export.one2many.child'))])
self.assertIs(result['ids'], False)
def test_single(self):
result = self.import_(['const', 'value/value'], [
['5', '63']
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.value), [63])
def test_multicore(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['6', '64'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b1, b2 = self.browse()
self.assertEqual(b1.const, 5)
self.assertEqual(values(b1.value), [63])
self.assertEqual(b2.const, 6)
self.assertEqual(values(b2.value), [64])
def test_multisub(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['', '64'],
['', '65'],
['', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
def test_multi_subfields(self):
result = self.import_(['value/str', 'const', 'value/value'], [
['this', '5', '63'],
['is', '', '64'],
['the', '', '65'],
['rhythm', '', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
self.assertEqual(
values(b.value, 'str'),
'this is the rhythm'.split())
def test_link_inline(self):
""" m2m-style specification for o2ms
"""
id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', '%d,%d' % (id1, id2)]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link(self):
""" O2M relating to an existing record (update) force a LINK_TO as well
"""
O2M = self.registry('export.one2many.child')
id1 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', str(id1)],
['', str(id2)],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link_2(self):
O2M_c = self.registry('export.one2many.child')
id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id', 'value/value'], [
['42', str(id1), '1'],
['', str(id2), '2'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
self.assertEqual(values(b.value), [1, 2])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
model_name = 'export.one2many.multiple'
def test_multi_mixed(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', '22'],
['', '13', '23'],
['', '14', ''],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi_fullsplit(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', ''],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '21'],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
class test_realworld(common.TransactionCase):
def test_bigfile(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts_big.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
['name', 'mobile', 'email', 'image'],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_backlink(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
["name", "type", "street", "city", "country_id", "category_id",
"supplier", "customer", "is_company", "parent_id"],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_recursive_o2m(self):
""" The content of the o2m field's dict needs to go through conversion
as it may be composed of convertables or other relational fields
"""
self.registry('ir.model.data').clear_caches()
Model = self.registry('export.one2many.recursive')
result = Model.load(self.cr, openerp.SUPERUSER_ID,
['value', 'child/const', 'child/child1/str', 'child/child2/value'],
[
['4', '42', 'foo', '55'],
['', '43', 'bar', '56'],
['', '', 'baz', ''],
['', '55', 'qux', '57'],
['5', '99', 'wheee', ''],
['', '98', '', '12'],
],
context=None)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b = Model.browse(self.cr, openerp.SUPERUSER_ID, result['ids'], context=None)
self.assertEqual((b[0].value, b[1].value), (4, 5))
self.assertEqual([child.str for child in b[0].child[1].child1],
['bar', 'baz'])
self.assertFalse(len(b[1].child[1].child1))
self.assertEqual([child.value for child in b[1].child[1].child2],
[12])
class test_date(ImporterCase):
model_name = 'export.date'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a date']])
self.assertEqual(result['messages'], [
message(u"'not really a date' does not seem to be a valid date "
u"for field 'unknown'",
moreinfo=u"Use the format '2012-12-31'")])
self.assertIs(result['ids'], False)
class test_datetime(ImporterCase):
model_name = 'export.datetime'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a datetime']])
self.assertEqual(result['messages'], [
message(u"'not really a datetime' does not seem to be a valid "
u"datetime for field 'unknown'",
moreinfo=u"Use the format '2012-12-31 23:59:59'")])
self.assertIs(result['ids'], False)
def test_checktz1(self):
""" Imported date should be interpreted as being in the tz provided by
the context
"""
# write dummy tz in user (Asia/Hovd UTC+0700), should be superseded by
# context
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Hovd'})
# UTC+1400
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Kiritimati'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-02 21:11:11'])
# UTC-0930
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Marquesas'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 20:41:11'])
def test_usertz(self):
""" If the context does not hold a timezone, the importing user's tz
should be used
"""
# UTC +1000
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Yakutsk'})
result = self.import_(
['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 01:11:11'])
def test_notz(self):
""" If there is no tz either in the context or on the user, falls back
to UTC
"""
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': False})
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 11:11:11'])
class test_unique(ImporterCase):
model_name = 'export.unique'
@mute_logger('openerp.sql_db')
def test_unique(self):
result = self.import_(['value'], [
['1'],
['1'],
['2'],
['3'],
['3'],
])
self.assertFalse(result['ids'])
self.assertEqual(result['messages'], [
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 1, 'to': 1},
record=1, field='value'),
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 4, 'to': 4},
record=4, field='value'),
])
| gpl-3.0 |
vtemian/uni-west | second_year/os/exams/os_sched.py | 1 | 1793 | import Queue
import copy
fd = open('processes.txt')
processes = []
endTime = 0
time = 0
for line in fd:
tempProc = line.split(" ")
tempProc[0] = int(tempProc[0])
tempProc[1] = int(tempProc[1])
tempProc.append(0)
tempProc.append(0)
tempProc.append(0)
process = (arrival, burst, tw, tr, visited) = tempProc
processes.append(process)
for process in processes:
#print("Arrival {}; Burst {}".format(process[0],process[1]))
endTime += int(process[1])
pass
backupProcesses = copy.deepcopy(processes)
def getProcessesFifo(q, ps, time):
for process in ps:
if process[0] <= time and int(process[4]) == 0:
process[4] = 1
q.append(process)
q.sort(key=lambda tup: tup[0])
return q
def computeTr(ps):
for process in ps:
process[3] = process[1] + process[2]
def fifo(ps):
time = -1
q = []
while len(q) == 0:
time +=1
q = getProcessesFifo(q, ps, time)
while time < endTime:
q = getProcessesFifo(q, ps, time)
process = q.pop(0)
process[2] = time - process[0]
time += process[1]
computeTr(ps)
print "Fifo"
print "Arr Burst Tw Tr"
for process in ps:
print("{} {} {} {}".format(process[0],process[1],process[2],process[3]))
def sjf(ps):
time = -1
q = []
while len(q) == 0:
time +=1
q = getProcessesFifo(q, ps, time)
while time < endTime:
q = getProcessesFifo(q, ps, time)
q.sort(key=lambda tup: tup[1])
process = q.pop(0)
process[2] = time - process[0]
time += process[1]
computeTr(ps)
print "SJF"
print "Arr Burst Tw Tr"
for process in ps:
print("{} {} {} {}".format(process[0],process[1],process[2],process[3]))
fifo(processes)
processes = copy.deepcopy(backupProcesses)
sjf(processes)
processes = copy.deepcopy(backupProcesses)
| apache-2.0 |
google-code/betsynetpdf | sumatrapdf/scripts/util.py | 4 | 21807 | import os
import re
import subprocess
import sys
import hashlib
import string
import time
import types
import zipfile
import bz2
import shutil
def log(s):
print(s)
sys.stdout.flush()
def strip_empty_lines(s):
s = s.replace("\r\n", "\n")
lines = [l.strip() for l in s.split("\n") if len(l.strip()) > 0]
return string.join(lines, "\n")
def trim_str(s):
if len(s) < 75:
return (s, False)
# we don't want to trim if adding "..." would make it bigger than original
if len(s) < 78:
return (s, False)
return (s[:75], True)
def test_for_flag(args, arg, has_data=False):
if arg not in args:
if not has_data:
return False
for argx in args:
if argx.startswith(arg + "="):
args.remove(argx)
return argx[len(arg) + 1:]
return None
if not has_data:
args.remove(arg)
return True
idx = args.index(arg)
if idx == len(args) - 1:
return None
data = args[idx + 1]
args.pop(idx + 1)
args.pop(idx)
return data
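# Illustrative behaviour of test_for_flag() (hypothetical argument list, not
# taken from any real invocation); note that matched flags are removed from
# args as a side effect:
#   args = ["-s", "foo", "-v"]
#   test_for_flag(args, "-v")                 # -> True, args becomes ["-s", "foo"]
#   test_for_flag(args, "-s", has_data=True)  # -> "foo", args becomes []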
def file_sha1(fp):
data = open(fp, "rb").read()
m = hashlib.sha1()
m.update(data)
return m.hexdigest()
def delete_file(path):
if os.path.exists(path):
os.remove(path)
def create_dir(d):
if not os.path.exists(d):
os.makedirs(d)
return d
def verify_path_exists(path):
if not os.path.exists(path):
print("path '%s' doesn't exist" % path)
sys.exit(1)
return path
def verify_started_in_right_directory():
if os.path.exists(os.path.join("scripts", "build.py")):
return
if os.path.exists(os.path.join(os.getcwd(), "scripts", "build.py")):
return
print("This script must be run from top of the source tree")
sys.exit(1)
def subprocess_flags():
# this magic disables the modal dialog that windows shows if the process crashes
# TODO: it doesn't seem to work, maybe because it was actually a crash in a process
# sub-launched from the process I'm launching. I had to manually disable this in
# registry, as per http://stackoverflow.com/questions/396369/how-do-i-disable-the-debug-close-application-dialog-on-windows-vista:
# DWORD HKLM or HKCU\Software\Microsoft\Windows\Windows Error Reporting\DontShowUI = "1"
# DWORD HKLM or HKCU\Software\Microsoft\Windows\Windows Error Reporting\Disabled = "1"
# see: http://msdn.microsoft.com/en-us/library/bb513638.aspx
if sys.platform.startswith("win"):
import ctypes
SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN
ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
return 0x8000000 # win32con.CREATE_NO_WINDOW?
return 0
# Apparently the shell argument to Popen must be False on unix/mac and True
# on windows
def shell_arg():
if os.name == "nt":
return True
return False
# will throw an exception if a command doesn't exist
# otherwise returns a tuple:
# (stdout, stderr, errcode)
def run_cmd(*args):
cmd = " ".join(args)
print("run_cmd: '%s'" % cmd)
cmdproc = subprocess.Popen(args, shell=shell_arg(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, creationflags=subprocess_flags())
res = cmdproc.communicate()
return (res[0], res[1], cmdproc.returncode)
# like run_cmd() but throws an exception if command returns non-0 error code
def run_cmd_throw(*args):
cmd = " ".join(args)
print("run_cmd_throw: '%s'" % cmd)
cmdproc = subprocess.Popen(args, shell=shell_arg(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, creationflags=subprocess_flags())
res = cmdproc.communicate()
errcode = cmdproc.returncode
if 0 != errcode:
print("Failed with error code %d" % errcode)
if len(res[0]) > 0:
print("Stdout:\n%s" % res[0])
if len(res[1]) > 0:
print("Stderr:\n%s" % res[1])
raise Exception("'%s' failed with error code %d" % (cmd, errcode))
return (res[0], res[1])
# work-around a problem with running devenv from command-line:
# http://social.msdn.microsoft.com/Forums/en-US/msbuild/thread/9d8b9d4a-c453-4f17-8dc6-838681af90f4
def kill_msbuild():
(stdout, stderr, err) = run_cmd("taskkill", "/F", "/IM", "msbuild.exe")
if err not in (0, 128): # 0 is no error, 128 is 'process not found'
print("err: %d\n%s%s" % (err, stdout, stderr))
print("exiting")
sys.exit(1)
# Parse output of svn info and return revision number indicated by
# "Last Changed Rev" field or, if that doesn't exist, by "Revision" field
def parse_svninfo_out(txt):
ver = re.findall(r'(?m)^Last Changed Rev: (\d+)', txt)
if ver:
return ver[0]
ver = re.findall(r'(?m)^Revision: (\d+)', txt)
if ver:
return ver[0]
raise Exception("parse_svn_info_out() failed to parse '%s'" % txt)
# returns local and latest (on the server) svn versions
def get_svn_versions():
(out, err) = run_cmd_throw("svn", "info")
ver_local = str(parse_svninfo_out(out))
(out, err) = run_cmd_throw("svn", "info",
"https://sumatrapdf.googlecode.com/svn/trunk")
ver_latest = str(parse_svninfo_out(out))
return ver_local, ver_latest
# Given a line in svn info output:
# URL: https://sumatrapdf.googlecode.com/svn/trunk
# return '/trunk' part
def get_svn_branch():
(out, err) = run_cmd_throw("svn", "info")
url = re.findall(r'URL: (.+)', out)[0]
s = "https://sumatrapdf.googlecode.com/svn"
assert url.startswith(s), "'%s' should start with '%s'" % (url, s)
return url[len(s):]
# Parse output of "svn log -r${rev} -v", which looks sth. like this:
#------------------------------------------------------------------------
# r6667 | kkowalczyk | 2012-09-25 22:52:34 -0700 (Tue, 25 Sep 2012) | 1 line
# Changed paths:
# M /trunk/installer-vc2008.vcproj
# D /trunk/src/utils/Http.h
# A /trunk/src/utils/HttpUtil.cpp (from /trunk/src/utils/Http.cpp:6665)
#
# rename Http.[h|cpp] => HttpUtil.[h|cpp]
#------------------------------------------------------------------------
# Returns a tuple:
# (user, comment, modified, added, deleted)
# or None in case this is not a source checkin (but e.g. a wiki page edit)
def parse_svnlog_out(txt):
lines = [l.strip() for l in txt.split("\n")]
# remove empty line at the end
if len(lines) > 1 and len(lines[-1]) == 0:
lines = lines[:-1]
if 1 == len(lines):
return None
if not lines[0].startswith("---"):
print(txt)
print("l: '%s'" % lines[0])
assert lines[0].startswith("----")
if not lines[-1].startswith("---"):
print(txt)
print("l: '%s'" % lines[-1])
assert lines[-1].startswith("----")
user = lines[1].split(" | ")[1]
assert "Changed paths:" == lines[2]
modified = []
added = []
deleted = []
lines = lines[3:]
n = 0
while True:
if 0 == len(lines[n]):
break
s = lines[n]
#print("s: %s" % s)
typ = s[0]
name = s[2:]
assert name[0] == '/'
if typ == 'M':
modified.append(name)
elif typ == 'D':
deleted.append(name)
elif typ == 'A':
added.append(name)
else:
print("typ: %s\n" % typ)
assert False
n += 1
lines = lines[n + 1:-1] # skip the last ----
comment = string.join(lines, "\n")
return (user, comment, modified, added, deleted)
def parse_svnlog_out_test():
s = """------------------------------------------------------------------------
r6667 | kkowalczyk | 2012-09-25 22:52:34 -0700 (Tue, 25 Sep 2012) | 1 line
Changed paths:
M /trunk/src/SumatraPDF.cpp
D /trunk/src/utils/Http.h
A /trunk/src/utils/HttpUtil.h (from /trunk/src/utils/Http.h:6665)
M /trunk/sumatrapdf-vc2012.vcxproj
M /trunk/sumatrapdf-vc2012.vcxproj.filters
rename Http.[h|cpp] => HttpUtil.[h|cpp]
------------------------------------------------------------------------"""
res = parse_svnlog_out(s)
(user, comment, modified, added, deleted) = res
print("User: %s\nComment: %s\nModified: %s\nAdded: %s\nDeleted: %s\n" %
(user, comment, str(modified), str(added), str(deleted)))
assert user == "kkowalczyk"
assert comment == "rename Http.[h|cpp] => HttpUtil.[h|cpp]"
def parse_svnlog_out_test2(startrev=1, endrev=6700):
rev = endrev
while rev >= startrev:
(out, err) = run_cmd_throw("svn", "log", "-r%s" % str(rev), "-v")
res = parse_svnlog_out(out)
print("\nRev: %s" % str(rev))
if None == res:
print("Not svn checkin")
else:
(user, comment, modified, added, deleted) = res
print(
"User: %s\nComment: %s\nModified: %s\nAdded: %s\nDeleted: %s\n" %
(user, comment, str(modified), str(added), str(deleted)))
rev -= 1
# version line is in the format:
# define CURR_VERSION 1.1
def extract_sumatra_version(file_path):
content = open(file_path).read()
ver = re.findall(r'CURR_VERSION (\d+(?:\.\d+)*)', content)[0]
return ver
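# For example (hypothetical header contents), a version file containing the
# line "#define CURR_VERSION 2.3.1" makes extract_sumatra_version() return
# the string "2.3.1".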
def file_remove_try_hard(path):
removeRetryCount = 0
while removeRetryCount < 3:
try:
os.remove(path)
return
except:
# sleep briefly to give whatever still holds the file time to release it
time.sleep(1)
print "exception: n %s, n %s, n %s n when trying to remove file %s" % (sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2], path)
removeRetryCount += 1
def zip_file(dst_zip_file, src_path, in_zip_name=None, compress=True, append=False):
mode = "w"
if append:
mode = "a"
if compress:
zf = zipfile.ZipFile(dst_zip_file, mode, zipfile.ZIP_DEFLATED)
else:
zf = zipfile.ZipFile(dst_zip_file, mode, zipfile.ZIP_STORED)
if in_zip_name is None:
in_zip_name = os.path.basename(src_path)
zf.write(src_path, in_zip_name)
zf.close()
def bz_file_compress(src, dst):
with open(src, "rb") as src_fo:
with bz2.BZ2File(dst, "w", buffering=16 * 1024 * 1024, compresslevel=9) as dst_fo:
shutil.copyfileobj(src_fo, dst_fo, length=1 * 1024 * 1024)
def formatInt(x):
if x < 0:
return '-' + formatInt(-x)
result = ''
while x >= 1000:
x, r = divmod(x, 1000)
result = ".%03d%s" % (r, result)
return "%d%s" % (x, result)
def str2bool(s):
if s.lower() in ("true", "1"):
return True
if s.lower() in ("false", "0"):
return False
assert(False)
class Serializable(object):
def __init__(self, fields, fields_no_serialize, read_from_file=None):
self.fields = fields
self.fields_no_serialize = fields_no_serialize
self.vals = {}
if read_from_file != None:
self.from_s(open(read_from_file, "r").read())
def type_of_field(self, name):
return type(self.fields[name])
def from_s(self, s):
# print(s)
lines = s.split("\n")
for l in lines:
(name, val) = l.split(": ", 1)
tp = self.type_of_field(name)
if tp == types.IntType:
self.vals[name] = int(val)
elif tp == types.LongType:
self.vals[name] = long(val)
elif tp == types.BooleanType:
self.vals[name] = str2bool(val)
elif tp in (types.StringType, types.UnicodeType):
self.vals[name] = val
else:
print(name)
assert(False)
def to_s(self):
res = []
for k, v in self.vals.items():
if k in self.fields_no_serialize:
continue
res.append("%s: %s" % (k, str(v)))
return string.join(res, "\n")
def write_to_file(self, filename):
open(filename, "w").write(self.to_s())
def compat_types(self, tp1, tp2):
if tp1 == tp2:
return True
num_types = (types.IntType, types.LongType)
if tp1 in num_types and tp2 in num_types:
return True
return False
def __setattr__(self, k, v):
if k in self.fields:
if not self.compat_types(type(v), type(self.fields[k])):
print("k='%s', %s != %s (type(v) != type(self.fields[k]))" % (
k, type(v), type(self.fields[k])))
assert type(v) == type(self.fields[k])
self.vals[k] = v
else:
super(Serializable, self).__setattr__(k, v)
def __getattr__(self, k):
if k in self.vals:
return self.vals[k]
if k in self.fields:
return self.fields[k]
return super(Serializable, self).__getattribute__(k)
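# Rough sketch of the "key: value" text format handled by Serializable
# (hypothetical subclass with fields {"ver": 0, "name": ""}; fields listed in
# fields_no_serialize are skipped by to_s(), and line order is not guaranteed):
#   obj.ver = 123; obj.name = "release build"
#   obj.to_s()                                    # -> "ver: 123\nname: release build"
#   obj.from_s("ver: 123\nname: release build")   # restores the typed values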
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def sendmail(sender, senderpwd, to, subject, body):
# print("sendmail is disabled"); return
mail = MIMEMultipart()
mail['From'] = sender
toHdr = to
if isinstance(toHdr, list):
toHdr = ", ".join(toHdr)
mail['To'] = toHdr
mail['Subject'] = subject
mail.attach(MIMEText(body))
msg = mail.as_string()
# print(msg)
mailServer = smtplib.SMTP("smtp.gmail.com", 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(sender, senderpwd)
mailServer.sendmail(sender, to, msg)
mailServer.close()
# Some operations, like uploading to s3, require knowing s3 credentials.
# We store all such information that cannot be publicly known in a file
# config.py. This object is just a wrapper that documents the fields
# and provides default values if config.py doesn't exist
class Config(object):
def __init__(self):
self.aws_access = None
self.aws_secret = None
self.cert_pwd = None
self.trans_ul_secret = None
self.notifier_email = None
self.notifier_email_pwd = None
def GetNotifierEmailAndPwdMustExist(self):
assert(None != self.notifier_email and None != self.notifier_email_pwd)
return (self.notifier_email, self.notifier_email_pwd)
def HasNotifierEmail(self):
return self.notifier_email != None and self.notifier_email_pwd != None
def GetCertPwdMustExist(self):
assert(None != self.cert_pwd)
return self.cert_pwd
def GetTransUploadSecret(self):
assert(None != self.trans_ul_secret)
return self.trans_ul_secret
# TODO: could verify aws creds better i.e. check the lengths
def GetAwsCredsMustExist(self):
assert(None != self.aws_access)
assert(None != self.aws_secret)
return (self.aws_access, self.aws_secret)
def HasAwsCreds(self):
if None is self.aws_access:
return False
if None is self.aws_secret:
return False
return True
g_config = None
def load_config():
global g_config
if g_config != None:
return g_config
c = Config()
try:
import config
c.aws_access = config.aws_access
c.aws_secret = config.aws_secret
c.cert_pwd = config.cert_pwd
c.notifier_email = config.notifier_email
c.notifier_email_pwd = config.notifier_email_pwd
c.trans_ul_secret = config.trans_ul_secret
except:
# it's ok if it doesn't exist, we just won't have the config data
print("no config.py!")
g_config = c
return g_config
def test_load_config():
c = load_config()
vals = (c.aws_access, c.aws_secret, c.cert_pwd, c.trans_ul_secret)
print("aws_secret: %s\naws_secret: %s\ncert_pwd: %s\ntrans_ul_secret: %s" %
vals)
def gob_uvarint_encode(i):
assert i >= 0
if i <= 0x7f:
return chr(i)
res = ""
while i > 0:
b = i & 0xff
res += chr(b)
i = i >> 8
l = 256 - len(res)
res = res[::-1] # reverse string
return chr(l) + res
def gob_varint_encode(i):
if i < 0:
i = (~i << 1) | 1
else:
i = i << 1
return gob_uvarint_encode(i)
# data generated with UtilTests.cpp (define GEN_PYTHON_TESTS to 1)
def test_gob():
assert gob_varint_encode(0) == chr(0)
assert gob_varint_encode(1) == chr(2)
assert gob_varint_encode(127) == chr(255) + chr(254)
assert gob_varint_encode(128) == chr(254) + chr(1) + chr(0)
assert gob_varint_encode(129) == chr(254) + chr(1) + chr(2)
assert gob_varint_encode(254) == chr(254) + chr(1) + chr(252)
assert gob_varint_encode(255) == chr(254) + chr(1) + chr(254)
assert gob_varint_encode(256) == chr(254) + chr(2) + chr(0)
assert gob_varint_encode(4660) == chr(254) + chr(36) + chr(104)
assert gob_varint_encode(74565) == chr(253) + chr(2) + chr(70) + chr(138)
assert gob_varint_encode(1193046) == chr(253) + \
chr(36) + chr(104) + chr(172)
assert gob_varint_encode(19088743) == chr(252) + \
chr(2) + chr(70) + chr(138) + chr(206)
assert gob_varint_encode(305419896) == chr(252) + \
chr(36) + chr(104) + chr(172) + chr(240)
assert gob_varint_encode(2147483647) == chr(252) + \
chr(255) + chr(255) + chr(255) + chr(254)
assert gob_varint_encode(-1) == chr(1)
assert gob_varint_encode(-2) == chr(3)
assert gob_varint_encode(-255) == chr(254) + chr(1) + chr(253)
assert gob_varint_encode(-256) == chr(254) + chr(1) + chr(255)
assert gob_varint_encode(-257) == chr(254) + chr(2) + chr(1)
assert gob_varint_encode(-4660) == chr(254) + chr(36) + chr(103)
assert gob_varint_encode(-74565) == chr(253) + chr(2) + chr(70) + chr(137)
assert gob_varint_encode(-1193046) == chr(253) + \
chr(36) + chr(104) + chr(171)
assert gob_varint_encode(-1197415) == chr(253) + \
chr(36) + chr(138) + chr(205)
assert gob_varint_encode(-19158648) == chr(252) + \
chr(2) + chr(72) + chr(172) + chr(239)
assert gob_uvarint_encode(0) == chr(0)
assert gob_uvarint_encode(1) == chr(1)
assert gob_uvarint_encode(127) == chr(127)
assert gob_uvarint_encode(128) == chr(255) + chr(128)
assert gob_uvarint_encode(129) == chr(255) + chr(129)
assert gob_uvarint_encode(254) == chr(255) + chr(254)
assert gob_uvarint_encode(255) == chr(255) + chr(255)
assert gob_uvarint_encode(256) == chr(254) + chr(1) + chr(0)
assert gob_uvarint_encode(4660) == chr(254) + chr(18) + chr(52)
assert gob_uvarint_encode(74565) == chr(253) + chr(1) + chr(35) + chr(69)
assert gob_uvarint_encode(1193046) == chr(253) + \
chr(18) + chr(52) + chr(86)
assert gob_uvarint_encode(19088743) == chr(252) + \
chr(1) + chr(35) + chr(69) + chr(103)
assert gob_uvarint_encode(305419896) == chr(252) + \
chr(18) + chr(52) + chr(86) + chr(120)
assert gob_uvarint_encode(2147483647) == chr(252) + \
chr(127) + chr(255) + chr(255) + chr(255)
assert gob_uvarint_encode(2147483648) == chr(252) + \
chr(128) + chr(0) + chr(0) + chr(0)
assert gob_uvarint_encode(2147483649) == chr(252) + \
chr(128) + chr(0) + chr(0) + chr(1)
assert gob_uvarint_encode(4294967294) == chr(252) + \
chr(255) + chr(255) + chr(255) + chr(254)
assert gob_uvarint_encode(4294967295) == chr(252) + \
chr(255) + chr(255) + chr(255) + chr(255)
# for easy generation of the compact form of storing strings in C
class SeqStrings(object):
def __init__(self):
self.strings = {}
self.strings_seq = ""
def get_all(self):
return self.strings_seq + chr(0)
# Note: this only works if strings are ascii, which is the case for us so
# far
def get_all_c_escaped(self):
s = self.get_all()
s = s.replace(chr(0), "\\0")
return '"' + s + '"'
def add(self, s):
self.get_offset(s)
def get_offset(self, s):
if s not in self.strings:
self.strings[s] = len(self.strings_seq)
self.strings_seq = self.strings_seq + s + chr(0)
return self.strings[s]
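# Minimal usage sketch (hypothetical strings): repeated strings share a single
# offset into the NUL-separated sequence, which can then be pasted into C code.
#   seq = SeqStrings()
#   seq.get_offset("foo")     # -> 0
#   seq.get_offset("bar")     # -> 4
#   seq.get_offset("foo")     # -> 0 (already stored)
#   seq.get_all_c_escaped()   # -> '"foo\0bar\0\0"'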
(FMT_NONE, FMT_LEFT, FMT_RIGHT) = (0, 1, 2)
def get_col_fmt(col_fmt, col):
if col >= len(col_fmt):
return FMT_NONE
return col_fmt[col]
def fmt_str(s, max, fmt):
add = max - len(s)
if fmt == FMT_LEFT:
return " " * add + s
elif fmt == FMT_RIGHT:
return s + " " * add
return s
"""
[
["a", "bc", "def"],
["ab", "fabo", "d"]
]
=>
[
["a ", "bc ", "def"],
["ab", "fabo", "d "]
]
"""
def fmt_rows(rows, col_fmt=[]):
col_max_len = {}
for row in rows:
for col in range(len(row)):
el_len = len(row[col])
curr_max = col_max_len.get(col, 0)
if el_len > curr_max:
col_max_len[col] = el_len
res = []
for row in rows:
res_row = []
for col in range(len(row)):
s = fmt_str(row[col], col_max_len[col], get_col_fmt(col_fmt, col))
res_row.append(s)
res.append(res_row)
return res
if __name__ == "__main__":
# parse_svnlog_out_test2()
# test_load_config()
test_gob()
| gpl-3.0 |
ran0101/namebench | libnamebench/config.py | 173 | 11391 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define and process configuration from command-line or config file."""
__author__ = '[email protected] (Thomas Stromberg)'
import ConfigParser
import csv
import optparse
import os.path
import re
import StringIO
import tempfile
import nb_third_party
# from third_party
import httplib2
import addr_util
import data_sources
import nameserver
import nameserver_list
import sys_nameservers
import util
import version
TRUNK_URL = 'http://namebench.googlecode.com/svn/trunk/'
SETS_TO_TAGS_MAP = {
'system': ['system', 'dhcp'],
'global': ['global', 'preferred'],
'preferred': ['preferred'],
'nearby': ['nearby', 'dhcp', 'internal'],
'all': ['global', 'nearby', 'country', 'system', 'dhcp', 'internal', 'network', 'preferred', 'isp', 'likely-isp'],
'regional': ['internal', 'country', 'nearby', 'network', 'isp', 'likely-isp'],
'isp': ['isp', 'dhcp', 'internal', 'likely-isp'],
'network': ['network', 'internal', 'dhcp'],
}
def ExpandSetsToTags(set_names):
tags = set()
for set_name in set_names:
tags.update(set(SETS_TO_TAGS_MAP.get(set_name, [set_name])))
return tags
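# For example (values taken from SETS_TO_TAGS_MAP above):
#   ExpandSetsToTags(['system'])               # -> set(['system', 'dhcp'])
#   ExpandSetsToTags(['preferred', 'network']) # -> set(['preferred', 'network',
#                                              #         'internal', 'dhcp'])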
def GetMergedConfiguration():
"""Get all of our configuration setup."""
options = ParseCommandLineArguments()
return MergeConfigurationFileOptions(options)
def ParseCommandLineArguments(default_config_file='config/namebench.cfg'):
"""Get our option configuration setup.
Args:
default_config_file: path to configuration (may be relative)
Returns:
the parsed options object, with tags and servers filled in from sets and arguments
"""
ds = data_sources.DataSources()
import_types = ds.ListSourceTypes()
parser = optparse.OptionParser()
parser.add_option('-6', '--ipv6_only', dest='ipv6_only', action='store_true', help='Only include IPv6 name servers')
parser.add_option('-4', '--ipv4_only', dest='ipv4_only', action='store_true', help='Only include IPv4 name servers')
parser.add_option('-b', '--censorship-checks', dest='enable_censorship_checks', action='store_true', help='Enable censorship checks')
parser.add_option('-c', '--country', dest='country', default=None, help='Set country (overrides GeoIP)')
parser.add_option('-H', '--skip-health-checks', dest='skip_health_checks', action='store_true', default=False, help='Skip health checks')
parser.add_option('-G', '--hide_results', dest='hide_results', action='store_true', help='Upload results, but keep them hidden from indexes.')
parser.add_option('-i', '--input', dest='input_source', help=('Import hostnames from an filename or application (%s)' % ', '.join(import_types)))
parser.add_option('-I', '--ips', dest='servers', default=[], help='A list of ips to test (can also be passed as arguments)')
parser.add_option('-j', '--health_threads', dest='health_thread_count', type='int', help='# of health check threads to use')
parser.add_option('-J', '--benchmark_threads', dest='benchmark_thread_count', type='int', help='# of benchmark threads to use')
parser.add_option('-k', '--distance_km', dest='distance', default=1250, help='Distance in km for determining if server is nearby')
parser.add_option('-K', '--overload_distance_km', dest='overload_distance', default=250, help='Like -k, but used if the country already has >350 servers.')
parser.add_option('-m', '--select_mode', dest='select_mode', default='automatic', help='Selection algorithm to use (weighted, random, chunk)')
parser.add_option('-M', '--max_servers_to_check', dest='max_servers_to_check', default=350, help='Maximum number of servers to inspect')
parser.add_option('-n', '--num_servers', dest='num_servers', type='int', help='Number of nameservers to include in test')
parser.add_option('-o', '--output', dest='output_file', default=None, help='Filename to write output to')
parser.add_option('-O', '--csv_output', dest='csv_file', default=None, help='Filename to write query details to (CSV)')
parser.add_option('-p', '--psn') # Silly Mac OS X adding -psn_0_xxxx
parser.add_option('-P', '--ping_timeout', dest='ping_timeout', type='float', help='# of seconds ping requests timeout in.')
parser.add_option('-q', '--query_count', dest='query_count', type='int', help='Number of queries per run.')
parser.add_option('-r', '--runs', dest='run_count', default=1, type='int', help='Number of test runs to perform on each nameserver.')
parser.add_option('-s', '--sets', dest='server_sets', default=[], help='Comma-separated list of sets to test (%s)' % SETS_TO_TAGS_MAP.keys())
parser.add_option('-T', '--template', dest='template', default='html', help='Template to use for output generation (ascii, html, resolv.conf)')
parser.add_option('-U', '--site_url', dest='site_url', help='URL to upload results to (http://namebench.appspot.com/)')
parser.add_option('-u', '--upload_results', dest='upload_results', action='store_true', help='Upload anonymized results to SITE_URL (False)')
parser.add_option('-V', '--invalidate_cache', dest='invalidate_cache', action='store_true', help='Force health cache to be invalidated')
parser.add_option('-w', '--open_webbrowser', dest='open_webbrowser', action='store_true', help='Opens the final report in your browser')
parser.add_option('-x', '--no_gui', dest='no_gui', action='store_true', help='Disable GUI')
parser.add_option('-Y', '--health_timeout', dest='health_timeout', type='float', help='health check timeout (in seconds)')
parser.add_option('-y', '--timeout', dest='timeout', type='float', help='# of seconds general requests timeout in.')
parser.add_option('-z', '--config', dest='config', default=default_config_file, help='Config file to use.')
options, args = parser.parse_args()
if options.server_sets:
if ',' in options.server_sets:
sets = options.server_sets.split(',')
else:
sets = [options.server_sets,]
options.tags = ExpandSetsToTags(sets)
else:
options.tags = set()
if args:
options.servers.extend(addr_util.ExtractIPsFromString(' '.join(args)))
options.tags.add('specified')
return options
def GetNameServerData(filename='config/servers.csv'):
server_file = util.FindDataFile(filename)
ns_data = _ParseNameServerListing(open(server_file))
# Add the system servers for later reference.
for i, ip in enumerate(sys_nameservers.GetCurrentNameServers()):
ns = nameserver.NameServer(ip, name='SYS%s-%s' % (i, ip), system_position=i)
ns_data.append(ns)
for i, ip in enumerate(sys_nameservers.GetAssignedNameServers()):
ns = nameserver.NameServer(ip, name='DHCP%s-%s' % (i, ip), dhcp_position=i)
ns_data.append(ns)
return ns_data
def _ParseNameServerListing(fp):
fields = ['ip', 'tags', 'provider', 'instance', 'hostname', 'location',
'coords', 'asn', 'list_note', 'urls']
reader = csv.DictReader(fp, fieldnames=fields)
ns_data = nameserver_list.NameServers()
for row in reader:
if row['instance']:
name = "%s (%s)" % (row['provider'], row['instance'])
else:
name = row['provider']
if row['coords']:
lat, lon = row['coords'].split(',')
else:
lat = lon = None
as_match = re.match('AS(\d+)(.*)', row['asn'])
if as_match:
asn, network_owner = as_match.groups()
network_owner = network_owner.lstrip(' ').rstrip(' ')
else:
asn = network_owner = None
ns_data.append(nameserver.NameServer(
row['ip'],
name=name,
tags=row['tags'].split(),
provider=row['provider'],
instance=row['instance'],
location=row['location'],
latitude=lat,
longitude=lon,
asn=asn,
hostname=row['hostname'],
network_owner=network_owner
))
return ns_data
def GetSanityChecks():
return GetAutoUpdatingConfigFile('config/sanity_checks.cfg')
def _GetLocalConfig(conf_file):
"""Read a simple local config file."""
local_config = _ReadConfigFile(conf_file)
return _ExpandConfigSections(local_config)
def _ReadConfigFile(conf_file):
"""Read a local config file."""
ref_file = util.FindDataFile(conf_file)
local_config = ConfigParser.ConfigParser()
local_config.read(ref_file)
return local_config
def GetAutoUpdatingConfigFile(conf_file):
"""Get the latest copy of the config file"""
local_config = _ReadConfigFile(conf_file)
download_latest = int(local_config.get('config', 'download_latest'))
local_version = int(local_config.get('config', 'version'))
if download_latest == 0:
return _ExpandConfigSections(local_config)
h = httplib2.Http(tempfile.gettempdir(), timeout=10)
url = '%s/%s' % (TRUNK_URL, conf_file)
content = None
try:
_, content = h.request(url, 'GET')
remote_config = ConfigParser.ConfigParser()
except:
print '* Unable to fetch remote %s: %s' % (conf_file, util.GetLastExceptionString())
return _ExpandConfigSections(local_config)
if content and '[config]' in content:
fp = StringIO.StringIO(content)
try:
remote_config.readfp(fp)
except:
print '* Unable to read remote %s: %s' % (conf_file, util.GetLastExceptionString())
return _ExpandConfigSections(local_config)
if remote_config and remote_config.has_section('config') and int(remote_config.get('config', 'version')) > local_version:
print '- Using %s' % url
return _ExpandConfigSections(remote_config)
else:
return _ExpandConfigSections(local_config)
def _ExpandConfigSections(config):
return dict([ (y, config.items(y)) for y in config.sections() if y != 'config' ])
def MergeConfigurationFileOptions(options):
"""Process configuration file, merge configuration with OptionParser.
Args:
options: optparse.OptionParser() object
Returns:
options: optparse.OptionParser() object
Raises:
ValueError: If we are unable to find a usable configuration file.
"""
config = ConfigParser.ConfigParser()
full_path = util.FindDataFile(options.config)
config.read(full_path)
if not config or not config.has_section('general'):
raise ValueError('Could not find usable configuration in %s (%s)' % (full_path, options.config))
general = dict(config.items('general'))
# -U implies -u
if options.site_url:
options.upload_results = True
for option in general:
if not getattr(options, option, None):
if 'timeout' in option:
value = float(general[option])
elif 'count' in option or 'num' in option or 'hide' in option:
value = int(general[option])
else:
value = general[option]
setattr(options, option, value)
for key in ('input_file', 'output_file', 'csv_file', 'input_source'):
value = getattr(options, key, None)
if value:
setattr(options, key, os.path.expanduser(value))
# This makes it easier to pass around later. Lazy-hack.
options.version = version.VERSION
return options
| apache-2.0 |
zzcclp/spark | examples/src/main/python/mllib/stratified_sampling_example.py | 27 | 1329 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext(appName="StratifiedSamplingExample") # SparkContext
# $example on$
# an RDD of any key value pairs
data = sc.parallelize([(1, 'a'), (1, 'b'), (2, 'c'), (2, 'd'), (2, 'e'), (3, 'f')])
# specify the exact fraction desired from each key as a dictionary
fractions = {1: 0.1, 2: 0.6, 3: 0.3}
approxSample = data.sampleByKey(False, fractions)
# $example off$
for each in approxSample.collect():
print(each)
sc.stop()
| apache-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/google/appengine/_internal/django/core/validators.py | 23 | 6691 | import re
import urlparse
from google.appengine._internal.django.core.exceptions import ValidationError
from google.appengine._internal.django.utils.translation import ugettext_lazy as _
from google.appengine._internal.django.utils.encoding import smart_unicode
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
try:
from google.appengine._internal.django.conf import settings
URL_VALIDATOR_USER_AGENT = settings.URL_VALIDATOR_USER_AGENT
except ImportError:
# It's OK if Django settings aren't configured.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
class RegexValidator(object):
regex = ''
message = _(u'Enter a valid value.')
code = 'invalid'
def __init__(self, regex=None, message=None, code=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if isinstance(self.regex, basestring):
self.regex = re.compile(regex)
def __call__(self, value):
"""
Validates that the input matches the regular expression.
"""
if not self.regex.search(smart_unicode(value)):
raise ValidationError(self.message, code=self.code)
class URLValidator(RegexValidator):
regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def __init__(self, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT):
super(URLValidator, self).__init__()
self.verify_exists = verify_exists
self.user_agent = validator_user_agent
def __call__(self, value):
try:
super(URLValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain
if value:
value = smart_unicode(value)
scheme, netloc, path, query, fragment = urlparse.urlsplit(value)
try:
netloc = netloc.encode('idna') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
url = value
if self.verify_exists:
import urllib2
headers = {
"Accept": "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language": "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection": "close",
"User-Agent": self.user_agent,
}
try:
req = urllib2.Request(url, None, headers)
u = urllib2.urlopen(req)
except ValueError:
raise ValidationError(_(u'Enter a valid URL.'), code='invalid')
except: # urllib2.URLError, httplib.InvalidURL, etc.
raise ValidationError(_(u'This URL appears to be a broken link.'), code='invalid_link')
def validate_integer(value):
try:
int(value)
except (ValueError, TypeError), e:
raise ValidationError('')
class EmailValidator(RegexValidator):
def __call__(self, value):
try:
super(EmailValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain-part
if value and u'@' in value:
parts = value.split(u'@')
domain_part = parts[-1]
try:
parts[-1] = parts[-1].encode('idna')
except UnicodeError:
raise e
super(EmailValidator, self).__call__(u'@'.join(parts))
else:
raise
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
validate_email = EmailValidator(email_re, _(u'Enter a valid e-mail address.'), 'invalid')
slug_re = re.compile(r'^[-\w]+$')
validate_slug = RegexValidator(slug_re, _(u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid')
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _(u'Enter a valid IPv4 address.'), 'invalid')
comma_separated_int_list_re = re.compile('^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _(u'Enter only digits separated by commas.'), 'invalid')
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _(u'Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value):
self.limit_value = limit_value
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned}
if self.compare(cleaned, self.limit_value):
raise ValidationError(
self.message % params,
code=self.code,
params=params,
)
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _(u'Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _(u'Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).')
code = 'min_length'
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).')
code = 'max_length'
| lgpl-3.0 |
certik/sympy-oldcore | sympy/ntheory/partitions_.py | 1 | 1870 |
def npartitions(n):
"""
Calculate the partition function P(n), i.e. the number of ways that
n can be written as a sum of positive integers.
P(n) is computed using a straightforward implementation of the
Hardy-Ramanujan-Rademacher formula, described e.g. at
http://mathworld.wolfram.com/PartitionFunctionP.html
The speed is decent up to n = 10**5 or so. The function has
been tested to give the correct result for n = 10**6.
"""
n = int(n)
if n < 0:
return 0
if n <= 5:
return [1, 1, 2, 3, 5, 7][n]
from sympy.core.numbers import gcd
from sympy.numerics import Float
from sympy.numerics.functions import pi_float, sqrt, exp, log, cos
def frac(x):
return x - int(x)
def D(n, j):
pi = pi_float()
a = sqrt(Float(2)/3) * pi / j
b = Float(n) - Float(1)/24
c = sqrt(b)
expa = exp(a*c)
iexpa = Float(1)/expa
ch = (expa + iexpa)*0.5
sh = (expa - iexpa)*0.5
return sqrt(j) / (2*sqrt(2)*b*pi) * (a*ch-sh/c)
def A(n, j):
if j == 1:
return Float(1)
s = Float(0)
pi = pi_float()
for h in xrange(1, j):
if gcd(h,j) == 1:
s += cos((g(h,j)-2*h*n)*pi/j)
return s
def g(h, j):
if j < 3:
return Float(0)
s = Float(0)
for k in xrange(1, j):
s += k*(frac(h*Float(k)/j)-0.5)
return s
# estimate number of digits in p(n)
pdigits = int((pi_float()*sqrt(2.0*n/3)-log(4*n))/log(10)+1)
Float.store()
Float.setdps(pdigits*1.1 + 10)
s = Float(0)
M = max(6, int(0.24*sqrt(n)+4))
for q in xrange(1, M):
s += A(n,q) * D(n,q)
p = int(s + 0.5)
Float.revert()
return p
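# A couple of well-known partition numbers, handy as a quick sanity check of
# the Rademacher series evaluated above:
#   npartitions(10)  == 42
#   npartitions(100) == 190569292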
| bsd-3-clause |
noobcoderT/ryu-3.21 | ryu/app/rest_conf_switch.py | 22 | 5715 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a set of REST APIs for switch configuration.
- Per-switch Key-Value store
Used by OpenStack Ryu agent.
"""
import httplib
import json
import logging
from webob import Response
from ryu.app.wsgi import ControllerBase
from ryu.base import app_manager
from ryu.controller import conf_switch
from ryu.lib import dpid as dpid_lib
# REST API for switch configuration
#
# get all the switches
# GET /v1.0/conf/switches
#
# get all the configuration keys of a switch
# GET /v1.0/conf/switches/<dpid>
#
# delete all the configuration of a switch
# DELETE /v1.0/conf/switches/<dpid>
#
# set the <key> configuration of a switch
# PUT /v1.0/conf/switches/<dpid>/<key>
#
# get the <key> configuration of a switch
# GET /v1.0/conf/switches/<dpid>/<key>
#
# delete the <key> configuration of a switch
# DELETE /v1.0/conf/switches/<dpid>/<key>
#
# where
# <dpid>: datapath id in 16 hex
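#
# Illustrative calls (hypothetical dpid, key and value; assumes the Ryu wsgi
# service is reachable on localhost:8080 and that PUT bodies are JSON-encoded):
#   curl -X PUT -d '"br-int"' \
#     http://localhost:8080/v1.0/conf/switches/0000000000000001/some_key
#   curl http://localhost:8080/v1.0/conf/switches/0000000000000001/some_key
#   curl -X DELETE http://localhost:8080/v1.0/conf/switches/0000000000000001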
class ConfSwitchController(ControllerBase):
def __init__(self, req, link, data, **config):
super(ConfSwitchController, self).__init__(req, link, data, **config)
self.conf_switch = data
def list_switches(self, _req, **_kwargs):
dpids = self.conf_switch.dpids()
body = json.dumps([dpid_lib.dpid_to_str(dpid) for dpid in dpids])
return Response(content_type='application/json', body=body)
@staticmethod
def _do_switch(dpid, func, ret_func):
dpid = dpid_lib.str_to_dpid(dpid)
try:
ret = func(dpid)
except KeyError:
return Response(status=httplib.NOT_FOUND,
body='no dpid is found %s' %
dpid_lib.dpid_to_str(dpid))
return ret_func(ret)
def delete_switch(self, _req, dpid, **_kwargs):
def _delete_switch(dpid):
self.conf_switch.del_dpid(dpid)
return None
def _ret(_ret):
return Response(status=httplib.ACCEPTED)
return self._do_switch(dpid, _delete_switch, _ret)
def list_keys(self, _req, dpid, **_kwargs):
def _list_keys(dpid):
return self.conf_switch.keys(dpid)
def _ret(keys):
body = json.dumps(keys)
return Response(content_type='application/json', body=body)
return self._do_switch(dpid, _list_keys, _ret)
@staticmethod
def _do_key(dpid, key, func, ret_func):
dpid = dpid_lib.str_to_dpid(dpid)
try:
ret = func(dpid, key)
except KeyError:
return Response(status=httplib.NOT_FOUND,
body='no dpid/key is found %s %s' %
(dpid_lib.dpid_to_str(dpid), key))
return ret_func(ret)
def set_key(self, req, dpid, key, **_kwargs):
def _set_val(dpid, key):
val = json.loads(req.body)
self.conf_switch.set_key(dpid, key, val)
return None
def _ret(_ret):
return Response(status=httplib.CREATED)
return self._do_key(dpid, key, _set_val, _ret)
def get_key(self, _req, dpid, key, **_kwargs):
def _get_key(dpid, key):
return self.conf_switch.get_key(dpid, key)
def _ret(val):
return Response(content_type='application/json',
body=json.dumps(val))
return self._do_key(dpid, key, _get_key, _ret)
def delete_key(self, _req, dpid, key, **_kwargs):
def _delete_key(dpid, key):
self.conf_switch.del_key(dpid, key)
return None
def _ret(_ret):
return Response()
return self._do_key(dpid, key, _delete_key, _ret)
class ConfSwitchAPI(app_manager.RyuApp):
_CONTEXTS = {
'conf_switch': conf_switch.ConfSwitchSet,
}
def __init__(self, *args, **kwargs):
super(ConfSwitchAPI, self).__init__(*args, **kwargs)
self.conf_switch = kwargs['conf_switch']
wsgi = kwargs['wsgi']
mapper = wsgi.mapper
controller = ConfSwitchController
wsgi.registory[controller.__name__] = self.conf_switch
route_name = 'conf_switch'
uri = '/v1.0/conf/switches'
mapper.connect(route_name, uri, controller=controller,
action='list_switches',
conditions=dict(method=['GET']))
uri += '/{dpid}'
requirements = {'dpid': dpid_lib.DPID_PATTERN}
s = mapper.submapper(controller=controller, requirements=requirements)
s.connect(route_name, uri, action='delete_switch',
conditions=dict(method=['DELETE']))
s.connect(route_name, uri, action='list_keys',
conditions=dict(method=['GET']))
uri += '/{key}'
s.connect(route_name, uri, action='set_key',
conditions=dict(method=['PUT']))
s.connect(route_name, uri, action='get_key',
conditions=dict(method=['GET']))
s.connect(route_name, uri, action='delete_key',
conditions=dict(method=['DELETE']))
| apache-2.0 |
kawasaki2013/python-for-android-x86 | python3-alpha/python3-src/Lib/idlelib/GrepDialog.py | 49 | 4062 | import os
import fnmatch
import sys
from tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
def grep(text, io=None, flist=None):
root = text._root()
engine = SearchEngine.get(root)
if not hasattr(engine, "_grepdialog"):
engine._grepdialog = GrepDialog(root, engine, flist)
dialog = engine._grepdialog
searchphrase = text.get("sel.first", "sel.last")
dialog.open(text, searchphrase, io)
class GrepDialog(SearchDialogBase):
title = "Find in Files Dialog"
icon = "Grep"
needwrapbutton = 0
def __init__(self, root, engine, flist):
SearchDialogBase.__init__(self, root, engine)
self.flist = flist
self.globvar = StringVar(root)
self.recvar = BooleanVar(root)
def open(self, text, searchphrase, io=None):
SearchDialogBase.open(self, text, searchphrase)
if io:
path = io.filename or ""
else:
path = ""
dir, base = os.path.split(path)
head, tail = os.path.splitext(base)
if not tail:
tail = ".py"
self.globvar.set(os.path.join(dir, "*" + tail))
def create_entries(self):
SearchDialogBase.create_entries(self)
self.globent = self.make_entry("In files:", self.globvar)
def create_other_buttons(self):
f = self.make_frame()
btn = Checkbutton(f, anchor="w",
variable=self.recvar,
text="Recurse down subdirectories")
btn.pack(side="top", fill="both")
btn.select()
def create_command_buttons(self):
SearchDialogBase.create_command_buttons(self)
self.make_button("Search Files", self.default_command, 1)
def default_command(self, event=None):
prog = self.engine.getprog()
if not prog:
return
path = self.globvar.get()
if not path:
self.top.bell()
return
from idlelib.OutputWindow import OutputWindow
save = sys.stdout
try:
sys.stdout = OutputWindow(self.flist)
self.grep_it(prog, path)
finally:
sys.stdout = save
def grep_it(self, prog, path):
dir, base = os.path.split(path)
list = self.findfiles(dir, base, self.recvar.get())
list.sort()
self.close()
pat = self.engine.getpat()
print("Searching %r in %s ..." % (pat, path))
hits = 0
for fn in list:
try:
f = open(fn)
except IOError as msg:
print(msg)
continue
lineno = 0
while 1:
block = f.readlines(100000)
if not block:
break
for line in block:
lineno = lineno + 1
if line[-1:] == '\n':
line = line[:-1]
if prog.search(line):
sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line))
hits = hits + 1
if hits:
if hits == 1:
s = ""
else:
s = "s"
print("Found", hits, "hit%s." % s)
print("(Hint: right-click to open locations.)")
else:
print("No hits.")
def findfiles(self, dir, base, rec):
try:
names = os.listdir(dir or os.curdir)
except os.error as msg:
print(msg)
return []
list = []
subdirs = []
for name in names:
fn = os.path.join(dir, name)
if os.path.isdir(fn):
subdirs.append(fn)
else:
if fnmatch.fnmatch(name, base):
list.append(fn)
if rec:
for subdir in subdirs:
list.extend(self.findfiles(subdir, base, rec))
return list
def close(self, event=None):
if self.top:
self.top.grab_release()
self.top.withdraw()
| apache-2.0 |
OCA/hr | hr_employee_display_own_info/tests/test_employee_display_own_info.py | 1 | 1081 | # Copyright 2017-2019 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestEmployeeDisplayOwnInfo(TransactionCase):
def setUp(self):
super(TestEmployeeDisplayOwnInfo, self).setUp()
self.user_test = self.env.ref('base.user_demo')
self.employee = self.env['hr.employee'].create({
'name': 'Employee',
})
def test_01(self):
self.assertFalse(self.user_test.has_group('hr.group_hr_user'))
self.assertFalse(
self.employee.sudo(self.user_test).employee_display_personal_data)
def test_02(self):
self.assertTrue(self.env.user.has_group('hr.group_hr_user'))
self.assertTrue(self.employee.employee_display_personal_data)
def test_03(self):
self.employee.write({'user_id': self.user_test.id})
self.assertFalse(self.user_test.has_group('hr.group_hr_user'))
self.assertTrue(
self.employee.sudo(self.user_test).employee_display_personal_data)
| agpl-3.0 |
genesi/u-boot-upstream | tools/patman/gitutil.py | 7 | 13538 | # Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import command
import re
import os
import series
import settings
import subprocess
import sys
import terminal
def CountCommitsToBranch():
"""Returns number of commits between HEAD and the tracking branch.
This looks back to the tracking branch and works out the number of commits
since then.
Return:
Number of patches that exist on top of the branch
"""
pipe = [['git', 'log', '--no-color', '--oneline', '@{upstream}..'],
['wc', '-l']]
stdout = command.RunPipe(pipe, capture=True, oneline=True)
patch_count = int(stdout)
return patch_count
def CreatePatches(start, count, series):
"""Create a series of patches from the top of the current branch.
The patch files are written to the current directory using
git format-patch.
Args:
start: Commit to start from: 0=HEAD, 1=next one, etc.
count: number of commits to include
Return:
Filename of cover letter
List of filenames of patch files
"""
if series.get('version'):
version = '%s ' % series['version']
cmd = ['git', 'format-patch', '-M', '--signoff']
if series.get('cover'):
cmd.append('--cover-letter')
prefix = series.GetPatchPrefix()
if prefix:
cmd += ['--subject-prefix=%s' % prefix]
cmd += ['HEAD~%d..HEAD~%d' % (start + count, start)]
stdout = command.RunList(cmd)
files = stdout.splitlines()
# We have an extra file if there is a cover letter
if series.get('cover'):
return files[0], files[1:]
else:
return None, files
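# Illustrative example (assumed values, not produced by this file): for
# start=0, count=3, a 'cover' entry in the series and subject prefix 'PATCH',
# CreatePatches() ends up running roughly:
#   git format-patch -M --signoff --cover-letter --subject-prefix=PATCH HEAD~3..HEAD~0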
def ApplyPatch(verbose, fname):
"""Apply a patch with git am to test it
TODO: Convert these to use command, with stderr option
Args:
fname: filename of patch file to apply
"""
cmd = ['git', 'am', fname]
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
    re_error = re.compile(r'^error: patch failed: (.+):(\d+)')
for line in stderr.splitlines():
if verbose:
print line
match = re_error.match(line)
if match:
            print "warning: %s:%d: Patch failed" % (match.group(1),
                int(match.group(2)))
return pipe.returncode == 0, stdout
def ApplyPatches(verbose, args, start_point):
"""Apply the patches with git am to make sure all is well
Args:
verbose: Print out 'git am' output verbatim
args: List of patch files to apply
start_point: Number of commits back from HEAD to start applying.
Normally this is len(args), but it can be larger if a start
offset was given.
"""
error_count = 0
col = terminal.Color()
# Figure out our current position
cmd = ['git', 'name-rev', 'HEAD', '--name-only']
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode:
str = 'Could not find current commit name'
print col.Color(col.RED, str)
print stdout
return False
old_head = stdout.splitlines()[0]
# Checkout the required start point
cmd = ['git', 'checkout', 'HEAD~%d' % start_point]
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode:
str = 'Could not move to commit before patch series'
print col.Color(col.RED, str)
print stdout, stderr
return False
# Apply all the patches
for fname in args:
ok, stdout = ApplyPatch(verbose, fname)
if not ok:
print col.Color(col.RED, 'git am returned errors for %s: will '
'skip this patch' % fname)
if verbose:
print stdout
error_count += 1
cmd = ['git', 'am', '--skip']
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode != 0:
print col.Color(col.RED, 'Unable to skip patch! Aborting...')
print stdout
break
# Return to our previous position
cmd = ['git', 'checkout', old_head]
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode:
print col.Color(col.RED, 'Could not move back to head commit')
print stdout, stderr
return error_count == 0
def BuildEmailList(in_list, tag=None, alias=None):
"""Build a list of email addresses based on an input list.
Takes a list of email addresses and aliases, and turns this into a list
of only email address, by resolving any aliases that are present.
If the tag is given, then each email address is prepended with this
tag and a space. If the tag starts with a minus sign (indicating a
command line parameter) then the email address is quoted.
Args:
in_list: List of aliases/email addresses
tag: Text to put before each address
Returns:
List of email addresses
>>> alias = {}
>>> alias['fred'] = ['[email protected]']
>>> alias['john'] = ['[email protected]']
>>> alias['mary'] = ['Mary Poppins <[email protected]>']
>>> alias['boys'] = ['fred', ' john']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> BuildEmailList(['john', 'mary'], None, alias)
['[email protected]', 'Mary Poppins <[email protected]>']
>>> BuildEmailList(['john', 'mary'], '--to', alias)
['--to "[email protected]"', \
'--to "Mary Poppins <[email protected]>"']
>>> BuildEmailList(['john', 'mary'], 'Cc', alias)
['Cc [email protected]', 'Cc Mary Poppins <[email protected]>']
"""
quote = '"' if tag and tag[0] == '-' else ''
raw = []
for item in in_list:
raw += LookupEmail(item, alias)
result = []
for item in raw:
if not item in result:
result.append(item)
if tag:
return ['%s %s%s%s' % (tag, quote, email, quote) for email in result]
return result
def EmailPatches(series, cover_fname, args, dry_run, cc_fname,
self_only=False, alias=None):
"""Email a patch series.
Args:
series: Series object containing destination info
cover_fname: filename of cover letter
args: list of filenames of patch files
dry_run: Just return the command that would be run
cc_fname: Filename of Cc file for per-commit Cc
self_only: True to just email to yourself as a test
Returns:
Git command that was/would be run
# For the duration of this doctest pretend that we ran patman with ./patman
>>> _old_argv0 = sys.argv[0]
>>> sys.argv[0] = './patman'
>>> alias = {}
>>> alias['fred'] = ['[email protected]']
>>> alias['john'] = ['[email protected]']
>>> alias['mary'] = ['[email protected]']
>>> alias['boys'] = ['fred', ' john']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> alias[os.getenv('USER')] = ['[email protected]']
>>> series = series.Series()
>>> series.to = ['fred']
>>> series.cc = ['mary']
>>> EmailPatches(series, 'cover', ['p1', 'p2'], True, 'cc-fname', False, \
alias)
'git send-email --annotate --to "[email protected]" --cc \
"[email protected]" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
>>> EmailPatches(series, None, ['p1'], True, 'cc-fname', False, alias)
'git send-email --annotate --to "[email protected]" --cc \
"[email protected]" --cc-cmd "./patman --cc-cmd cc-fname" p1'
>>> series.cc = ['all']
>>> EmailPatches(series, 'cover', ['p1', 'p2'], True, 'cc-fname', True, \
alias)
'git send-email --annotate --to "[email protected]" --cc-cmd "./patman \
--cc-cmd cc-fname" cover p1 p2'
>>> EmailPatches(series, 'cover', ['p1', 'p2'], True, 'cc-fname', False, \
alias)
'git send-email --annotate --to "[email protected]" --cc \
"[email protected]" --cc "[email protected]" --cc \
"[email protected]" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
# Restore argv[0] since we clobbered it.
>>> sys.argv[0] = _old_argv0
"""
to = BuildEmailList(series.get('to'), '--to', alias)
if not to:
print ("No recipient, please add something like this to a commit\n"
"Series-to: Fred Bloggs <[email protected]>")
return
cc = BuildEmailList(series.get('cc'), '--cc', alias)
if self_only:
to = BuildEmailList([os.getenv('USER')], '--to', alias)
cc = []
cmd = ['git', 'send-email', '--annotate']
cmd += to
cmd += cc
cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
if cover_fname:
cmd.append(cover_fname)
cmd += args
str = ' '.join(cmd)
if not dry_run:
os.system(str)
return str
def LookupEmail(lookup_name, alias=None, level=0):
"""If an email address is an alias, look it up and return the full name
TODO: Why not just use git's own alias feature?
Args:
lookup_name: Alias or email address to look up
Returns:
tuple:
list containing a list of email addresses
Raises:
OSError if a recursive alias reference was found
ValueError if an alias was not found
>>> alias = {}
>>> alias['fred'] = ['[email protected]']
>>> alias['john'] = ['[email protected]']
>>> alias['mary'] = ['[email protected]']
>>> alias['boys'] = ['fred', ' john', '[email protected]']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> alias['loop'] = ['other', 'john', ' mary ']
>>> alias['other'] = ['loop', 'john', ' mary ']
>>> LookupEmail('mary', alias)
['[email protected]']
>>> LookupEmail('[email protected]', alias)
['[email protected]']
>>> LookupEmail('boys', alias)
['[email protected]', '[email protected]']
>>> LookupEmail('all', alias)
['[email protected]', '[email protected]', '[email protected]']
>>> LookupEmail('odd', alias)
Traceback (most recent call last):
...
ValueError: Alias 'odd' not found
>>> LookupEmail('loop', alias)
Traceback (most recent call last):
...
OSError: Recursive email alias at 'other'
"""
if not alias:
alias = settings.alias
lookup_name = lookup_name.strip()
if '@' in lookup_name: # Perhaps a real email address
return [lookup_name]
lookup_name = lookup_name.lower()
if level > 10:
raise OSError, "Recursive email alias at '%s'" % lookup_name
out_list = []
if lookup_name:
if not lookup_name in alias:
raise ValueError, "Alias '%s' not found" % lookup_name
for item in alias[lookup_name]:
todo = LookupEmail(item, alias, level + 1)
for new_item in todo:
if not new_item in out_list:
out_list.append(new_item)
#print "No match for alias '%s'" % lookup_name
return out_list
def GetTopLevel():
"""Return name of top-level directory for this git repo.
Returns:
Full path to git top-level directory
This test makes sure that we are running tests in the right subdir
>>> os.path.realpath(os.path.dirname(__file__)) == \
os.path.join(GetTopLevel(), 'tools', 'patman')
True
"""
return command.OutputOneLine('git', 'rev-parse', '--show-toplevel')
def GetAliasFile():
"""Gets the name of the git alias file.
Returns:
Filename of git alias file, or None if none
"""
fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile')
if fname:
fname = os.path.join(GetTopLevel(), fname.strip())
return fname
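# Illustrative setup (assumed, not defined in this file): patman only picks up
# aliases if git is pointed at an alias file, e.g.
#   git config sendemail.aliasesfile doc/git-mailrc
# GetAliasFile() then resolves that (possibly relative) path against the
# repository's top-level directory.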
def GetDefaultUserName():
"""Gets the user.name from .gitconfig file.
Returns:
User name found in .gitconfig file, or None if none
"""
uname = command.OutputOneLine('git', 'config', '--global', 'user.name')
return uname
def GetDefaultUserEmail():
"""Gets the user.email from the global .gitconfig file.
Returns:
User's email found in .gitconfig file, or None if none
"""
uemail = command.OutputOneLine('git', 'config', '--global', 'user.email')
return uemail
def Setup():
"""Set up git utils, by reading the alias files."""
# Check for a git alias file also
alias_fname = GetAliasFile()
if alias_fname:
settings.ReadGitAliases(alias_fname)
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-2.0 |
webcomics/dosage | dosagelib/plugins/namirdeiter.py | 1 | 2179 | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class NamirDeiter(_ParserScraper):
imageSearch = '//img[contains(@src, "comics/")]'
prevSearch = ('//a[@rel="prev"]',
'//a[./img[contains(@src, "previous")]]',
'//a[contains(text(), "Previous")]')
def __init__(self, name, baseUrl, first=None, last=None):
if name == 'NamirDeiter':
super(NamirDeiter, self).__init__(name)
else:
super(NamirDeiter, self).__init__('NamirDeiter/' + name)
self.url = 'https://' + baseUrl + '/'
self.stripUrl = self.url + 'comics/index.php?date=%s'
if first:
self.firstStripUrl = self.stripUrl % first
else:
self.firstStripUrl = self.url + 'comics/'
if last:
self.url = self.stripUrl % last
self.endOfLife = True
def link_modifier(self, fromurl, tourl):
# Links are often absolute and keep jumping between http and https
return tourl.replace('http:', 'https:').replace('/www.', '/')
@classmethod
def getmodules(cls):
return (
cls('ApartmentForTwo', 'apartmentfor2.com'),
cls('NamirDeiter', 'namirdeiter.com', last='20150410'),
cls('NicoleAndDerek', 'nicoleandderek.com'),
cls('OneHundredPercentCat', 'ndunlimited.com/100cat', last='20121001'),
cls('SpareParts', 'sparepartscomics.com', first='20031022', last='20080331'),
cls('TheNDU', 'thendu.com'),
cls('WonderKittens', 'wonderkittens.com'),
cls('YouSayItFirst', 'yousayitfirst.com', first='20040220', last='20130125'),
)
class UnlikeMinerva(_ParserScraper):
name = 'NamirDeiter/UnlikeMinerva'
baseUrl = 'https://unlikeminerva.com/archive/index.php'
stripUrl = baseUrl + '?week=%s'
url = stripUrl % '127'
firstStripUrl = stripUrl % '26'
imageSearch = '//img[contains(@src, "archive/")]'
prevSearch = '//a[./img[contains(@src, "previous")]]'
multipleImagesPerStrip = True
endOfLife = True
| mit |
weaver-viii/h2o-3 | py2/h2o_cmd.py | 20 | 16497 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# it it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# it it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
    # either return the first col, or the col identified by label. the column identified could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects due we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...bit e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
            assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
    def check(self,
            expectedNumRows=None, expectedNumCols=None,
            expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
            noPrint=False, **kwargs):
        if expectedLabel is not None:
            assert self.label == expectedLabel
        if expectedType is not None:
            assert self.type == expectedType
        if expectedMissing is not None:
            assert self.missing == expectedMissing
        if expectedDomain is not None:
            assert self.domain == expectedDomain
        if expectedBinsSum is not None:
            assert self.binsSum == expectedBinsSum
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
        # we need both colIndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| apache-2.0 |
ingted/voltdb | tests/test_apps/csvbenchmark/csvbenchmark.py | 7 | 15913 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from optparse import OptionParser
from random import randint
import os
import sys
import re
from numpy import *
import random
from subprocess import Popen,PIPE
import shlex
import datetime
from voltdbclient import FastSerializer, VoltProcedure
import time
CSVLOADER = "bin/csvloader"
#SQLCMD = "$VOLTDB_HOME/bin/sqlcmd --servers=%s" % servers
# declare cases implemented and the data generator code
CASES = {
"narrow_short_noix" : "data_narrow_short",
"narrow_short_ix" : "data_narrow_short",
"narrow_short_cmpix" : "data_narrow_short",
"narrow_short_hasview" : "data_narrow_short",
"narrow_long_noix" : "data_narrow_long",
"narrow_long_ix" : "data_narrow_long",
"narrow_long_cmpix" : "data_narrow_long",
"narrow_long_hasview" : "data_narrow_long",
"generic_noix" : "data_generic",
"generic_ix" : "data_generic",
"replicated_pk" : "data_replicated_pk",
}
def list_cases():
print "List of implemented csvloader cases:\n"
for k in sorted(CASES.keys()):
print "\t%s" % k
# build the reference character set
# user all possible unicode-16 codes (first code page 0000-ffff)
UNICODE_CHARSET = ""
#for c in range(32,64*1024):
for c in range(32,127):
# 0-31 control chars
# 34 "
# 36 $
# 37 %
# 38 &
# 39 '
# 44 , reserved as field separator
# 91 [
# 92 \ just avoid it
# 93 ]
# 94 ^ quote reserved for loader
# 95 _ 37 % for LIKE % bbi escape doesn't work
# 96 `
# 123 {
# 124 | reserved as field separator
# 125 }
# 126 ~
# 127 DLE
if not (c==44 or c==127):
UNICODE_CHARSET += unichr(c)
ESCAPE_CHAR="\\"
QUOTE_CHAR="\""
UNICODE_CHARSET_MINUS_QUOTE_CHAR = UNICODE_CHARSET.replace(QUOTE_CHAR, "")
UNICODE_CHARSET_MINUS_WHITESPACE_CHARS = UNICODE_CHARSET.replace(" \t\n","")
NUMERIC_CHARSET="0123456789"
# XXX not yet handling leading/trailing zeroes and many other
# cases which are useful in testing, but this is not a test it is a benchmark.
def gentext(size):
r = ''.join(random.sample(UNICODE_CHARSET, len(UNICODE_CHARSET)))
s = r * int(size/len(r)) + r[:size%len(r)]
m = re.match(r'(.*)([ \t\n]+)$', s)
if m:
s = m.group(1) + ''.join(random.sample(UNICODE_CHARSET_MINUS_WHITESPACE_CHARS, len(m.group(2))))
s = s.replace(QUOTE_CHAR, QUOTE_CHAR+QUOTE_CHAR)[:size]
if (len(s) == 1 and s[0] == QUOTE_CHAR) or (len(s) > 1 and s[-1] == QUOTE_CHAR and s[-2] != QUOTE_CHAR):
s = s[:-1] + random.choice(UNICODE_CHARSET_MINUS_QUOTE_CHAR)
assert len(s) == size
return QUOTE_CHAR + s[:size] + QUOTE_CHAR
def genfixeddecimalstr(size=38, precision=12, signed=True):
# voltdb decimal is 16-byte with fixed scale of 12 and precision of 38
p = -1*precision
r = ''.join(random.sample(NUMERIC_CHARSET, len(NUMERIC_CHARSET)))
r = r * int(size/len(r)) + r[:size%len(r)]
if (p>0):
r = r[:p] + '.' + r[p:]
if signed:
        r = random.choice(["-","+",""]) + r
return r
def gencurrency(size=16, precision=4):
c = genfixeddecimalstr(size, precision)
curr = re.match(r'^0*(\d+\.*\d+)0*$', c)
print curr.group(1)
return curr.group(1)
def genint(size):
if size == 1:
return randint(-2**7+1, 2**7-1)
elif size == 2:
return randint(-2**15+1, 2**15-1)
elif size == 4:
return randint(-2**31+1, 2**31-1)
elif size == 8:
return randint(-2**63+1, 2**63-1)
else:
raise RuntimeError ("invalid size for integer %d" % size)
def gennumsequence(__seq):
    # pass in a list of one number
assert (isinstance(__seq, list) and len(__seq) == 1)
__seq[0] += 1
return __seq[0]
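# Illustrative usage (not part of the benchmark run): the one-element list acts
# as a mutable counter that persists across calls, e.g.
#   myseq = [0]
#   gennumsequence(myseq)   # -> 1
#   gennumsequence(myseq)   # -> 2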
def gentimestamp():
return datetime.datetime.today().strftime('"%Y-%m-%d %H:%M:%S"')
def gendouble():
return random.random() * genint(4)
def run_readlines(cmd):
fd = os.popen(cmd)
result = fd.read()
#print result
fd.close()
return result
def run_csvloader(schema, data_file):
rowcount = options.ROW_COUNT
elapsed_results = []
parsing_results = []
loading_results = []
for I in range(0, options.TRIES):
home = os.getenv("VOLTDB_HOME")
before_row_count = get_table_row_count(schema)
cmd = "%s --servers=%s" % (os.path.join(home, CSVLOADER), ','.join(options.servers))
if options.csvoptions:
cmd += " -o " + ",".join(options.csvoptions)
cmd += " %s -f %s" % (schema, data_file)
if options.VERBOSE:
print "starting csvloader with command: " + cmd
start_time = time.time()
p = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
run_time = time.time() - start_time
stdout_lines = stdout.split('\n')
if options.VERBOSE:
for l in stdout_lines:
print '[csvloader stdout] ' + l
rc = p.returncode
actual_row_count = get_table_row_count(schema)
if rc != 0:
print "CSVLoader failed with rc %d" % rc
for l in stderr.split('\n'):
print '[csvloader stderr] ' + l
raise RuntimeError ("CSV Loader failed")
        # XXX seems that csvloader doesn't always set a nonzero returncode if it fails to load rows
m = re.search(r'^Read (\d+) rows from file and successfully inserted (\d+) rows \(final\)$',
stdout, flags=re.M)
if m is None or int(m.group(1)) != rowcount or m.group(1) != m.group(2):
raise RuntimeError ("CSV Loader failed to load all rows")
if int(before_row_count) + rowcount != int(actual_row_count):
raise RuntimeError ("Actual table row count was not as expected exp:%d act:%d" % (rowcount,actual_row_count))
elapsed_results.append(float(run_time))
def analyze_results(perf_results):
#print "raw perf_results: %s" % perf_results
pr = sorted(perf_results)[1:-1]
if len(pr) == 0:
pr = perf_results
return (average(pr), std(pr))
avg, stddev = analyze_results(elapsed_results)
print "statistics for %s execution time avg: %f stddev: %f rows/sec: %f rows: %d file size: %d tries: %d" %\
(schema, avg, stddev, rowcount/avg, rowcount, os.path.getsize(data_file), options.TRIES)
if options.statsfile:
with open(options.statsfile, "a") as sf:
# report duration in milliseconds for stats collector
print >>sf, "%s,%f,%d,0,0,0,0,0,0,0,0,0,0" % (schema, avg*1000.0, rowcount)
return (rowcount, avg, stddev)
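# Example of the loader invocation run_csvloader() builds (hosts and paths are
# illustrative): with VOLTDB_HOME set and no extra csvoptions,
#   $VOLTDB_HOME/bin/csvloader --servers=host1,host2 narrow_short_noix \
#       -f /tmp/loadfiles/csvbench_narrow_short_100000.dat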
def get_table_row_count(table_name):
host = random.choice(options.servers)
pyclient = FastSerializer(host=host, port=21212)
count = VoltProcedure(pyclient, '@AdHoc', [FastSerializer.VOLTTYPE_STRING])
resp = count.call(['select count(*) from %s' % table_name], timeout=360)
if resp.status != 1 or len(resp.tables[0].tuples) != 1:
print "Unexpected response to count query from host %s: %s" % (host, resp)
raise RuntimeError()
__tuples = resp.tables[0].tuples[0]
result = __tuples[0]
print "count query returned: %s" % result
return result
def get_datafile_path(case):
return os.path.join(DATA_DIR, "csvbench_%s_%d.dat" % (case, options.ROW_COUNT))
def get_filesize(file):
return int(run_readlines("wc -l %s" % file).split(' ')[0])
def list_callback (option, opt, value, parser):
"""split the list of strings and store it in the parser options """
setattr(parser.values, option.dest, value.split(','))
def parse_cmdline():
global options, args, DATA_DIR
usage = "usage: %prog [options] path-to-loadfiles"
parser = OptionParser()
parser.add_option ("-s", "--servers",
type = "string",
action = "callback", callback = list_callback,
default=["localhost"],
help ="list of servers")
# WNG Don't run more than one case at a time in apprunner if collecting stats
parser.add_option ("-c", "--case",
type = "string",
action = "callback", callback = list_callback,
default=None,
help ="comma separate list of cases to run")
parser.add_option ("-n", "--rows",
type = "int",
dest = "ROW_COUNT",
default = 100000,
help ="number of rows to test")
parser.add_option ("-r", "--regeneratedata",
dest = "REGENERATE",
action="store_true", default=False,
help ="regenerate the data'")
parser.add_option ("-t", "--tries",
type = "int",
dest = "TRIES",
default = 1,
help ="number of time to run the test case and average the performance results")
parser.add_option ("-o", "--csvoptions",
type = "string",
action = "callback", callback = list_callback,
default=None,
help ="comma separated list of options to be passed to the csvloader")
parser.add_option ("-v", "--verbose",
dest = "VERBOSE",
action="store_true", default=False,
help ="print csv output'")
parser.add_option ("-l", "--list",
dest = "LIST",
action="store_true", default=False,
help ="list cases supported and exit'")
parser.add_option ("--statsfile",
type = "string",
dest = "statsfile",
default=None,
help ="file to write statistics for apprunner")
(options, args) = parser.parse_args()
if options.LIST:
list_cases()
sys.exit(0)
if len(args) < 1:
print "ERROR load file directory not specified"
sys.exit(1)
DATA_DIR = args[0]
if not os.path.isdir(DATA_DIR):
print "ERROR load file directory does not exist, or is not a directory"
sys.exit(1)
if options.statsfile:
f = open(options.statsfile, 'w')
f.close
def data_narrow_short(rebuild=False):
data_file = get_datafile_path("narrow_short")
if rebuild or not os.path.exists(data_file):
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%d,%d,%d,%s" % (I, genint(2), genint(1), genint(8), gentext(60))
print "data file %s was written" % data_file
return data_file
def data_narrow_long(rebuild=False):
data_file = get_datafile_path("narrow_long")
if rebuild or not os.path.exists(data_file):
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%d,%d,%d,%s" % (I, randint(-32766,32767),randint(-127,127),randint(-2**63,2**63),gentext(512))
print "data file %s was written" % data_file
return data_file
def data_generic(rebuild=False):
"""
a integer NOT NULL
, b tinyint
, c smallint
, d varchar(1)
, e timestamp
, f timestamp
, h varchar(60)
, i varchar(60)
, j varchar(60)
, k varchar(1024)
, l varchar(1024)
, m varchar(1024)
, n double
, o bigint
, p varchar(1)
, r bigint
a integer NOT NULL
, b tinyint
, c smallint
, d varchar(1)
, e timestamp
, f timestamp
, h varchar(60)
, i varchar(60)
, j varchar(60)
, k varchar(1024)
, l varchar(1024)
, m varchar(1024)
, n float
, o bigint
, p varchar(1)
, r bigint
, s decimal(32,4)
, t decimal(32,4)
, u decimal(32,4)
"""
case = "generic"
data_file = get_datafile_path(case)
if rebuild or not os.path.exists(data_file) or get_filesize(data_file) != options.ROW_COUNT:
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%d,%d,%s,%s,%s,%s,%s,%s,%s,%s,%s,%f,%d,%s,%d" \
% ( I,
genint(1),
genint(2),
gentext(1),
gentimestamp(),
gentimestamp(),
gentext(60),
gentext(60),
gentext(60),
gentext(1024),
gentext(1024),
gentext(1024),
gendouble(),
genint(8),
gentext(1),
genint(8)
)
print "data file %s was written" % data_file
return data_file
def case_generic_noix():
schema = "generic_noix"
data_file = data_generic(False)
run_csvloader(schema, data_file)
def data_replicated_pk(rebuild=False):
data_file = get_datafile_path("replicated_pk")
if rebuild or not os.path.exists(data_file) or get_filesize(data_file) != options.ROW_COUNT:
myseq = [0]
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%s,%s,%s,%s,%s" % (gennumsequence(myseq),
gentext(60),
gentext(1024),
gentimestamp(),
gentext(30),
genfixeddecimalstr(size=1, precision=0, signed=False)
)
print "data file %s was written" % data_file
return data_file
parse_cmdline()
cases = options.case or CASES.keys()
for schema in cases:
if schema not in CASES:
print "ERROR unknown case: %s" % c
        list_cases()
sys.exit(1)
data_file = globals()[CASES[schema]](options.REGENERATE)
run_csvloader(schema, data_file)
| agpl-3.0 |
mat650/metagoofil | hachoir_parser/container/realmedia.py | 95 | 6851 | """
RealMedia (.rm) parser
Author: Mike Melanson
Creation date: 15 december 2006
References:
- http://wiki.multimedia.cx/index.php?title=RealMedia
- Appendix E: RealMedia File Format (RMFF) Reference
https://common.helixcommunity.org/nonav/2003/HCS_SDK_r5/htmfiles/rmff.htm
Samples:
- http://samples.mplayerhq.hu/real/
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt16, UInt32, Bit, RawBits,
RawBytes, String, PascalString8, PascalString16)
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.endian import BIG_ENDIAN
def parseHeader(self):
yield UInt32(self, "filever", "File version")
yield UInt32(self, "numheaders", "number of headers")
def parseFileProperties(self):
yield UInt32(self, "max_bit_rate", "Maximum bit rate")
yield UInt32(self, "avg_bit_rate", "Average bit rate")
yield UInt32(self, "max_pkt_size", "Size of largest data packet")
yield UInt32(self, "avg_pkt_size", "Size of average data packet")
yield UInt32(self, "num_pkts", "Number of data packets")
yield UInt32(self, "duration", "File duration in milliseconds")
yield UInt32(self, "preroll", "Suggested preroll in milliseconds")
yield textHandler(UInt32(self, "index_offset", "Absolute offset of first index chunk"), hexadecimal)
yield textHandler(UInt32(self, "data_offset", "Absolute offset of first data chunk"), hexadecimal)
yield UInt16(self, "stream_count", "Number of streams in the file")
yield RawBits(self, "reserved", 13)
yield Bit(self, "is_live", "Whether file is a live broadcast")
yield Bit(self, "is_perfect_play", "Whether PerfectPlay can be used")
yield Bit(self, "is_saveable", "Whether file can be saved")
def parseContentDescription(self):
yield PascalString16(self, "title", charset="ISO-8859-1", strip=" \0")
yield PascalString16(self, "author", charset="ISO-8859-1", strip=" \0")
yield PascalString16(self, "copyright", charset="ISO-8859-1", strip=" \0")
yield PascalString16(self, "comment", charset="ISO-8859-1", strip=" \0")
class NameValueProperty(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
yield UInt32(self, "size")
yield UInt16(self, "obj_version")
yield PascalString8(self, "name", charset="ASCII")
yield UInt32(self, "type")
yield PascalString16(self, "value", charset="ISO-8859-1", strip=" \0")
class LogicalFileInfo(FieldSet):
def createFields(self):
yield UInt32(self, "size")
yield UInt16(self, "obj_version")
yield UInt16(self, "nb_physical_stream")
for index in xrange(self["nb_physical_stream"].value):
yield UInt16(self, "physical_stream[]")
for index in xrange(self["nb_physical_stream"].value):
yield UInt16(self, "data_offset[]")
yield UInt16(self, "nb_rule")
for index in xrange(self["nb_rule"].value):
yield UInt16(self, "rule[]")
yield UInt16(self, "nb_prop")
for index in xrange(self["nb_prop"].value):
yield NameValueProperty(self, "prop[]")
def parseMediaPropertiesHeader(self):
yield UInt16(self, "stream_number", "Stream number")
yield UInt32(self, "max_bit_rate", "Maximum bit rate")
yield UInt32(self, "avg_bit_rate", "Average bit rate")
yield UInt32(self, "max_pkt_size", "Size of largest data packet")
yield UInt32(self, "avg_pkt_size", "Size of average data packet")
yield UInt32(self, "stream_start", "Stream start offset in milliseconds")
yield UInt32(self, "preroll", "Preroll in milliseconds")
yield UInt32(self, "duration", "Stream duration in milliseconds")
yield PascalString8(self, "desc", "Stream description", charset="ISO-8859-1")
yield PascalString8(self, "mime_type", "MIME type string", charset="ASCII")
yield UInt32(self, "specific_size", "Size of type-specific data")
size = self['specific_size'].value
if size:
if self["mime_type"].value == "logical-fileinfo":
yield LogicalFileInfo(self, "file_info", size=size*8)
else:
yield RawBytes(self, "specific", size, "Type-specific data")
class Chunk(FieldSet):
tag_info = {
".RMF": ("header", parseHeader),
"PROP": ("file_prop", parseFileProperties),
"CONT": ("content_desc", parseContentDescription),
"MDPR": ("stream_prop[]", parseMediaPropertiesHeader),
"DATA": ("data[]", None),
"INDX": ("file_index[]", None)
}
def createValueFunc(self):
return self.value_func(self)
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
self._size = (self["size"].value) * 8
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parse_func = self.tag_info[tag]
else:
self._description = ""
self.parse_func = None
def createFields(self):
yield String(self, "tag", 4, "Chunk FourCC", charset="ASCII")
yield UInt32(self, "size", "Chunk Size")
yield UInt16(self, "version", "Chunk Version")
if self.parse_func:
for field in self.parse_func(self):
yield field
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
return "Chunk: %s" % self["tag"].display
class RealMediaFile(Parser):
MAGIC = '.RMF\0\0\0\x12\0\1' # (magic, size=18, version=1)
PARSER_TAGS = {
"id": "real_media",
"category": "container",
"file_ext": ("rm",),
"mime": (
u"video/x-pn-realvideo",
u"audio/x-pn-realaudio",
u"audio/x-pn-realaudio-plugin",
u"audio/x-real-audio",
u"application/vnd.rn-realmedia"),
"min_size": len(MAGIC)*8, # just the identifier
"magic": ((MAGIC, 0),),
"description": u"RealMedia (rm) Container File",
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != '.RMF':
return "Invalid magic"
if self["header/size"].value != 18:
return "Invalid header size"
if self["header/version"].value not in (0, 1):
return "Unknown file format version (%s)" % self["header/version"].value
return True
def createFields(self):
while not self.eof:
yield Chunk(self, "chunk")
def createMimeType(self):
for prop in self.array("stream_prop"):
if prop["mime_type"].value == "video/x-pn-realvideo":
return u"video/x-pn-realvideo"
return u"audio/x-pn-realaudio"
| gpl-2.0 |
ovnicraft/odoo | addons/association/__openerp__.py | 260 | 1700 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
partofthething/home-assistant | homeassistant/components/alpha_vantage/sensor.py | 16 | 6966 | """Stock market information from Alpha Vantage."""
from datetime import timedelta
import logging
from alpha_vantage.foreignexchange import ForeignExchange
from alpha_vantage.timeseries import TimeSeries
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_CURRENCY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_CLOSE = "close"
ATTR_HIGH = "high"
ATTR_LOW = "low"
ATTRIBUTION = "Stock market information provided by Alpha Vantage"
CONF_FOREIGN_EXCHANGE = "foreign_exchange"
CONF_FROM = "from"
CONF_SYMBOL = "symbol"
CONF_SYMBOLS = "symbols"
CONF_TO = "to"
ICONS = {
"BTC": "mdi:currency-btc",
"EUR": "mdi:currency-eur",
"GBP": "mdi:currency-gbp",
"INR": "mdi:currency-inr",
"RUB": "mdi:currency-rub",
"TRY": "mdi:currency-try",
"USD": "mdi:currency-usd",
}
SCAN_INTERVAL = timedelta(minutes=5)
SYMBOL_SCHEMA = vol.Schema(
{
vol.Required(CONF_SYMBOL): cv.string,
vol.Optional(CONF_CURRENCY): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
CURRENCY_SCHEMA = vol.Schema(
{
vol.Required(CONF_FROM): cv.string,
vol.Required(CONF_TO): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_FOREIGN_EXCHANGE): vol.All(cv.ensure_list, [CURRENCY_SCHEMA]),
vol.Optional(CONF_SYMBOLS): vol.All(cv.ensure_list, [SYMBOL_SCHEMA]),
}
)
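# Example configuration.yaml entry matching the schema above (illustrative
# values only):
#
# sensor:
#   - platform: alpha_vantage
#     api_key: YOUR_API_KEY
#     symbols:
#       - symbol: GOOGL
#         name: Google
#     foreign_exchange:
#       - from: EUR
#         to: USD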
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Alpha Vantage sensor."""
api_key = config[CONF_API_KEY]
symbols = config.get(CONF_SYMBOLS, [])
conversions = config.get(CONF_FOREIGN_EXCHANGE, [])
if not symbols and not conversions:
msg = "No symbols or currencies configured."
hass.components.persistent_notification.create(msg, "Sensor alpha_vantage")
_LOGGER.warning(msg)
return
timeseries = TimeSeries(key=api_key)
dev = []
for symbol in symbols:
try:
_LOGGER.debug("Configuring timeseries for symbols: %s", symbol[CONF_SYMBOL])
timeseries.get_intraday(symbol[CONF_SYMBOL])
except ValueError:
_LOGGER.error("API Key is not valid or symbol '%s' not known", symbol)
dev.append(AlphaVantageSensor(timeseries, symbol))
forex = ForeignExchange(key=api_key)
for conversion in conversions:
from_cur = conversion.get(CONF_FROM)
to_cur = conversion.get(CONF_TO)
try:
_LOGGER.debug("Configuring forex %s - %s", from_cur, to_cur)
forex.get_currency_exchange_rate(from_currency=from_cur, to_currency=to_cur)
except ValueError as error:
_LOGGER.error(
"API Key is not valid or currencies '%s'/'%s' not known",
from_cur,
to_cur,
)
_LOGGER.debug(str(error))
dev.append(AlphaVantageForeignExchange(forex, conversion))
add_entities(dev, True)
_LOGGER.debug("Setup completed")
class AlphaVantageSensor(Entity):
"""Representation of a Alpha Vantage sensor."""
def __init__(self, timeseries, symbol):
"""Initialize the sensor."""
self._symbol = symbol[CONF_SYMBOL]
self._name = symbol.get(CONF_NAME, self._symbol)
self._timeseries = timeseries
self.values = None
self._unit_of_measurement = symbol.get(CONF_CURRENCY, self._symbol)
self._icon = ICONS.get(symbol.get(CONF_CURRENCY, "USD"))
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
return self.values["1. open"]
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.values is not None:
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_CLOSE: self.values["4. close"],
ATTR_HIGH: self.values["2. high"],
ATTR_LOW: self.values["3. low"],
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Requesting new data for symbol %s", self._symbol)
all_values, _ = self._timeseries.get_intraday(self._symbol)
self.values = next(iter(all_values.values()))
_LOGGER.debug("Received new values for symbol %s", self._symbol)
class AlphaVantageForeignExchange(Entity):
"""Sensor for foreign exchange rates."""
def __init__(self, foreign_exchange, config):
"""Initialize the sensor."""
self._foreign_exchange = foreign_exchange
self._from_currency = config[CONF_FROM]
self._to_currency = config[CONF_TO]
if CONF_NAME in config:
self._name = config.get(CONF_NAME)
else:
self._name = f"{self._to_currency}/{self._from_currency}"
self._unit_of_measurement = self._to_currency
self._icon = ICONS.get(self._from_currency, "USD")
self.values = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
return round(float(self.values["5. Exchange Rate"]), 4)
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.values is not None:
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
CONF_FROM: self._from_currency,
CONF_TO: self._to_currency,
}
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug(
"Requesting new data for forex %s - %s",
self._from_currency,
self._to_currency,
)
self.values, _ = self._foreign_exchange.get_currency_exchange_rate(
from_currency=self._from_currency, to_currency=self._to_currency
)
_LOGGER.debug(
"Received new data for forex %s - %s",
self._from_currency,
self._to_currency,
)
| mit |
stamhe/zulip | zerver/lib/response.py | 124 | 1316 | from __future__ import absolute_import
from django.http import HttpResponse, HttpResponseNotAllowed
import ujson
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, realm):
HttpResponse.__init__(self)
self["WWW-Authenticate"] = 'Basic realm="%s"' % (realm,)
def json_unauthorized(message):
resp = HttpResponseUnauthorized("zulip")
resp.content = ujson.dumps({"result": "error",
"msg": message}) + "\n"
return resp
def json_method_not_allowed(methods):
resp = HttpResponseNotAllowed(methods)
resp.content = ujson.dumps({"result": "error",
"msg": "Method Not Allowed",
"allowed_methods": methods})
return resp
def json_response(res_type="success", msg="", data={}, status=200):
content = {"result": res_type, "msg": msg}
content.update(data)
return HttpResponse(content=ujson.dumps(content) + "\n",
content_type='application/json', status=status)
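# Illustrative example (not part of the module): json_response merges `data`
# into the response envelope, so
#   json_response(data={"messages": []})
# produces an HTTP 200 JSON body containing "result": "success", "msg": ""
# and "messages": [] (key order depends on the dict).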
def json_success(data={}):
return json_response(data=data)
def json_error(msg, data={}, status=400):
return json_response(res_type="error", msg=msg, data=data, status=status)
def json_unhandled_exception():
return json_response(res_type="error", msg="Internal server error", status=500)
| apache-2.0 |
ojengwa/flask-oauthlib | example/linkedin.py | 16 | 2007 | from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
linkedin = oauth.remote_app(
'linkedin',
consumer_key='k8fhkgkkqzub',
consumer_secret='ZZtLETQOQYNDjMrz',
request_token_params={
'scope': 'r_basicprofile',
'state': 'RandomString',
},
base_url='https://api.linkedin.com/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
)
@app.route('/')
def index():
if 'linkedin_token' in session:
me = linkedin.get('people/~')
return jsonify(me.data)
return redirect(url_for('login'))
@app.route('/login')
def login():
return linkedin.authorize(callback=url_for('authorized', _external=True))
@app.route('/logout')
def logout():
session.pop('linkedin_token', None)
return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
resp = linkedin.authorized_response()
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['linkedin_token'] = (resp['access_token'], '')
me = linkedin.get('people/~')
return jsonify(me.data)
@linkedin.tokengetter
def get_linkedin_oauth_token():
return session.get('linkedin_token')
def change_linkedin_query(uri, headers, body):
auth = headers.pop('Authorization')
headers['x-li-format'] = 'json'
if auth:
auth = auth.replace('Bearer', '').strip()
if '?' in uri:
uri += '&oauth2_access_token=' + auth
else:
uri += '?oauth2_access_token=' + auth
return uri, headers, body
linkedin.pre_request = change_linkedin_query
if __name__ == '__main__':
app.run()
| bsd-3-clause |
anilmuthineni/tensorflow | tensorflow/python/kernel_tests/transpose_op_test.py | 29 | 9886 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class TransposeTest(test.TestCase):
def _np_transpose(self, x, perm):
ret = np.copy(x)
ret = ret.transpose(perm)
return ret
def _compareCpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on CPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compareGpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on GPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compare(self, x, use_gpu=False):
n = np.ndim(x)
# generate all permutations of [0, 1, ... n-1] in random order.
all_perm = np.random.permutation(
[p for p in itertools.permutations(range(n))]).astype(np.int32)
for p in all_perm[0:2]:
self._compareCpu(x, p)
if use_gpu:
self._compareGpu(x, p)
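  # Worked example (added comment, not in the original test): for a rank-3
  # input and the permutation p = [2, 0, 1], the NumPy reference result is
  #
  #   np.arange(24).reshape([2, 3, 4]).transpose([2, 0, 1])   # shape (4, 2, 3)
  #
  # and _compareCpu/_compareGpu assert that array_ops.transpose(x, p) matches
  # it in values and shape, plus a finite-difference gradient check for
  # float32/float64 inputs.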
def _compare_cpu_gpu(self, x):
n = np.ndim(x)
    # take the first two permutations of [0, 1, ... n-1] (lexicographic order)
    # and randomly shuffle each before comparing CPU and GPU results.
perms = itertools.permutations(range(n))
for _ in range(2):
p = np.random.permutation(next(perms)).astype(np.int32)
tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
assert tf_g_cpu is not None
assert tf_g_gpu is not None
if x.dtype == np.float32:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
elif x.dtype == np.float64:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)
def _testBoth(self, x):
self._compare(x, use_gpu=False)
self._compare(x, use_gpu=True)
def testRank1(self):
self._compareCpu(np.arange(0., 2), [0])
def test1D(self):
vector = np.arange(0, 2).reshape((1, 1, 1, 2, 1))
self._compare(vector, use_gpu=False)
self._compare(vector, use_gpu=True)
def testNop(self):
self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
def testSimple(self):
self._compareCpu(
np.arange(0, 8).reshape([2, 4]).astype(np.float32),
np.array([1, 0]).astype(np.int32))
def testHalf(self):
self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
self._compare(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))
def testFloat(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float32))
def testDouble(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))
def testComplex64(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))
def testComplex128(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))
def testInt8(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))
def testInt16(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))
def testInt32(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))
def testInt64(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))
def testTranspose2DAuto(self):
x_np = [[1, 2, 3], [4, 5, 6]]
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.transpose(x_np).eval()
self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
def testTransposeShapes(self):
self.assertEqual(
[],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[])).get_shape().dims)
self.assertEqual(
[100],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[100])).get_shape().dims)
self.assertEqual(
[37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37])).get_shape().dims)
self.assertEqual(
[100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
self.assertEqual(
[15, 37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15])).get_shape().dims)
self.assertEqual(
[15, 100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
self.assertEqual(
tensor_shape.TensorShape(None),
array_ops.transpose(array_ops.placeholder(dtypes.int32)).get_shape())
def testNullTensor(self):
with self.test_session():
x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
xt = array_ops.transpose(x, [0, 2, 1]).eval()
self.assertAllEqual(xt.shape, (1, 0, 4))
def _testError(self, x, p, err):
with self.test_session():
with self.assertRaisesOpError(err):
array_ops.transpose(x, p).eval()
def testError(self):
with self.assertRaises(ValueError):
array_ops.transpose(
np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
self._testError(
np.arange(0., 2**11).reshape([2] * 11), np.arange(11),
"not implemented")
with self.assertRaises(ValueError):
array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
self._testError(
np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")
if __name__ == "__main__":
test.main()
| apache-2.0 |
portante/sosreport | sos/plugins/psacct.py | 1 | 1824 | ### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Psacct(Plugin):
"""Process accounting related information
"""
option_list = [("all", "collect all process accounting files",
"slow", False)]
packages = [ "psacct" ]
class RedHatPsacct(Psacct, RedHatPlugin):
"""Process accounting related information for RedHat based distributions
"""
plugin_name = "psacct"
packages = [ "psacct" ]
def setup(self):
super(RedHatPsacct, self).setup()
self.add_copy_spec("/var/account/pacct")
if self.get_option("all"):
self.add_copy_spec("/var/account/pacct*.gz")
class DebianPsacct(Psacct, DebianPlugin, UbuntuPlugin):
"""Process accounting related information for Debian based distributions
"""
plugin_name = "acct"
packages = [ "acct" ]
def setup(self):
super(DebianPsacct, self).setup()
self.add_copy_specs(["/var/log/account/pacct", "/etc/default/acct"])
if self.get_option("all"):
self.add_copy_spec("/var/log/account/pacct*.gz")
# vim: et ts=4 sw=4
| gpl-2.0 |
hakonsbm/nest-simulator | pynest/nest/tests/test_connect_array_fixed_indegree.py | 2 | 3322 | # -*- coding: utf-8 -*-
#
# test_connect_array_fixed_indegree.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests of connection with rule fixed_indegree
and parameter arrays in syn_spec
"""
import unittest
import nest
import numpy
@nest.ll_api.check_stack
class ConnectArrayFixedIndegreeTestCase(unittest.TestCase):
"""Tests of connections with fixed indegree and parameter arrays"""
def test_Connect_Array_Fixed_Indegree(self):
"""Tests of connections with fixed indegree and parameter arrays"""
N = 20 # number of neurons in each subnet
K = 5 # number of connections per neuron
############################################
# test with connection rule fixed_indegree
############################################
nest.ResetKernel()
net1 = nest.Create('iaf_psc_alpha', N) # creates source subnet
net2 = nest.Create('iaf_psc_alpha', N) # creates target subnet
Warr = [[y*K+x for x in range(K)] for y in range(N)] # weight array
Darr = [[y*K+x + 1 for x in range(K)] for y in range(N)] # delay array
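        # Added clarification: with N = 3 and K = 2, for example, Warr would be
        # [[0, 1], [2, 3], [4, 5]] and every entry of Darr is the matching
        # weight plus one, which is what the "d - w == 1" assertion below
        # relies on.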
# synapses and connection dictionaries
syn_dict = {'model': 'static_synapse', 'weight': Warr, 'delay': Darr}
conn_dict = {'rule': 'fixed_indegree', 'indegree': K}
# connects source to target subnet
nest.Connect(net1, net2, conn_spec=conn_dict, syn_spec=syn_dict)
for i in range(N): # loop on all neurons of target subnet
# gets all connections to the target neuron
conns = nest.GetConnections(target=net2[i:i+1])
Warr1 = [] # creates empty weight array
# loop on synapses that connect to target neuron
for j in range(len(conns)):
c = conns[j:j+1]
w = nest.GetStatus(c, 'weight')[0] # gets synaptic weight
d = nest.GetStatus(c, 'delay')[0] # gets synaptic delay
self.assertTrue(d - w == 1) # checks that delay = weight + 1
Warr1.append(w) # appends w to Warr1
self.assertTrue(len(Warr1) == K) # checks the size of Warr1
Warr1.sort() # sorts the elements of Warr1
# get row of original weight array, sort it
# and compare it with Warr1
Warr2 = sorted(Warr[i])
for k in range(K):
self.assertTrue(Warr1[k]-Warr2[k] == 0.0)
def suite():
suite = unittest.makeSuite(ConnectArrayFixedIndegreeTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
txemi/ansible | lib/ansible/modules/system/gconftool2.py | 22 | 9101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Kenneth D. Evensen <[email protected]>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: gconftool2
author:
- "Kenneth D. Evensen (@kevensen)"
short_description: Edit GNOME Configurations
description:
- This module allows for the manipulation of GNOME 2 Configuration via
gconftool-2. Please see the gconftool-2(1) man pages for more details.
version_added: "2.3"
options:
key:
required: true
description:
- A GConf preference key is an element in the GConf repository
that corresponds to an application preference. See man gconftool-2(1)
value:
required: false
description:
- Preference keys typically have simple values such as strings,
integers, or lists of strings and integers. This is ignored if the state
is "get". See man gconftool-2(1)
value_type:
required: false
choices:
- int
- bool
- float
- string
description:
- The type of value being set. This is ignored if the state is "get".
state:
required: true
choices:
- get
- present
- absent
description:
- The action to take upon the key/value.
config_source:
required: false
description:
- Specify a configuration source to use rather than the default path.
See man gconftool-2(1)
direct:
required: false
choices: [ "yes", "no" ]
default: no
description:
- Access the config database directly, bypassing server. If direct is
specified then the config_source must be specified as well.
See man gconftool-2(1)
"""
EXAMPLES = """
- name: Change the widget font to "Serif 12"
gconftool2:
key: "/desktop/gnome/interface/font_name"
value_type: "string"
value: "Serif 12"
"""
RETURN = '''
key:
description: The key specified in the module parameters
returned: success
type: string
sample: "/desktop/gnome/interface/font_name"
value_type:
description: The type of the value that was changed
returned: success
type: string
sample: "string"
value:
description: The value of the preference key after executing the module
returned: success
type: string
sample: "Serif 12"
...
'''
from ansible.module_utils.basic import AnsibleModule, BOOLEANS_TRUE
from ansible.module_utils.pycompat24 import get_exception
import subprocess
class GConf2Preference(object):
def __init__(self, ansible, key, value_type, value,
direct=False, config_source=""):
self.ansible = ansible
self.key = key
self.value_type = value_type
self.value = value
self.config_source = config_source
self.direct = direct
def value_already_set(self):
return False
def call(self, call_type):
config_source = ''
direct = ''
changed = False
out = ''
# If the configuration source is different from the default, create
# the argument
if self.config_source is not None and len(self.config_source) > 0:
config_source = "--config-source " + self.config_source
# If direct is true, create the argument
if self.direct:
direct = "--direct"
# Execute the call
try:
# If the call is "get", then we don't need as many parameters and
# we can ignore some
if call_type == 'get':
process = subprocess.Popen(["gconftool-2 --get " + self.key],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
# Otherwise, we will use all relevant parameters
else:
process = subprocess.Popen(["gconftool-2 " + direct + " " +
config_source + " --type " +
self.value_type + " --" +
call_type + " " + self.key + " " +
self.value], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
# In either case, we will capture the output
out = process.stdout.read()
err = process.stderr.read()
if len(err) > 0:
self.ansible.fail_json(msg='gconftool-2 failed with error: %s'
% (str(err)))
else:
changed = True
except OSError:
            self.ansible.fail_json(msg='gconftool-2 failed with an exception')
return changed, out.rstrip()
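# Rough illustration (added comment; key, source and value are hypothetical):
# the call() method above ends up shelling out to commands shaped like
#
#   gconftool-2 --get /apps/example/some_key
#   gconftool-2 --direct --config-source xml:readwrite:/etc/gconf/gconf.xml.defaults \
#       --type string --set /apps/example/some_key some_value
#
# depending on whether the requested state maps to "get", "set" or "unset".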
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
key=dict(required=True, default=None, type='str'),
value_type=dict(required=False,
choices=['int', 'bool',
'float', 'string'],
type='str'),
value=dict(required=False, default=None,
type='str'),
state=dict(required=True, default=None,
choices=['present', 'get', 'absent'],
type='str'),
direct=dict(required=False,
default=False, type='bool'),
config_source=dict(required=False,
default=None, type='str')
),
supports_check_mode=True
)
state_values = {"present": "set", "absent": "unset", "get": "get"}
direct = False
# Assign module values to dictionary values
key = module.params['key']
value_type = module.params['value_type']
    # Guard against a missing value (e.g. when state is "get") before
    # normalising boolean-like strings.
    if module.params['value'] is not None and \
            module.params['value'].lower() == "true":
        value = "true"
    elif module.params['value'] is not None and \
            module.params['value'].lower() == "false":
        value = "false"
    else:
        value = module.params['value']
state = state_values[module.params['state']]
if module.params['direct'] in BOOLEANS_TRUE:
direct = True
config_source = module.params['config_source']
# Initialize some variables for later
change = False
new_value = ''
if state != "get":
if value is None or value == "":
module.fail_json(msg='State %s requires "value" to be set'
% str(state))
elif value_type is None or value_type == "":
module.fail_json(msg='State %s requires "value_type" to be set'
% str(state))
if direct and config_source is None:
module.fail_json(msg='If "direct" is "yes" then the ' +
'"config_source" must be specified')
elif not direct and config_source is not None:
module.fail_json(msg='If the "config_source" is specified ' +
'then "direct" must be "yes"')
# Create a gconf2 preference
gconf_pref = GConf2Preference(module, key, value_type,
value, direct, config_source)
# Now we get the current value
_, current_value = gconf_pref.call("get")
# Check if the current value equals the value we want to set. If not, make
# a change
if current_value != value:
        # If check mode, we know a change would have occurred.
if module.check_mode:
# So we will set the change to True
change = True
# And set the new_value to the value that would have been set
new_value = value
# If not check mode make the change.
else:
change, new_value = gconf_pref.call(state)
# If the value we want to set is the same as the current_value, we will
# set the new_value to the current_value for reporting
else:
new_value = current_value
facts = {}
facts['gconftool2'] = {'changed': change, 'key': key,
'value_type': value_type, 'new_value': new_value,
'previous_value': current_value,
'playbook_value': module.params['value']}
module.exit_json(changed=change, ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
leilihh/novaha | nova/db/sqlalchemy/utils.py | 9 | 23587 | # Copyright (c) 2013 Boris Pavlovic ([email protected]).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import schema
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
class DeleteFromSelect(UpdateBase):
def __init__(self, table, select, column):
self.table = table
self.select = select
self.column = column
# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this by nesting the select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.column),
element.column.name,
compiler.process(element.select))
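# Added illustration (not in the original module): for a hypothetical table
# "task_log" and a select of expired ids, the two constructs above compile to
# SQL of roughly this shape; the nested SELECT in DeleteFromSelect is what
# sidesteps MySQL's restriction on LIMIT inside IN subqueries:
#
#   INSERT INTO shadow_task_log SELECT ... FROM task_log WHERE ...
#   DELETE FROM task_log WHERE id in (SELECT T1.id FROM (SELECT ...) as T1)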
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except Exception:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise exception.NovaException(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise exception.NovaException(msg % column_name)
return column
def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
sql_data = migrate_engine.execute(
"""
SELECT sql
FROM
sqlite_master
WHERE
type = 'table' AND
name = :table_name;
""",
table_name=table_name
).fetchone()[0]
uniques = set([
schema.UniqueConstraint(
*[getattr(table.c, c.strip(' "'))
for c in cols.split(",")], name=name
)
for name, cols in re.findall(regexp, sql_data)
])
return uniques
def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name)
table.constraints.update(uniques)
constraints = [constraint for constraint in table.constraints
if not constraint.name == uc_name and
not isinstance(constraint, schema.ForeignKeyConstraint)]
new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"],
*column_names,
unique=index["unique"]))
f_keys = []
for fk in insp.get_foreign_keys(table_name):
refcolumns = [fk['referred_table'] + '.' + col
for col in fk['referred_columns']]
f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
refcolumns, table=new_table, name=fk['name']))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
for fkey in f_keys:
fkey.create()
new_table.rename(table_name)
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""This method drops UC from table and works for mysql, postgresql and
sqlite. In mysql and postgresql we are able to use "alter table"
construction. In sqlite is only one way to drop UC:
1) Create new table with same columns, indexes and constraints
(except one that we want to drop).
2) Copy data from old table to new.
3) Drop old table.
4) Rename new table to the name of old table.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
if migrate_engine.name == "sqlite":
_drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance)
else:
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
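# Hedged usage sketch (added; table, constraint and column names are
# hypothetical): a migration would typically call
#
#   drop_unique_constraint(migrate_engine, 'instance_types',
#                          'uniq_name_x_deleted', 'name', 'deleted')
#
# passing column_name=Column(...) keyword arguments only for columns whose
# types sqlite cannot reflect (e.g. BigInteger).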
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""This method is used to drop all old rows that have the same values for
columns in uc_columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(list(columns_for_group_by))
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def check_shadow_table(migrate_engine, table_name):
"""This method checks that table with ``table_name`` and
corresponding shadow table have same columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
autoload=True)
columns = dict([(c.name, c) for c in table.columns])
shadow_columns = dict([(c.name, c) for c in shadow_table.columns])
for name, column in columns.iteritems():
if name not in shadow_columns:
raise exception.NovaException(
_("Missing column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
shadow_column = shadow_columns[name]
if not isinstance(shadow_column.type, type(column.type)):
raise exception.NovaException(
_("Different types in %(table)s.%(column)s and shadow table: "
"%(c_type)s %(shadow_c_type)s")
% {'column': name, 'table': table.name,
'c_type': column.type,
'shadow_c_type': shadow_column.type})
for name, column in shadow_columns.iteritems():
if name not in columns:
raise exception.NovaException(
_("Extra column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
return True
def create_shadow_table(migrate_engine, table_name=None, table=None,
**col_name_col_instance):
"""This method create shadow table for table with name ``table_name``
or table instance ``table``.
:param table_name: Autoload table with this name and create shadow table
:param table: Autoloaded table, so just create corresponding shadow table.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
:returns: The created shadow_table object.
"""
meta = MetaData(bind=migrate_engine)
if table_name is None and table is None:
raise exception.NovaException(_("Specify `table_name` or `table` "
"param"))
if not (table_name is None or table is None):
raise exception.NovaException(_("Specify only one param `table_name` "
"`table`"))
if table is None:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
return shadow_table
except (OperationalError, ProgrammingError):
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise exception.ShadowTableExists(name=shadow_table_name)
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
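# Hedged usage sketch (added; names are illustrative): an archiving migration
# typically creates the shadow table right after the real one, e.g.
#
#   create_shadow_table(migrate_engine, table_name='instances',
#                       some_biginteger_col=Column('some_biginteger_col',
#                                                  BigInteger))
#
# where the keyword argument is only needed because sqlite reflects that
# column's type as NullType.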
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise exception.NovaException(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict([(index['name'], index['column_names'])
for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = MetaData(bind=migrate_engine)
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
table.update().\
where(table.c.deleted == True).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
# NOTE(boris-42): There is no other way to check is CheckConstraint
# associated with deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
# NOTE(I159): when the type of column `deleted` is changed from boolean
# to int, the corresponding CHECK constraint is dropped too. But
# starting from SQLAlchemy version 0.8.3, those CHECK constraints
# aren't dropped anymore. So despite the fact that column deleted is
# of type int now, we still restrict its values to be either 0 or 1.
constraint_markers = (
"deleted in (0, 1)",
"deleted IN (:deleted_1, :deleted_2)",
"deleted IN (:param_1, :param_2)"
)
return any(sqltext.endswith(marker) for marker in constraint_markers)
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == True).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
new_table.update().\
where(new_table.c.deleted == False).\
values(deleted=default_deleted_value).\
execute()
def _index_exists(migrate_engine, table_name, index_name):
inspector = reflection.Inspector.from_engine(migrate_engine)
indexes = inspector.get_indexes(table_name)
index_names = [index['name'] for index in indexes]
return index_name in index_names
def _add_index(migrate_engine, table, index_name, idx_columns):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.create()
def _drop_index(migrate_engine, table, index_name, idx_columns):
if _index_exists(migrate_engine, table.name, index_name):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.drop()
def _change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns):
_drop_index(migrate_engine, table, index_name, old_columns)
_add_index(migrate_engine, table, index_name, new_columns)
def modify_indexes(migrate_engine, data, upgrade=True):
if migrate_engine.name == 'sqlite':
return
meta = MetaData()
meta.bind = migrate_engine
for table_name, indexes in data.iteritems():
table = Table(table_name, meta, autoload=True)
for index_name, old_columns, new_columns in indexes:
if not upgrade:
new_columns, old_columns = old_columns, new_columns
if migrate_engine.name == 'postgresql':
if upgrade:
_add_index(migrate_engine, table, index_name, new_columns)
else:
_drop_index(migrate_engine, table, index_name, old_columns)
elif migrate_engine.name == 'mysql':
_change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns)
else:
raise ValueError('Unsupported DB %s' % migrate_engine.name)
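# Added illustration of the expected "data" shape (index name and columns are
# hypothetical): table name -> list of (index_name, old_columns, new_columns):
#
#   {'instances': [('instances_host_deleted_idx',
#                   ['host'], ['host', 'deleted'])]}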
| apache-2.0 |
JCA-Developpement/Odoo | addons/hr_payroll/__init__.py | 433 | 1137 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll
import report
import wizard
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openDAQ/easydaq | easydaq/config.py | 1 | 2487 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'daqcontrol/config.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(240, 120)
MainWindow.setMinimumSize(QtCore.QSize(240, 120))
MainWindow.setMaximumSize(QtCore.QSize(250, 120))
self.verticalLayout = QtWidgets.QVBoxLayout(MainWindow)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.connectButton = QtWidgets.QPushButton(MainWindow)
self.connectButton.setMinimumSize(QtCore.QSize(70, 27))
self.connectButton.setMaximumSize(QtCore.QSize(70, 27))
self.connectButton.setObjectName("connectButton")
self.gridLayout.addWidget(self.connectButton, 2, 1, 1, 1)
self.cbport = QtWidgets.QComboBox(MainWindow)
self.cbport.setObjectName("cbport")
self.gridLayout.addWidget(self.cbport, 1, 0, 1, 2)
self.verticalLayout.addLayout(self.gridLayout)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Configuration"))
self.label.setText(_translate("MainWindow", "Select Serial Port: "))
self.connectButton.setText(_translate("MainWindow", "Connect"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QDialog()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| lgpl-3.0 |
bsc-renewit/d2.2 | monitoringFramework/gmetric.py | 1 | 3582 |
#!/usr/bin/env python
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
__author__="mcanuto"
__date__ ="$Feb 13, 2014 6:03:13 PM$"
from xdrlib import Packer, Unpacker
import socket
slope_str2int = {'zero':0,
'positive':1,
'negative':2,
'both':3,
'unspecified':4}
# could be autogenerated from previous but whatever
slope_int2str = {0: 'zero',
1: 'positive',
2: 'negative',
3: 'both',
4: 'unspecified'}
class Gmetric:
"""
Class to send gmetric/gmond 2.X packets
Thread safe
"""
type = ('', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float',
'double', 'timestamp')
protocol = ('udp', 'multicast')
def __init__(self, host, port, protocol):
if protocol not in self.protocol:
raise ValueError("Protocol must be one of: " + str(self.protocol))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if protocol == 'multicast':
self.socket.setsockopt(socket.IPPROTO_IP,
socket.IP_MULTICAST_TTL, 20)
self.hostport = (host, int(port))
#self.socket.connect(self.hostport)
def send(self, NAME, VAL, TYPE='', UNITS='', SLOPE='both',
TMAX=60, DMAX=0, GROUP="", SPOOF=""):
if SLOPE not in slope_str2int:
raise ValueError("Slope must be one of: " + str(self.slope.keys()))
if TYPE not in self.type:
raise ValueError("Type must be one of: " + str(self.type))
if len(NAME) == 0:
raise ValueError("Name must be non-empty")
( meta_msg, data_msg ) = gmetric_write(NAME, VAL, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP, SPOOF)
#print data_msg
self.socket.sendto(meta_msg, self.hostport)
self.socket.sendto(data_msg, self.hostport)
def gmetric_write(NAME, VAL, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP, SPOOF):
"""
Arguments are in all upper-case to match XML
"""
packer = Packer()
HOSTNAME="test"
if SPOOF == "":
SPOOFENABLED=0
else :
SPOOFENABLED=1
# Meta data about a metric
packer.pack_int(128)
if SPOOFENABLED == 1:
packer.pack_string(SPOOF)
else:
packer.pack_string(HOSTNAME)
packer.pack_string(NAME)
packer.pack_int(SPOOFENABLED)
packer.pack_string(TYPE)
packer.pack_string(NAME)
packer.pack_string(UNITS)
packer.pack_int(slope_str2int[SLOPE]) # map slope string to int
packer.pack_uint(int(TMAX))
packer.pack_uint(int(DMAX))
# Magic number. Indicates number of entries to follow. Put in 1 for GROUP
if GROUP == "":
packer.pack_int(0)
else:
packer.pack_int(1)
packer.pack_string("GROUP")
packer.pack_string(GROUP)
# Actual data sent in a separate packet
data = Packer()
data.pack_int(128+5)
if SPOOFENABLED == 1:
data.pack_string(SPOOF)
else:
data.pack_string(HOSTNAME)
data.pack_string(NAME)
data.pack_int(SPOOFENABLED)
data.pack_string("%s")
data.pack_string(str(VAL))
return ( packer.get_buffer() , data.get_buffer() )
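# Hedged usage example (added, not part of the original module; host, port and
# metric are illustrative): sending one gauge to a gmond aggregator over UDP
# could look like
#
#   g = Gmetric('239.2.11.71', 8649, 'udp')
#   g.send('cpu_temp', 48.5, TYPE='float', UNITS='C', SLOPE='both',
#          TMAX=60, DMAX=0, GROUP='sensors')
#
# Under the hood this emits two XDR packets: a metadata packet (id 128) that
# declares the metric, and a value packet (id 128 + 5) carrying the value
# rendered with the "%s" format string.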
class GmetricConf:
def __init__(self, host, port, protocol, slope, spoof):
self.host = host
self.port = port
self.protocol = protocol
self.slope = slope
self.spoof = spoof
| apache-2.0 |
aerospike/aerospike-client-python | test/old_tests/_test_remove_bin.py | 1 | 12571 | # -*- coding: utf-8 -*-
import pytest
import sys
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
class TestRemovebin(object):
def setup_class(cls):
"""
Setup class.
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user is None and password is None:
TestRemovebin.client = aerospike.client(config).connect()
else:
TestRemovebin.client = aerospike.client(config).connect(user,
password)
def teardown_class(cls):
TestRemovebin.client.close()
def setup_method(self, method):
"""
Setup method.
"""
for i in range(5):
key = ('test', 'demo', i)
rec = {'name': 'name%s' % (str(i)), 'age': i}
TestRemovebin.client.put(key, rec)
def teardown_method(self, method):
"""
        Teardown method.
"""
for i in range(5):
key = ('test', 'demo', i)
            try:
                (key, _, _) = TestRemovebin.client.get(key)
                TestRemovebin.client.remove(key)
            except e.RecordNotFound:
                pass
def test_remove_bin_with_no_parameters(self):
"""
Invoke remove_bin() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestRemovebin.client.remove_bin()
assert "argument 'key' (pos 1)" in str(
typeError.value)
def test_remove_bin_with_correct_parameters(self):
"""
Invoke remove_bin() with correct parameters
"""
key = ('test', 'demo', 1)
TestRemovebin.client.remove_bin(key, ["age"])
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
def test_remove_bin_with_correct_policy(self):
"""
Invoke remove_bin() with correct policy
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
TestRemovebin.client.remove_bin(key, ["age"], {}, policy)
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
def test_remove_bin_with_policy_send_gen_ignore(self):
"""
Invoke remove_bin() with policy send
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_IGNORE
}
meta = {'gen': 2, 'ttl': 1000}
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_eq_positive(self):
"""
Invoke remove_bin() with policy gen eq less
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_EQ
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen, 'ttl': 1000}
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_eq_not_equal(self):
"""
Invoke remove_bin() with policy gen eq not equal
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_EQ
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen + 5, 'ttl': 1000}
try:
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == "AEROSPIKE_ERR_RECORD_GENERATION"
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'age': 1, 'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_GT_lesser(self):
"""
Invoke remove_bin() with policy gen GT lesser
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_GT
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen, 'ttl': 1000}
try:
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == "AEROSPIKE_ERR_RECORD_GENERATION"
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'age': 1, 'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_GT_positive(self):
"""
Invoke remove_bin() with policy gen GT positive
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_GT
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen + 5, 'ttl': 1000}
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
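    # Added note (not in the original tests): the generation policies exercised
    # above behave roughly as follows for this write:
    #   POLICY_GEN_IGNORE: write regardless of the record's stored generation
    #   POLICY_GEN_EQ:     write only if meta['gen'] equals the stored generation
    #   POLICY_GEN_GT:     write only if meta['gen'] is greater than it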
def test_remove_bin_with_policy_key_digest(self):
"""
Invoke remove_bin() with policy key digest
"""
key = ('test', 'demo', None, bytearray("asd;as[d'as;djk;uyfl",
"utf-8"))
rec = {'age': 1, 'name': 'name1'}
TestRemovebin.client.put(key, rec)
policy = {'timeout': 1000, 'key': aerospike.POLICY_KEY_DIGEST}
TestRemovebin.client.remove_bin(key, ["age"], {}, policy)
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None,
bytearray(b"asd;as[d\'as;djk;uyfl"))
TestRemovebin.client.remove(key)
def test_remove_bin_with_incorrect_policy(self):
"""
Invoke remove_bin() with incorrect policy
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 0.5
}
try:
TestRemovebin.client.remove_bin(key, ["age"], {}, policy)
except e.ClientError as exception:
assert exception.code == -1
assert exception.msg == "Incorrect policy"
def test_remove_bin_with_nonexistent_key(self):
"""
Invoke remove_bin() with non-existent key
"""
key = ('test', 'demo', "non-existent")
status = TestRemovebin.client.remove_bin(key, ["age"])
assert status == 0
def test_remove_bin_with_nonexistent_bin(self):
"""
Invoke remove_bin() with non-existent bin
"""
key = ('test', 'demo', 1)
status = TestRemovebin.client.remove_bin(key, ["non-existent"])
assert status == 0
def test_remove_bin_with_single_bin_in_a_record(self):
"""
Invoke remove_bin() with policy key digest
"""
key = ('test', 'demo', "single-bin")
try:
TestRemovebin.client.remove(key)
except:
pass
rec = {'name': 'single'}
TestRemovebin.client.put(key, rec)
policy = {'timeout': 1000}
TestRemovebin.client.remove_bin(key, ["name"], {}, policy)
_, _, bins = TestRemovebin.client.get(key)
assert bins is None
def test_remove_bin_with_extra_parameter(self):
"""
Invoke remove_bin() with extra parameter.
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
with pytest.raises(TypeError) as typeError:
TestRemovebin.client.remove_bin(key, ["age"], {}, policy, "")
assert "remove_bin() takes at most 4 arguments (5 given)" in str(
typeError.value)
def test_remove_bin_key_is_none(self):
"""
Invoke remove_bin() with key is none
"""
try:
TestRemovebin.client.remove_bin(None, ["age"])
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "key is invalid"
def test_remove_bin_bin_is_none(self):
"""
Invoke remove_bin() with bin is none
"""
key = ('test', 'demo', 1)
try:
TestRemovebin.client.remove_bin(key, None)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Bins should be a list"
def test_remove_bin_no_bin(self):
"""
Invoke remove_bin() no bin
"""
key = ('test', 'demo', 1)
try:
TestRemovebin.client.remove_bin(key, [])
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1', 'age': 1}
except e.InvalidRequest:
pass
def test_remove_bin_all_bins(self):
"""
Invoke remove_bin() all bins
"""
key = ('test', 'demo', 1)
TestRemovebin.client.remove_bin(key, ["name", "age"])
try:
(key, _, _) = TestRemovebin.client.get(key)
except e.RecordNotFound as exception:
assert exception.code == 2
for i in range(5):
key = ('test', 'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'age': i
}
TestRemovebin.client.put(key, rec)
def test_remove_bin_with_unicode_binname(self):
"""
Invoke remove_bin() with unicode bin name
"""
key = ('test', 'demo', 2)
TestRemovebin.client.remove_bin(key, [u"name"])
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'age': 2}
key = ('test', 'demo', 3)
TestRemovebin.client.remove_bin(key, [u"name", "age"])
try:
(key, _, bins) = TestRemovebin.client.get(key)
except e.RecordNotFound as exception:
assert exception.code == 2
key = ('test', 'demo', 4)
TestRemovebin.client.remove_bin(key, ["name", u"age"])
try:
(key, _, bins) = TestRemovebin.client.get(key)
except e.RecordNotFound as exception:
assert exception.code == 2
for i in range(5):
key = ('test', 'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'age': i
}
TestRemovebin.client.put(key, rec)
def test_remove_bin_with_correct_parameters_without_connection(self):
"""
Invoke remove_bin() with correct parameters without connection
"""
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
key = ('test', 'demo', 1)
try:
client1.remove_bin(key, ["age"])
except e.ClusterError as exception:
assert exception.code == 11
assert exception.msg == 'No connection to aerospike cluster'
| apache-2.0 |
bob-white/UnityIronPythonConsole | Assets/IronPythonConsole/Plugins/Lib/email/generator.py | 106 | 13930 | # Copyright (C) 2001-2010 Python Software Foundation
# Contact: [email protected]
"""Classes to generate plain text from a message object tree."""
__all__ = ['Generator', 'DecodedGenerator']
import re
import sys
import time
import random
import warnings
from cStringIO import StringIO
from email.header import Header
UNDERSCORE = '_'
NL = '\n'
fcre = re.compile(r'^From ', re.MULTILINE)
def _is8bitstring(s):
if isinstance(s, str):
try:
unicode(s, 'us-ascii')
except UnicodeError:
return True
return False
class Generator:
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The default is 78, as recommended (but not required)
by RFC 2822.
"""
self._fp = outfp
self._mangle_from_ = mangle_from_
self._maxheaderlen = maxheaderlen
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False):
"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
"""
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
print >> self._fp, ufrom
self._write(msg)
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
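    # Hedged usage sketch (added, not part of the stdlib source): flattening a
    # parsed message back to text typically looks like
    #
    #   fp = StringIO()
    #   Generator(fp, mangle_from_=True, maxheaderlen=78).flatten(msg)
    #   text = fp.getvalue()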
#
# Protected interface - undocumented ;/
#
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a StringIO. Then we write the
# headers and the StringIO contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._fp = sfp = StringIO()
self._dispatch(msg)
finally:
self._fp = oldfp
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
meth = getattr(self, '_handle_' + specific, None)
if meth is None:
generic = main.replace('-', '_')
meth = getattr(self, '_handle_' + generic, None)
if meth is None:
meth = self._writeBody
meth(msg)
#
# Default handlers
#
def _write_headers(self, msg):
for h, v in msg.items():
print >> self._fp, '%s:' % h,
if self._maxheaderlen == 0:
# Explicit no-wrapping
print >> self._fp, v
elif isinstance(v, Header):
# Header instances know what to do
print >> self._fp, v.encode()
elif _is8bitstring(v):
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
print >> self._fp, v
else:
# Header's got lots of smarts, so use it. Note that this is
# fundamentally broken though because we lose idempotency when
# the header string is continued with tabs. It will now be
# continued with spaces. This was reversedly broken before we
# fixed bug 1974. Either way, we lose.
print >> self._fp, Header(
v, maxlinelen=self._maxheaderlen, header_name=h).encode()
# A blank line always separates headers from body
print >> self._fp
#
# Handlers for writing types and subtypes
#
def _handle_text(self, msg):
payload = msg.get_payload()
if payload is None:
return
if not isinstance(payload, basestring):
raise TypeError('string payload expected: %s' % type(payload))
if self._mangle_from_:
payload = fcre.sub('>From ', payload)
self._fp.write(payload)
# Default body handler
_writeBody = _handle_text
def _handle_multipart(self, msg):
# The trick here is to write out each part separately, merge them all
# together, and then make sure that the boundary we've chosen isn't
# present in the payload.
msgtexts = []
subparts = msg.get_payload()
if subparts is None:
subparts = []
elif isinstance(subparts, basestring):
# e.g. a non-strict parse of a message with no starting boundary.
self._fp.write(subparts)
return
elif not isinstance(subparts, list):
# Scalar payload
subparts = [subparts]
for part in subparts:
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
msgtexts.append(s.getvalue())
# BAW: What about boundaries that are wrapped in double-quotes?
boundary = msg.get_boundary()
if not boundary:
# Create a boundary that doesn't appear in any of the
# message texts.
alltext = NL.join(msgtexts)
boundary = _make_boundary(alltext)
msg.set_boundary(boundary)
# If there's a preamble, write it out, with a trailing CRLF
if msg.preamble is not None:
print >> self._fp, msg.preamble
# dash-boundary transport-padding CRLF
print >> self._fp, '--' + boundary
# body-part
if msgtexts:
self._fp.write(msgtexts.pop(0))
# *encapsulation
# --> delimiter transport-padding
# --> CRLF body-part
for body_part in msgtexts:
# delimiter transport-padding CRLF
print >> self._fp, '\n--' + boundary
# body-part
self._fp.write(body_part)
# close-delimiter transport-padding
self._fp.write('\n--' + boundary + '--')
if msg.epilogue is not None:
print >> self._fp
self._fp.write(msg.epilogue)
def _handle_multipart_signed(self, msg):
        # The contents of signed parts have to stay unmodified in order to keep
# the signature intact per RFC1847 2.1, so we disable header wrapping.
# RDM: This isn't enough to completely preserve the part, but it helps.
old_maxheaderlen = self._maxheaderlen
try:
self._maxheaderlen = 0
self._handle_multipart(msg)
finally:
self._maxheaderlen = old_maxheaderlen
def _handle_message_delivery_status(self, msg):
# We can't just write the headers directly to self's file object
# because this will leave an extra newline between the last header
# block and the boundary. Sigh.
blocks = []
for part in msg.get_payload():
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
text = s.getvalue()
lines = text.split('\n')
# Strip off the unnecessary trailing empty line
if lines and lines[-1] == '':
blocks.append(NL.join(lines[:-1]))
else:
blocks.append(text)
# Now join all the blocks with an empty line. This has the lovely
# effect of separating each block with an empty line, but not adding
# an extra one after the last one.
self._fp.write(NL.join(blocks))
def _handle_message(self, msg):
s = StringIO()
g = self.clone(s)
# The payload of a message/rfc822 part should be a multipart sequence
# of length 1. The zeroth element of the list should be the Message
# object for the subpart. Extract that object, stringify it, and
# write it out.
# Except, it turns out, when it's a string instead, which happens when
# and only when HeaderParser is used on a message of mime type
# message/rfc822. Such messages are generated by, for example,
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
# in that case we just emit the string body.
payload = msg.get_payload()
if isinstance(payload, list):
g.flatten(msg.get_payload(0), unixfrom=False)
payload = s.getvalue()
self._fp.write(payload)
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
class DecodedGenerator(Generator):
"""Generates a text representation of a message.
Like the Generator base class, except that non-text parts are substituted
with a format string representing the part.
"""
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
"""Like Generator.__init__() except that an additional optional
argument is allowed.
Walks through all subparts of a message. If the subpart is of main
type `text', then it prints the decoded payload of the subpart.
Otherwise, fmt is a format string that is used instead of the message
payload. fmt is expanded with the following keywords (in
%(keyword)s format):
type : Full MIME type of the non-text part
maintype : Main MIME type of the non-text part
subtype : Sub-MIME type of the non-text part
filename : Filename of the non-text part
description: Description associated with the non-text part
encoding : Content transfer encoding of the non-text part
The default value for fmt is None, meaning
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
"""
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
if fmt is None:
self._fmt = _FMT
else:
self._fmt = fmt
def _dispatch(self, msg):
for part in msg.walk():
maintype = part.get_content_maintype()
if maintype == 'text':
print >> self, part.get_payload(decode=True)
elif maintype == 'multipart':
# Just skip this
pass
else:
print >> self, self._fmt % {
'type' : part.get_content_type(),
'maintype' : part.get_content_maintype(),
'subtype' : part.get_content_subtype(),
'filename' : part.get_filename('[no filename]'),
'description': part.get('Content-Description',
'[no description]'),
'encoding' : part.get('Content-Transfer-Encoding',
'[no encoding]'),
}
# Helper
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
def _make_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxint)
boundary = ('=' * 15) + (_fmt % token) + '=='
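    # On a 64-bit build this yields 15 '=' characters, a zero-padded 19-digit random
    # token and a trailing '==', a string very unlikely to occur in normal message text.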
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
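# A minimal usage sketch of the Generator API above; the helper name _demo_flatten is
# illustrative and not part of the upstream module. It assumes 'msg' is an
# email.message.Message instance, e.g. from email.message_from_string().
def _demo_flatten(msg, mangle_from_=True, maxheaderlen=60):
    fp = StringIO()
    g = Generator(fp, mangle_from_=mangle_from_, maxheaderlen=maxheaderlen)
    g.flatten(msg, unixfrom=True)   # prepend a "From " envelope line
    return fp.getvalue()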
| mpl-2.0 |
ThomasJunk/ringo | ringo/tests/functional/test_forms.py | 4 | 3977 | #!/usr/bin/env python
# encoding: utf-8
import pytest
from pytest_ringo import login, transaction_begin, transaction_rollback
class TestList:
def test_GET(self, app):
login(app, "admin", "secret")
app.get("/forms/list")
class TestRead:
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_GET(self, app):
login(app, "admin", "secret")
app.get("/forms/read/1")
class TestCreate:
def test_GET(self, app):
login(app, "admin", "secret")
app.get("/forms/create")
@pytest.mark.xfail
def test_POST(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/create", params=values, status=302)
transaction_rollback(app)
@pytest.mark.xfail
def test_POST_missing_title(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/create", params=values, status=200)
transaction_rollback(app)
@pytest.mark.xfail
def test_POST_missing_definition(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": ''}
app.post("/forms/create", params=values, status=200)
transaction_rollback(app)
class TestUpdate:
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update(self, app):
login(app, "admin", "secret")
app.get("/forms/update/1")
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update_POST(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/update/1", params=values, status=302)
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update_POST_missing_title(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/update/1", params=values, status=200)
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update_POST_missing_defintion(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": ''}
app.post("/forms/update/1", params=values, status=200)
transaction_rollback(app)
class TestDelete:
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_delete(self, app):
login(app, "admin", "secret")
transaction_begin(app)
app.get("/forms/delete/2")
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_delete_POST_confirm_yes(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"confirmed": 1}
app.post("/forms/delete/2", params=values, status=302)
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_delete_POST_admin_confirm_yes(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"confirmed": 1}
app.post("/forms/delete/1", params=values, status=302)
transaction_rollback(app)
| gpl-2.0 |
memsharded/conan | conans/test/functional/scm/workflows/test_scm_subfolder.py | 1 | 4596 | # coding=utf-8
import os
import textwrap
import unittest
from nose.plugins.attrib import attr
from conans.test.functional.scm.workflows.common import TestWorkflow
from conans.test.utils.tools import SVNLocalRepoTestCase
from conans.test.utils.tools import TestClient, create_local_git_repo
class SCMSubfolder(TestWorkflow):
""" The conanfile.py is in a subfolder inside the package,
    also using a subfolder for the repo checkout
"""
path_to_conanfile = "cc" # It
path_from_conanfile_to_root = ".."
scm_subfolder = "scm_subfolder"
@attr("svn")
class SVNConanfileInRepoRootTest(SCMSubfolder, SVNLocalRepoTestCase):
""" Test SCM url='auto' with SVN, it can only work if conanfile is in the root of the repo
In this case, it is exactly the same to have the url="auto" or to implement a custom
get_remote_url function with the following behavior because the SVN class will be
created in the conanfile.py directory by default:
def get_remote_url():
here = os.path.dirname(__file__)
svn = tools.SVN(os.path.join(here, "."))
return svn.get_remote_url()
"""
extra_header = textwrap.dedent("""\
def get_remote_url():
here = os.path.dirname(__file__)
svn = tools.SVN(os.path.join(here, "%s"))
return svn.get_remote_url()
""" % SCMSubfolder.path_from_conanfile_to_root)
conanfile = SCMSubfolder.conanfile_base.format(extra_header=extra_header,
type="svn",
url="get_remote_url()",
scm_subfolder=SCMSubfolder.scm_subfolder)
def setUp(self):
self.lib1_ref = "lib1/version@user/channel"
files = self.get_files(subfolder='lib1', conanfile=self.conanfile, lib_ref=self.lib1_ref)
self.url, _ = self.create_project(files=files)
# Local workflow
def test_local_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {}/lib1 .".format(self.url), cwd=t.current_folder)
self._run_local_test(t, t.current_folder, self.path_to_conanfile)
def test_local_monorepo(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_local_test(t, t.current_folder, os.path.join("lib1", self.path_to_conanfile))
def test_local_monorepo_chdir(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_local_test(t, os.path.join(t.current_folder, "lib1"), self.path_to_conanfile)
# Cache workflow
def test_remote_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {}/lib1 .".format(self.url), cwd=t.current_folder)
self._run_remote_test(t, t.current_folder, self.path_to_conanfile)
def test_remote_monorepo(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_remote_test(t, t.current_folder, os.path.join("lib1", self.path_to_conanfile))
def test_remote_monorepo_chdir(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_remote_test(t, os.path.join(t.current_folder, "lib1"), self.path_to_conanfile)
class GitConanfileInRepoRootTest(SCMSubfolder, unittest.TestCase):
conanfile = SCMSubfolder.conanfile_base.format(extra_header="",
type="git",
url="\"auto\"",
scm_subfolder=SCMSubfolder.scm_subfolder)
def setUp(self):
self.lib1_ref = "lib1/version@user/channel"
files = self.get_files(subfolder=".", conanfile=self.conanfile, lib_ref=self.lib1_ref)
self.url, _ = create_local_git_repo(files=files)
# Local workflow
def test_local_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner('git clone "{}" .'.format(self.url), cwd=t.current_folder)
self._run_local_test(t, t.current_folder, self.path_to_conanfile)
# Cache workflow
def test_remote_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner('git clone "{}" .'.format(self.url), cwd=t.current_folder)
self._run_remote_test(t, t.current_folder, self.path_to_conanfile)
| mit |
barykaed/Pelican-Test | fsp_env/Lib/site-packages/pip/basecommand.py | 79 | 9310 | """Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import os
import sys
import traceback
import optparse
import warnings
from pip._vendor.six import StringIO
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.compat import logging_dictConfig
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
from pip.utils import appdirs, get_prog, normalize_path
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
name = None
usage = None
hidden = False
log_stream = "ext://sys.stdout"
def __init__(self, isolated=False):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
'isolated': isolated,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def _build_session(self, options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.cache_dir, "http"))
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
insecure_hosts=options.trusted_hosts,
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
if options.quiet:
level = "WARNING"
elif options.verbose:
level = "DEBUG"
else:
level = "INFO"
# Compute the path for our debug log.
debug_log_path = os.path.join(appdirs.user_log_dir("pip"), "debug.log")
# Ensure that the path for our debug log is owned by the current user
# and if it is not, disable the debug log.
write_debug_log = check_path_owner(debug_log_path)
logging_dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": (
"%(message)s"
if not options.log_explicit_levels
else "[%(levelname)s] %(message)s"
),
},
},
"handlers": {
"console": {
"level": level,
"class": "pip.utils.logging.ColorizedStreamHandler",
"stream": self.log_stream,
"formatter": "indent",
},
"debug_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": debug_log_path,
"maxBytes": 10 * 1000 * 1000, # 10 MB
"backupCount": 1,
"delay": True,
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": options.log or "/dev/null",
"delay": True,
"formatter": "indent",
},
},
"root": {
"level": level,
"handlers": list(filter(None, [
"console",
"debug_log" if write_debug_log else None,
"user_log" if options.log else None,
])),
},
# Disable any logging besides WARNING unless we have DEBUG level
# logging enabled. These use both pip._vendor and the bare names
# for the case where someone unbundles our libraries.
"loggers": dict(
(
name,
{
"level": (
"WARNING"
if level in ["INFO", "ERROR"]
else "DEBUG"
),
},
)
for name in ["pip._vendor", "distlib", "requests", "urllib3"]
),
})
# We add this warning here instead of up above, because the logger
# hasn't been configured until just now.
if not write_debug_log:
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the debug log has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want the -H flag.",
os.path.dirname(debug_log_path),
)
if options.log_explicit_levels:
warnings.warn(
"--log-explicit-levels has been deprecated and will be removed"
" in a future version.",
RemovedInPip8Warning,
)
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
# Check if we're using the latest version of pip available
if (not options.disable_pip_version_check
and not getattr(options, "no_index", False)):
with self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)) as session:
pip_version_check(session)
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:\n%s', format_exc())
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except CommandError as exc:
logger.critical('ERROR: %s', exc)
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except:
logger.critical('Exception:\n%s', format_exc())
return UNKNOWN_ERROR
return SUCCESS
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
| mit |
bimbam23/tools-iuc | tools/resize_coordinate_window/resize_coordinate_window.py | 6 | 3693 | from __future__ import print_function
import argparse
import fileinput
import sys
# Maximum value of a signed 32 bit integer (2**31 - 1).
MAX_CHROM_LEN = 2147483647
def stop_err(msg):
sys.stderr.write(msg)
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input', help="Input dataset")
parser.add_argument('--start_coordinate', dest='start_coordinate', type=int, help='Chromosome start coordinate, either 0 or 1.')
parser.add_argument('--subtract_from_start', dest='subtract_from_start', type=int, help='Distance to subtract from start.')
parser.add_argument('--add_to_end', dest='add_to_end', type=int, help='Distance to add to end.')
parser.add_argument('--extend_existing', dest='extend_existing', help='Extend existing start/end instead of from computed midpoint.')
parser.add_argument('--chrom_len_file', dest='chrom_len_file', help="File names of .len files for chromosome lengths")
parser.add_argument('--region_boundaries', dest='region_boundaries', help="Option for handling region boundaries")
parser.add_argument('--output', dest='output', help="Output dataset")
args = parser.parse_args()
extend_existing = args.extend_existing == 'existing'
out = open(args.output, 'wb')
chrom_start = int(args.start_coordinate)
chrom_lens = dict()
# Determine the length of each chromosome and add it to the chrom_lens dictionary.
len_file_missing = False
len_file_error = None
len_file = fileinput.FileInput(args.chrom_len_file)
try:
for line in len_file:
fields = line.split("\t")
chrom_lens[fields[0]] = int(fields[1])
except Exception as e:
len_file_error = str(e)
with open(args.input) as fhi:
for line in fhi:
if line.startswith('#'):
# Skip comments.
continue
items = line.split('\t')
if len(items) != 9:
# Skip invalid gff data.
continue
chrom = items[0]
start = int(items[3])
end = int(items[4])
if extend_existing:
new_start = start - args.subtract_from_start
new_end = end + args.add_to_end
else:
midpoint = (start + end) // 2
new_start = midpoint - args.subtract_from_start
new_end = midpoint + args.add_to_end
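            # For example, with start=100, end=200, subtract_from_start=50 and
            # add_to_end=50, the midpoint is 150 and the resized window is 100..200.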
# Check start boundary.
if new_start < chrom_start:
if args.region_boundaries == 'discard':
continue
elif args.region_boundaries == 'limit':
new_start = chrom_start
elif args.region_boundaries == 'error':
out.close()
stop_err('Requested expansion places region beyond chromosome start boundary of %d.' % chrom_start)
# Check end boundary.
chrom_len = chrom_lens.get(chrom, None)
if chrom_len is None:
len_file_missing = True
chrom_len = MAX_CHROM_LEN
if new_end > chrom_len:
if args.region_boundaries == 'discard':
continue
elif args.region_boundaries == 'limit':
new_end = chrom_len
elif args.region_boundaries == 'error':
out.close()
stop_err('Requested expansion places region beyond chromosome end boundary of %d.' % chrom_len)
new_line = '\t'.join([chrom, items[1], items[2], str(new_start), str(new_end), items[5], items[6], items[7], items[8]])
out.write(new_line)
out.close()
if len_file_error is not None:
print("All chrom lengths set to %d, error in chrom len file: %s" % (MAX_CHROM_LEN, len_file_error))
if len_file_missing:
print("All chrom lengths set to %d, chrom len files are not installed." % MAX_CHROM_LEN)
| mit |
mihailignatenko/erp | addons/l10n_uy/__openerp__.py | 260 | 1807 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <[email protected]>
# Proyecto de Localización de OperERP para Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Uruguay - Chart of Accounts',
'version': '0.1',
'author': 'Uruguay l10n Team & Guillem Barba',
'category': 'Localization/Account Charts',
'website': 'https://launchpad.net/openerp-uruguay',
'description': """
General Chart of Accounts.
==========================
Provide Templates for Chart of Accounts, Taxes for Uruguay.
""",
'license': 'AGPL-3',
'depends': ['account'],
'data': [
'account_types.xml',
'taxes_code_template.xml',
'account_chart_template.xml',
'taxes_template.xml',
'l10n_uy_wizard.xml',
],
'demo': [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhangxq5012/sky_engine | build/android/pylib/perf/surface_stats_collector_unittest.py | 99 | 2384 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for SurfaceStatsCollector."""
# pylint: disable=W0212
import unittest
from pylib.perf.surface_stats_collector import SurfaceStatsCollector
class TestSurfaceStatsCollector(unittest.TestCase):
@staticmethod
def _CreateUniformTimestamps(base, num, delta):
return [base + i * delta for i in range(1, num + 1)]
@staticmethod
def _CreateDictionaryFromResults(results):
dictionary = {}
for result in results:
dictionary[result.name] = result
return dictionary
def setUp(self):
self.refresh_period = 0.1
def testOneFrameDelta(self):
timestamps = self._CreateUniformTimestamps(0, 10, self.refresh_period)
results = self._CreateDictionaryFromResults(
SurfaceStatsCollector._CalculateResults(
self.refresh_period, timestamps, ''))
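        # With refresh_period == 0.1 and 10 uniformly spaced timestamps, every frame
        # length is exactly one vsync, so the expected average is 1 / 0.1 = 10 FPS,
        # with no jank and a maximum frame delay of a single refresh interval.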
self.assertEquals(results['avg_surface_fps'].value,
int(round(1 / self.refresh_period)))
self.assertEquals(results['jank_count'].value, 0)
self.assertEquals(results['max_frame_delay'].value, 1)
self.assertEquals(len(results['frame_lengths'].value), len(timestamps) - 1)
def testAllFramesTooShort(self):
timestamps = self._CreateUniformTimestamps(0, 10, self.refresh_period / 100)
self.assertRaises(Exception,
SurfaceStatsCollector._CalculateResults,
[self.refresh_period, timestamps, ''])
def testSomeFramesTooShort(self):
timestamps = self._CreateUniformTimestamps(0, 5, self.refresh_period)
# The following timestamps should be skipped.
timestamps += self._CreateUniformTimestamps(timestamps[4],
5,
self.refresh_period / 100)
timestamps += self._CreateUniformTimestamps(timestamps[4],
5,
self.refresh_period)
results = self._CreateDictionaryFromResults(
SurfaceStatsCollector._CalculateResults(
self.refresh_period, timestamps, ''))
self.assertEquals(len(results['frame_lengths'].value), 9)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
schwartzmx/ansible-modules-extras | notification/flowdock.py | 55 | 6057 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: Matt Coddington
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
'''
EXAMPLES = '''
- flowdock: type=inbox
token=AAAAAA
[email protected]
source='my cool app'
msg='test from ansible'
subject='test subject'
- flowdock: type=chat
token=AAAAAA
external_user_name=testuser
msg='test from ansible'
tags=tag1,tag2,tag3
'''
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox","chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
elif type == 'chat':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# required params for the 'inbox' type
for item in [ 'from_address', 'source', 'subject' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
TheJJ100100/bedrock | bedrock/base/tests/test_accepted_locales.py | 6 | 3508 | import os
import shutil
from django.conf import settings
import test_utils
from bedrock.settings.base import get_dev_languages, path
class AcceptedLocalesTest(test_utils.TestCase):
"""Test lazy evaluation of locale related settings.
Verify that some localization-related settings are lazily evaluated based
on the current value of the DEV variable. Depending on the value,
DEV_LANGUAGES or PROD_LANGUAGES should be used.
"""
locale = path('locale')
locale_bkp = path('locale_bkp')
@classmethod
def setup_class(cls):
"""Create a directory structure for locale/.
Back up the existing project/locale/ directory and create the following
hierarchy in its place:
- project/locale/en-US/LC_MESSAGES
- project/locale/fr/LC_MESSAGES
- project/locale/templates/LC_MESSAGES
- project/locale/empty_file
Also, set PROD_LANGUAGES to ('en-US',).
"""
if os.path.exists(cls.locale_bkp):
raise Exception('A backup of locale/ exists at %s which might '
'mean that previous tests didn\'t end cleanly. '
'Skipping the test suite.' % cls.locale_bkp)
cls.DEV = settings.DEV
cls.PROD_LANGUAGES = settings.PROD_LANGUAGES
cls.DEV_LANGUAGES = settings.DEV_LANGUAGES
settings.PROD_LANGUAGES = ('en-US',)
if os.path.exists(cls.locale):
shutil.move(cls.locale, cls.locale_bkp)
else:
cls.locale_bkp = None
for loc in ('en-US', 'fr', 'templates'):
os.makedirs(os.path.join(cls.locale, loc, 'LC_MESSAGES'))
open(os.path.join(cls.locale, 'empty_file'), 'w').close()
@classmethod
def teardown_class(cls):
"""Remove the testing locale/ dir and bring back the backup."""
settings.DEV = cls.DEV
settings.PROD_LANGUAGES = cls.PROD_LANGUAGES
settings.DEV_LANGUAGES = cls.DEV_LANGUAGES
shutil.rmtree(cls.locale)
if cls.locale_bkp:
shutil.move(cls.locale_bkp, cls.locale)
def test_build_dev_languages(self):
"""Test that the list of dev locales is built properly.
On dev instances, the list of accepted locales should correspond to
the per-locale directories in locale/.
"""
settings.DEV = True
langs = get_dev_languages()
assert langs == ['en-US', 'fr'] or langs == ['fr', 'en-US'], (
'DEV_LANGUAGES do not correspond to the contents of locale/.')
def test_dev_languages(self):
"""Test the accepted locales on dev instances.
On dev instances, allow locales defined in DEV_LANGUAGES.
"""
settings.DEV = True
# simulate the successful result of the DEV_LANGUAGES list
# comprehension defined in settings.
settings.DEV_LANGUAGES = ['en-US', 'fr']
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US', 'fr': 'fr'}, \
('DEV is True, but DEV_LANGUAGES are not used to define the '
'allowed locales.')
def test_prod_languages(self):
"""Test the accepted locales on prod instances.
On stage/prod instances, allow locales defined in PROD_LANGUAGES.
"""
settings.DEV = False
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US'}, (
'DEV is False, but PROD_LANGUAGES are not used to define the '
'allowed locales.')
| mpl-2.0 |
skg-net/ansible | test/runner/lib/docker_util.py | 17 | 5836 | """Functions for accessing docker via the docker cli."""
from __future__ import absolute_import, print_function
import json
import os
import time
from lib.executor import (
SubprocessError,
)
from lib.util import (
ApplicationError,
run_command,
common_environment,
display,
find_executable,
)
from lib.config import (
EnvironmentConfig,
)
BUFFER_SIZE = 256 * 256
def docker_available():
"""
:rtype: bool
"""
return find_executable('docker', required=False)
def get_docker_container_id():
"""
:rtype: str | None
"""
path = '/proc/self/cgroup'
if not os.path.exists(path):
return None
with open(path) as cgroup_fd:
contents = cgroup_fd.read()
paths = [line.split(':')[2] for line in contents.splitlines()]
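    # On cgroup v1 each line typically looks like "5:cpuset:/docker/<container-id>",
    # so the third ':'-separated field is the cgroup path and its third '/'-separated
    # component is the container id extracted below.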
container_ids = set(path.split('/')[2] for path in paths if path.startswith('/docker/'))
if not container_ids:
return None
if len(container_ids) == 1:
return container_ids.pop()
raise ApplicationError('Found multiple container_id candidates: %s\n%s' % (sorted(container_ids), contents))
def get_docker_container_ip(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: str
"""
results = docker_inspect(args, container_id)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
if not args.docker_pull:
display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
return
for _ in range(1, 10):
try:
docker_command(args, ['pull', image])
return
except SubprocessError:
display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to pull docker image "%s".' % image)
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(src, 'rb') as src_fd:
docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdin=src_fd, capture=True)
def docker_get(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(dst, 'wb') as dst_fd:
docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdout=dst_fd, capture=True)
def docker_run(args, image, options, cmd=None):
"""
:type args: EnvironmentConfig
:type image: str
:type options: list[str] | None
:type cmd: list[str] | None
:rtype: str | None, str | None
"""
if not options:
options = []
if not cmd:
cmd = []
for _ in range(1, 3):
try:
return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to run docker image "%s".' % image)
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
"""
docker_command(args, ['rm', '-f', container_id], capture=True)
def docker_inspect(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['inspect', container_id], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
:type network: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['network', 'inspect', network], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type container_id: str
:type cmd: list[str]
:type options: list[str] | None
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
if not options:
options = []
return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
def docker_command(args, cmd, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type cmd: list[str]
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
env = docker_environment()
return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout)
def docker_environment():
"""
:rtype: dict[str, str]
"""
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
return env
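# A minimal sketch of how the helpers above compose; the function name and default image
# tag are illustrative only, and 'args' is assumed to be an EnvironmentConfig built by
# the test runner.
def _demo_start_container(args, image='ubuntu:16.04'):
    """Pull an image, start a detached container and return (container_id, ip)."""
    docker_pull(args, image)
    stdout, _ = docker_run(args, image, options=['-d'])
    container_id = stdout.strip() if stdout else None
    ip_address = get_docker_container_ip(args, container_id) if container_id else None
    return container_id, ip_address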
| gpl-3.0 |
AustereCuriosity/numpy | numpy/lib/tests/test_nanfunctions.py | 10 | 32613 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
# Rows of _ndat with nans converted to ones
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
# Rows of _ndat with nans converted to zeros
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
assert_(res != np.nan)
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in")
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)
def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)
def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)
def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)
def test_nancumsum(self):
tgt = np.cumsum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumsum(mat), tgt)
def test_nancumprod(self):
tgt = np.cumprod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumprod(mat), tgt)
def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)
def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)
tgt = np.var(mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)
def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)
tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
def test_allnans(self):
# Check for FutureWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = np.nansum([np.nan]*3, axis=None)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check scalar
res = np.nansum(np.nan)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check there is no warning for not all-nan
np.nansum([0]*3, axis=None)
assert_(len(w) == 0, 'unwanted warning raised')
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
def test_allnans(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
# Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input
with assert_no_warnings():
res = f([np.nan]*3, axis=None)
tgt = tgt_value*np.ones((3))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value))
# Check scalar
res = f(np.nan)
tgt = tgt_value*np.ones((1))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value))
# Check there is no warning for not all-nan
f([0]*3, axis=None)
def test_empty(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
mat = np.zeros((0, 3))
tgt = tgt_value*np.ones((0, 3))
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = mat
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = np.zeros((0))
res = f(mat, axis=None)
assert_equal(res, tgt)
def test_keepdims(self):
for f, g in zip(self.nanfuncs, self.stdfuncs):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = f(mat, axis=axis, out=None)
res = g(mat, axis=axis, out=None)
assert_(res.ndim == tgt.ndim)
for f in self.nanfuncs:
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
rs = np.random.RandomState(0)
d[rs.rand(*d.shape) < 0.5] = np.nan
res = f(d, axis=None)
assert_equal(res.shape, (1155,))
for axis in np.arange(4):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
for axis in np.arange(2):
res = f(mat, axis=axis)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 3))
res = f(mat)
assert_(res.shape == (1, 3*3))
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
res = np.nancumprod(_ndat, axis=axis)
assert_almost_equal(res, tgt)
tgt = np.cumsum(_ndat_zeros,axis=axis)
res = np.nancumsum(_ndat, axis=axis)
assert_almost_equal(res, tgt)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.eye(3)
for axis in (-2, -1, 0, 1):
tgt = rf(mat, axis=axis)
res = nf(mat, axis=axis, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
sup.filter(np.ComplexWarning)
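                    # A row is expected to yield NaN when ddof >= its count of
                    # non-NaN entries; any such row makes the call emit a single
                    # RuntimeWarning, which is what the assertions below check.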
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 0)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 2)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
if axis is None:
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 3)
# Check scalar
assert_(np.isnan(np.nanmedian(np.nan)))
if axis is None:
assert_(len(sup.log) == 2)
else:
assert_(len(sup.log) == 4)
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanmedian, d, axis=-5)
assert_raises(IndexError, np.nanmedian, d, axis=(0, -5))
assert_raises(IndexError, np.nanmedian, d, axis=4)
assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan])
assert_equal(np.nanmedian(a), np.inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
# no mask path
a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
class TestNanFunctions_Percentile(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanpercentile(ndat, 30)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.percentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
res = np.nanpercentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanpercentile(d, 90, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
assert_almost_equal(res, tgt)
# Transpose the array to fit the output convention of numpy.percentile
tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
res = np.nanpercentile(_ndat, (28, 98), axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanpercentile(np.nan, 60)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanpercentile(0., 100) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4))
assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
def test_multiple_percentiles(self):
perc = [50, 100]
mat = np.ones((4, 3))
nan_mat = np.nan * mat
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "All-NaN slice encountered")
val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val.shape, val.shape)
val = np.percentile(large_mat, perc, axis=axis,
keepdims=keepdim)
nan_val = np.nanpercentile(large_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val, val)
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
nolanliou/tensorflow | tensorflow/contrib/nn/__init__.py | 56 | 1688 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn.
@@alpha_dropout
@@conv1d_transpose
@@deprecated_flipped_softmax_cross_entropy_with_logits
@@deprecated_flipped_sparse_softmax_cross_entropy_with_logits
@@deprecated_flipped_sigmoid_cross_entropy_with_logits
@@nth_element
@@rank_sampled_softmax_loss
@@sampled_sparse_softmax_loss
@@scaled_softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.nn.python.ops.alpha_dropout import *
from tensorflow.contrib.nn.python.ops.cross_entropy import *
from tensorflow.contrib.nn.python.ops.sampling_ops import *
from tensorflow.contrib.nn.python.ops.scaled_softplus import *
from tensorflow.python.ops.nn_ops import conv1d_transpose
from tensorflow.python.ops.nn_ops import nth_element
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
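# Illustrative usage (an assumption based on the TF 1.x graph-mode API; see the
# individual op docstrings for the exact signatures):
#   x = tf.random_normal([8, 16])
#   y = tf.contrib.nn.alpha_dropout(x, keep_prob=0.8)  # SELU-preserving dropout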
| apache-2.0 |
zingale/hydro_examples | advection/weno_coefficients.py | 3 | 18621 | import numpy
# Coefficients of order r=2
# On smooth solutions this should converge with order r=3
C_2 = numpy.array([ 1, 2 ]) / 3
a_2 = numpy.array([
[ 3, -1],
[ 1, 1],
]) / 2
sigma_2 = numpy.array([
[
[ 1, 0],
[-2, 1]
],
[
[ 1, 0],
[-2, 1]
]
])
# Coefficients of order r=3
# On smooth solutions this should converge with order r=5
C_3 = numpy.array([ 1, 6, 3 ]) / 10
a_3 = numpy.array([
[ 11, -7, 2],
[ 2, 5, -1],
[ -1, 5, 2],
]) / 6
sigma_3 = numpy.array([
[
[ 10, 0, 0],
[-31, 25, 0],
[ 11, -19, 4]
],
[
[ 4, 0, 0],
[-13, 13, 0],
[ 5, -13, 4]
],
[
[ 4, 0, 0],
[-19, 25, 0],
[ 11, -31, 10]
]
]) / 3
# Coefficients of order r=4
# On smooth solutions this should converge with order r=7
C_4 = numpy.array([ 1, 12, 18, 4 ]) / 35
a_4 = numpy.array([
[ 25, -23, 13, -3],
[ 3, 13, -5, 1],
[ -1, 7, 7, -1],
[ 1, -5, 13, 3],
]) / 12
sigma_4 = numpy.array([
[
[ 2107, 0, 0, 0],
[ -9402, 11003, 0, 0],
[ 7042, -17246, 7043, 0],
[ -1854, 4642, -3882, 547]
],
[
[ 547, 0, 0, 0],
[ -2522, 3443, 0, 0],
[ 1922, -5966, 2843, 0],
[ -494, 1602, -1642, 267]
],
[
[ 267, 0, 0, 0],
[ -1642, 2843, 0, 0],
[ 1602, -5966, 3443, 0],
[ -494, 1922, -2522, 547]
],
[
[ 547, 0, 0, 0],
[ -3882, 7043, 0, 0],
[ 4642, -17246, 11003, 0],
[ -1854, 7042, -9402, 2107]
]
]) / 240
# Coefficients of order r=5
# On smooth solutions this should converge with order r=9
C_5 = numpy.array([ 1, 20, 60, 40, 5 ]) / 126
a_5 = numpy.array([
[ 137, -163, 137, -63, 12],
[ 12, 77, -43, 17, -3],
[ -3, 27, 47, -13, 2],
[ 2, -13, 47, 27, -3],
[ -3, 17, -43, 77, 12],
]) / 60
sigma_5 = numpy.array([
[
[ 107918, 0, 0, 0, 0],
[ -649501, 1020563, 0, 0, 0],
[ 758823, -2462076, 1521393, 0, 0],
[ -411487, 1358458, -1704396, 482963, 0],
[ 86329, -288007, 364863, -208501, 22658]
],
[
[ 22658, 0, 0, 0, 0],
[ -140251, 242723, 0, 0, 0],
[ 165153, -611976, 406293, 0, 0],
[ -88297, 337018, -464976, 138563, 0],
[ 18079, -70237, 99213, -60871, 6908]
],
[
[ 6908, 0, 0, 0, 0],
[ -51001, 104963, 0, 0, 0],
[ 67923, -299076, 231153, 0, 0],
[ -38947, 179098, -299076, 104963, 0],
[ 8209, -38947, 67923, -51001, 6908]
],
[
[ 6908, 0, 0, 0, 0],
[ -60871, 138563, 0, 0, 0],
[ 99213, -464976, 406293, 0, 0],
[ -70237, 337018, -611976, 242723, 0],
[ 18079, -88297, 165153, -140251, 22658]
],
[
[ 22658, 0, 0, 0, 0],
[ -208501, 482963, 0, 0, 0],
[ 364863, -1704396, 1521393, 0, 0],
[ -288007, 1358458, -2462076, 1020563, 0],
[ 86329, -411487, 758823, -649501, 107918]
]
]) / 5040
# Coefficients of order r=6
# On smooth solutions this should converge with order r=11
C_6 = numpy.array([ 1, 30, 150, 200, 75, 6 ]) / 462
a_6 = numpy.array([
[ 147, -213, 237, -163, 62, -10],
[ 10, 87, -63, 37, -13, 2],
[ -2, 22, 57, -23, 7, -1],
[ 1, -8, 37, 37, -8, 1],
[ -1, 7, -23, 57, 22, -2],
[ 2, -13, 37, -63, 87, 10],
]) / 60
sigma_6 = numpy.array([
[
[ 6150211, 0, 0, 0, 0, 0],
[ -47460464, 94851237, 0, 0, 0, 0],
[ 76206736, -311771244, 260445372, 0, 0, 0],
[ -63394124, 262901672, -444003904, 190757572, 0, 0],
[ 27060170, -113206788, 192596472, -166461044, 36480687, 0],
[ -4712740, 19834350, -33918804, 29442256, -12950184, 1152561]
],
[
[ 1152561, 0, 0, 0, 0, 0],
[ -9117992, 19365967, 0, 0, 0, 0],
[ 14742480, -65224244, 56662212, 0, 0, 0],
[ -12183636, 55053752, -97838784, 43093692, 0, 0],
[ 5134574, -23510468, 42405032, -37913324, 8449957, 0],
[ -880548, 4067018, -7408908, 6694608, -3015728, 271779]
],
[
[ 271779, 0, 0, 0, 0, 0],
[ -2380800, 5653317, 0, 0, 0, 0],
[ 4086352, -20427884, 19510972, 0, 0, 0],
[ -3462252, 17905032, -35817664, 17195652, 0, 0],
[ 1458762, -7727988, 15929912, -15880404, 3824847, 0],
[ -245620, 1325006, -2792660, 2863984, -1429976, 139633]
],
[
[ 139633, 0, 0, 0, 0, 0],
[ -1429976, 3824847, 0, 0, 0, 0],
[ 2863984, -15880404, 17195652, 0, 0, 0],
[ -2792660, 15929912, -35817664, 19510972, 0, 0],
[ 1325006, -7727988, 17905032, -20427884, 5653317, 0],
[ -245620, 1458762, -3462252, 4086352, -2380800, 271779]
],
[
[ 271779, 0, 0, 0, 0, 0],
[ -3015728, 8449957, 0, 0, 0, 0],
[ 6694608, -37913324, 43093692, 0, 0, 0],
[ -7408908, 42405032, -97838784, 56662212, 0, 0],
[ 4067018, -23510468, 55053752, -65224244, 19365967, 0],
[ -880548, 5134574, -12183636, 14742480, -9117992, 1152561]
],
[
[ 1152561, 0, 0, 0, 0, 0],
[ -12950184, 36480687, 0, 0, 0, 0],
[ 29442256, -166461044, 190757572, 0, 0, 0],
[ -33918804, 192596472, -444003904, 260445372, 0, 0],
[ 19834350, -113206788, 262901672, -311771244, 94851237, 0],
[ -4712740, 27060170, -63394124, 76206736, -47460464, 6150211]
]
]) / 120960
# Coefficients of order r=7
# On smooth solutions this should converge with order r=13
C_7 = numpy.array([ 1, 42, 315, 700, 525, 126, 7 ]) / 1716
a_7 = numpy.array([
[ 1089, -1851, 2559, -2341, 1334, -430, 60],
[ 60, 669, -591, 459, -241, 74, -10],
[ -10, 130, 459, -241, 109, -31, 4],
[ 4, -38, 214, 319, -101, 25, -3],
[ -3, 25, -101, 319, 214, -38, 4],
[ 4, -31, 109, -241, 459, 130, -10],
[ -10, 74, -241, 459, -591, 669, 60],
]) / 420
sigma_7 = numpy.array([
[
[ 7177657304, 0, 0, 0, 0, 0, 0],
[ -68289277071, 166930543737, 0, 0, 0, 0, 0],
[ 140425750893, -698497961463, 739478564460, 0, 0, 0, 0],
[ -158581758572, 797280592452, -1701893556420, 985137198380, 0, 0, 0],
[ 102951716988, -521329653333, 1119254208255, -1301580166020, 431418789360, 0, 0],
[ -36253275645, 184521097818, -397822832973, 464200620612, -308564463663, 55294430841, 0],
[ 5391528799, -27545885877, 59577262788, -69700128812, 46430779053, -16670007831, 1258225940]
],
[
[ 1258225940, 0, 0, 0, 0, 0, 0],
[ -12223634361, 31090026771, 0, 0, 0, 0, 0],
[ 25299603603, -132164397513, 143344579860, 0, 0, 0, 0],
[ -28498553012, 151212114012, -332861569020, 195601143380, 0, 0, 0],
[ 18375686988, -98508059523, 219064013505, -259838403420, 86959466460, 0, 0],
[ -6414710427, 34632585198, -77574968883, 92646554652, -62392325913, 11250068787, 0],
[ 945155329, -5128661355, 11548158588, -13862429972, 9380155443, -3397272201, 257447084]
],
[
[ 257447084, 0, 0, 0, 0, 0, 0],
[ -2659103847, 7257045753, 0, 0, 0, 0, 0],
[ 5684116173, -32164185663, 36922302360, 0, 0, 0, 0],
[ -6473137292, 37531128132, -88597133220, 54531707180, 0, 0, 0],
[ 4158865908, -24530177853, 59045150655, -74236325220, 25788772260, 0, 0],
[ -1432622085, 8555779674, -20891234853, 26694456132, -18869146983, 3510366201, 0],
[ 206986975, -1247531949, 3078682188, -3982402892, 2854088973, -1077964287, 84070496]
],
[
[ 84070496, 0, 0, 0, 0, 0, 0],
[ -969999969, 2927992563, 0, 0, 0, 0, 0],
[ 2283428883, -14296379553, 18133963560, 0, 0, 0, 0],
[ -2806252532, 18083339772, -47431870620, 32154783380, 0, 0, 0],
[ 1902531828, -12546315963, 33820678305, -47431870620, 18133963560, 0, 0],
[ -676871859, 4550242446, -12546315963, 18083339772, -14296379553, 2927992563, 0],
[ 99022657, -676871859, 1902531828, -2806252532, 2283428883, -969999969, 84070496]
],
[
[ 84070496, 0, 0, 0, 0, 0, 0],
[ -1077964287, 3510366201, 0, 0, 0, 0, 0],
[ 2854088973, -18869146983, 25788772260, 0, 0, 0, 0],
[ -3982402892, 26694456132, -74236325220, 54531707180, 0, 0, 0],
[ 3078682188, -20891234853, 59045150655, -88597133220, 36922302360, 0, 0],
[ -1247531949, 8555779674, -24530177853, 37531128132, -32164185663, 7257045753, 0],
[ 206986975, -1432622085, 4158865908, -6473137292, 5684116173, -2659103847, 257447084]
],
[
[ 257447084, 0, 0, 0, 0, 0, 0],
[ -3397272201, 11250068787, 0, 0, 0, 0, 0],
[ 9380155443, -62392325913, 86959466460, 0, 0, 0, 0],
[ -13862429972, 92646554652, -259838403420, 195601143380, 0, 0, 0],
[ 11548158588, -77574968883, 219064013505, -332861569020, 143344579860, 0, 0],
[ -5128661355, 34632585198, -98508059523, 151212114012, -132164397513, 31090026771, 0],
[ 945155329, -6414710427, 18375686988, -28498553012, 25299603603, -12223634361, 1258225940]
],
[
[ 1258225940, 0, 0, 0, 0, 0, 0],
[ -16670007831, 55294430841, 0, 0, 0, 0, 0],
[ 46430779053, -308564463663, 431418789360, 0, 0, 0, 0],
[ -69700128812, 464200620612, -1301580166020, 985137198380, 0, 0, 0],
[ 59577262788, -397822832973, 1119254208255, -1701893556420, 739478564460, 0, 0],
[ -27545885877, 184521097818, -521329653333, 797280592452, -698497961463, 166930543737, 0],
[ 5391528799, -36253275645, 102951716988, -158581758572, 140425750893, -68289277071, 7177657304]
]
]) / 59875200
C_all = {
2 : C_2,
3 : C_3,
4 : C_4,
5 : C_5,
6 : C_6,
7 : C_7
}
a_all = {
2 : a_2,
3 : a_3,
4 : a_4,
5 : a_5,
6 : a_6,
7 : a_7
}
sigma_all = {
2 : sigma_2,
3 : sigma_3,
4 : sigma_4,
5 : sigma_5,
6 : sigma_6,
7 : sigma_7
}
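# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption about how these tables are consumed, not
# part of the coefficient data above): a straightforward WENO reconstruction
# of cell-centred data to the right cell edge, treating C_all as the optimal
# linear weights, a_all as the per-stencil reconstruction coefficients and
# sigma_all as the lower-triangular quadratic forms of the Jiang-Shu
# smoothness indicators.
def weno_reconstruct_sketch(order, q, epsilon=1e-16):
    """Reconstruct q at the right cell edges with WENO of the given order.

    The first and last `order` entries of the result are left at zero
    (boundary zones).
    """
    C = C_all[order]
    a = a_all[order]
    sigma = sigma_all[order]
    qL = numpy.zeros(len(q))
    for i in range(order, len(q) - order):
        beta = numpy.zeros(order)        # smoothness indicators
        q_stencils = numpy.zeros(order)  # per-stencil reconstructions
        for k in range(order):
            for l in range(order):
                for m in range(l + 1):
                    beta[k] += sigma[k, l, m] * q[i + k - l] * q[i + k - m]
                q_stencils[k] += a[k, l] * q[i + k - l]
        alpha = C / (epsilon + beta)**2  # unnormalised nonlinear weights
        w = alpha / alpha.sum()
        qL[i] = numpy.dot(w, q_stencils)
    return qL
# Example: weno_reconstruct_sketch(3, q) combines three 3-point stencils and,
# on smooth data, recovers the 5th-order accurate edge value.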
| bsd-3-clause |
pombredanne/pants | tests/python/pants_test/backend/codegen/antlr/java/test_antlr_java_gen_integration.py | 8 | 1286 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class AntlrJavaGenIntegrationTest(PantsRunIntegrationTest):
def test_run_antlr3(self):
stdout_data = self.bundle_and_run('examples/src/java/org/pantsbuild/example/antlr3',
'examples.src.java.org.pantsbuild.example.antlr3.antlr3',
bundle_jar_name='antlr3',
args=['7*8'])
self.assertEquals('56.0', stdout_data.rstrip(), msg="got output:{0}".format(stdout_data))
def test_run_antlr4(self):
stdout_data = self.bundle_and_run('examples/src/java/org/pantsbuild/example/antlr4',
'examples.src.java.org.pantsbuild.example.antlr4.antlr4',
bundle_jar_name='antlr4',
args=['7*6'])
self.assertEquals('42.0', stdout_data.rstrip(), msg="got output:{0}".format(stdout_data))
| apache-2.0 |
KeyWeeUsr/kivy | examples/frameworks/twisted/echo_client_app.py | 13 | 2352 | # install_twisted_rector must be called before importing the reactor
from __future__ import unicode_literals
from kivy.support import install_twisted_reactor
install_twisted_reactor()
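# install_twisted_reactor() installs a Twisted reactor that is driven by
# Kivy's event loop, so reactor callbacks run inside the Kivy main loop
# without needing a separate thread.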
# A simple client that sends messages to the Echo Server
from twisted.internet import reactor, protocol
class EchoClient(protocol.Protocol):
def connectionMade(self):
self.factory.app.on_connection(self.transport)
def dataReceived(self, data):
self.factory.app.print_message(data.decode('utf-8'))
class EchoClientFactory(protocol.ClientFactory):
protocol = EchoClient
def __init__(self, app):
self.app = app
def startedConnecting(self, connector):
self.app.print_message('Started to connect.')
def clientConnectionLost(self, connector, reason):
self.app.print_message('Lost connection.')
def clientConnectionFailed(self, connector, reason):
self.app.print_message('Connection failed.')
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
# A simple kivy App, with a textbox to enter messages, and
# a large label to display all the messages received from
# the server
class TwistedClientApp(App):
connection = None
textbox = None
label = None
def build(self):
root = self.setup_gui()
self.connect_to_server()
return root
def setup_gui(self):
self.textbox = TextInput(size_hint_y=.1, multiline=False)
self.textbox.bind(on_text_validate=self.send_message)
self.label = Label(text='connecting...\n')
layout = BoxLayout(orientation='vertical')
layout.add_widget(self.label)
layout.add_widget(self.textbox)
return layout
def connect_to_server(self):
reactor.connectTCP('localhost', 8000, EchoClientFactory(self))
def on_connection(self, connection):
self.print_message("Connected successfully!")
self.connection = connection
def send_message(self, *args):
msg = self.textbox.text
if msg and self.connection:
self.connection.write(msg.encode('utf-8'))
self.textbox.text = ""
def print_message(self, msg):
self.label.text += "{}\n".format(msg)
if __name__ == '__main__':
TwistedClientApp().run()
| mit |
utessel/edimax | target/linux/x86/image/mkimg_bifferboard.py | 561 | 1265 | #!/usr/bin/env python
"""
Create firmware for 4/8MB Bifferboards, suitable for uploading using
either bb_upload8.py or bb_eth_upload8.py
"""
import struct, sys
# Increase the kmax value if the script gives errors about the kernel being
# too large. You need to set the Biffboot kmax value to the same value you
# use here.
kmax = 0x10
# No need to change this for 4MB devices, it's only used to tell you if
# the firmware is too large!
flash_size = 0x800000
# This is always the same, for 1MB, 4MB and 8MB devices
config_extent = 0x6000
kernel_extent = kmax * 0x10000
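# Layout of the image built below (the file itself is presumably flashed
# starting at offset config_extent): the kernel, padded with 0xff up to the
# kmax boundary, followed by the rootfs.  The size check keeps the final
# 64KiB of flash (bootloader area) free.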
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: mkimg_bifferboard.py <kernel> <rootfs> <output file>"
sys.exit(-1)
bzimage = sys.argv[1]
rootfs = sys.argv[2]
target = sys.argv[3]
# Kernel first
fw = file(bzimage).read()
if len(fw) > (kernel_extent - config_extent):
raise IOError("Kernel too large")
# Pad up to end of kernel partition
while len(fw) < (kernel_extent - config_extent):
fw += "\xff"
fw += file(rootfs).read()
# Check length of total
if len(fw) > (flash_size - 0x10000 - config_extent):
raise IOError("Rootfs too large")
file(target,"wb").write(fw)
print "Firmware written to '%s'" % target
| gpl-2.0 |
thaumos/ansible | lib/ansible/modules/storage/netapp/na_ontap_vscan_scanner_pool.py | 21 | 8931 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_vscan_scanner_pool
short_description: NetApp ONTAP Vscan Scanner Pools Configuration.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Configure a Vscan Scanner Pool
options:
state:
description:
- Whether a Vscan Scanner pool is present or not
choices: ['present', 'absent']
default: present
vserver:
description:
- the name of the data vserver to use.
required: true
hostnames:
description:
- List of hostnames of Vscan servers which are allowed to connect to Data ONTAP
privileged_users:
description:
- List of privileged usernames. Username must be in the form "domain-name\\user-name"
scanner_pool:
description:
- the name of the virus scanner pool
required: true
scanner_policy:
description:
- The name of the Virus scanner Policy
choices: ['primary', 'secondary', 'idle']
'''
EXAMPLES = """
- name: Create and enable Scanner pool
na_ontap_vscan_scanner_pool:
state: present
username: '{{ netapp_username }}'
password: '{{ netapp_password }}'
hostname: '{{ netapp_hostname }}'
vserver: carchi-vsim2
hostnames: ['name', 'name2']
privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi']
scanner_pool: Scanner1
scanner_policy: primary
- name: Delete a scanner pool
na_ontap_vscan_scanner_pool:
state: absent
username: '{{ netapp_username }}'
password: '{{ netapp_password }}'
hostname: '{{ netapp_hostname }}'
vserver: carchi-vsim2
scanner_pool: Scanner1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVscanScannerPool(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
            hostnames=dict(required=False, type='list'),
privileged_users=dict(required=False, type='list'),
scanner_pool=dict(required=True, type='str'),
scanner_policy=dict(required=False, choices=['primary', 'secondary', 'idle'])
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
self.hostnames = parameters['hostnames']
self.vserver = parameters['vserver']
self.privileged_users = parameters['privileged_users']
self.scanner_pool = parameters['scanner_pool']
self.state = parameters['state']
self.scanner_policy = parameters['scanner_policy']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
def create_scanner_pool(self):
"""
Create a Vscan Scanner Pool
:return: nothing
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create')
if self.hostnames:
string_obj = netapp_utils.zapi.NaElement('hostnames')
scanner_pool_obj.add_child_elem(string_obj)
for hostname in self.hostnames:
string_obj.add_new_child('string', hostname)
if self.privileged_users:
users_obj = netapp_utils.zapi.NaElement('privileged-users')
scanner_pool_obj.add_child_elem(users_obj)
for user in self.privileged_users:
users_obj.add_new_child('privileged-user', user)
scanner_pool_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def apply_policy(self):
"""
Apply a Scanner policy to a Scanner pool
:return: nothing
"""
apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy')
apply_policy_obj.add_new_child('scanner-policy', self.scanner_policy)
apply_policy_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(apply_policy_obj, True)
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error applying policy %s to pool %s: %s' %
(self.scanner_policy, self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def get_scanner_pool(self):
"""
        Check to see if a scanner pool exists or not
        :return: True if it exists, False if it does not
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter')
        scanner_pool_info = netapp_utils.zapi.NaElement('vscan-scanner-pool-info')
scanner_pool_info.add_new_child('scanner-pool', self.scanner_pool)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(scanner_pool_info)
scanner_pool_obj.add_child_elem(query)
try:
result = self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records'):
if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content(
'scanner-pool') == self.scanner_pool:
return result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info')
return False
return False
def delete_scanner_pool(self):
"""
Delete a Scanner pool
:return: nothing
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete')
scanner_pool_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def asup_log_for_cserver(self, event_name):
"""
Fetch admin vserver for the given cluster
        Create an Autosupport log event with the given module name
:param event_name: Name of the event log
:return: None
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event(event_name, cserver)
def apply(self):
self.asup_log_for_cserver("na_ontap_vscan_scanner_pool")
changed = False
scanner_pool_obj = self.get_scanner_pool()
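        # Idempotency: create the pool (and apply the policy) only if it does
        # not exist yet, re-apply the policy only when it differs, and delete
        # only when the pool is still present.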
if self.state == 'present':
if not scanner_pool_obj:
self.create_scanner_pool()
if self.scanner_policy:
self.apply_policy()
changed = True
# apply Scanner policy
if scanner_pool_obj:
if scanner_pool_obj.get_child_content('scanner-policy') != self.scanner_policy:
self.apply_policy()
changed = True
if self.state == 'absent':
if scanner_pool_obj:
self.delete_scanner_pool()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Execute action from playbook
"""
command = NetAppOntapVscanScannerPool()
command.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
awkspace/ansible | lib/ansible/modules/network/fortios/fortios_firewall_multicast_address6.py | 24 | 10083 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; its output can be captured by enabling
# logging in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_multicast_address6
short_description: Configure IPv6 multicast address in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and multicast_address6 category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_multicast_address6:
description:
- Configure IPv6 multicast address.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
ip6:
description:
- "IPv6 address prefix (format: xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx/xxx)."
name:
description:
- IPv6 multicast address name.
required: true
tagging:
description:
- Config object tagging.
suboptions:
category:
description:
- Tag category. Source system.object-tagging.category.
name:
description:
- Tagging entry name.
required: true
tags:
description:
- Tags.
suboptions:
name:
description:
- Tag name. Source system.object-tagging.tags.name.
required: true
visibility:
description:
- Enable/disable visibility of the IPv6 multicast address on the GUI.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 multicast address.
fortios_firewall_multicast_address6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_multicast_address6:
state: "present"
color: "3"
comment: "Comment."
ip6: "<your_own_value>"
name: "default_name_6"
tagging:
-
category: "<your_own_value> (source system.object-tagging.category)"
name: "default_name_9"
tags:
-
name: "default_name_11 (source system.object-tagging.tags.name)"
visibility: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_multicast_address6_data(json):
option_list = ['color', 'comment', 'ip6',
'name', 'tagging', 'visibility']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_multicast_address6(data, fos):
vdom = data['vdom']
firewall_multicast_address6_data = data['firewall_multicast_address6']
filtered_data = filter_firewall_multicast_address6_data(firewall_multicast_address6_data)
if firewall_multicast_address6_data['state'] == "present":
return fos.set('firewall',
'multicast-address6',
data=filtered_data,
vdom=vdom)
elif firewall_multicast_address6_data['state'] == "absent":
return fos.delete('firewall',
'multicast-address6',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_multicast_address6']
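    # Dispatch to the first configuration section supplied in the task args;
    # eval() resolves the module-level handler function of the same name.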
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_multicast_address6": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"ip6": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"tagging": {"required": False, "type": "list",
"options": {
"category": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"tags": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}}
}},
"visibility": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
shubhdev/openedx | common/djangoapps/student/migrations/0007_convert_to_utf8.py | 188 | 9663 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
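        # Convert the existing student_* tables (and the database default
        # charset) to UTF-8; this is a no-op on non-MySQL backends.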
if db.backend_name == 'mysql':
db.execute_many("""
ALTER DATABASE CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_pendingemailchange CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_pendingnamechange CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_usertestgroup CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_usertestgroup_users CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
""")
def backwards(self, orm):
# Although this migration can't be undone, it is okay for it to be run backwards because it doesn't add/remove any fields
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
luisgg/iteexe | twisted/internet/gtk2reactor.py | 14 | 9981 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2 mainloop.
In order to use this support, simply do the following::
| from twisted.internet import gtk2reactor
| gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
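For non-GUI processes the glib-only loop can be selected explicitly with
C{gtk2reactor.install(useGtk=False)}.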
API Stability: stable
Maintainer: U{Itamar Shtull-Trauring<mailto:[email protected]>}
"""
__all__ = ['install']
# System Imports
import sys
from zope.interface import implements
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
import pygtk
pygtk.require('2.0')
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
# Twisted Imports
from twisted.python import log, threadable, runtime, failure, components
from twisted.internet.interfaces import IReactorFDSet
# Sibling Imports
from twisted.internet import main, posixbase, error, selectreactor
reads = {}
writes = {}
hasReader = reads.has_key
hasWriter = writes.has_key
# the next callback
_simtag = None
POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED
def _our_mainquit():
# XXX: gtk.main_quit() (which is used for crash()) raises an exception if
# gtk.main_level() == 0; however, all the tests freeze if we use this
# function to stop the reactor. what gives? (I believe this may have been
# a stupid mistake where I forgot to import gtk here... I will remove this
# comment if the tests pass)
import gtk
if gtk.main_level():
gtk.main_quit()
class Gtk2Reactor(posixbase.PosixReactorBase):
"""GTK+-2 event loop reactor.
"""
implements(IReactorFDSet)
def __init__(self, useGtk=True):
self.context = gobject.main_context_default()
self.loop = gobject.MainLoop()
posixbase.PosixReactorBase.__init__(self)
# pre 2.3.91 the glib iteration and mainloop functions didn't release
# global interpreter lock, thus breaking thread and signal support.
if (hasattr(gobject, "pygtk_version") and gobject.pygtk_version >= (2, 3, 91)
and not useGtk):
self.__pending = self.context.pending
self.__iteration = self.context.iteration
self.__crash = self.loop.quit
self.__run = self.loop.run
else:
import gtk
self.__pending = gtk.events_pending
self.__iteration = gtk.main_iteration
self.__crash = _our_mainquit
self.__run = gtk.main
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(source, condition, real_s=source, real_cb=callback):
return real_cb(real_s, condition)
return gobject.io_add_watch(source.fileno(), condition, wrapper)
else:
return gobject.io_add_watch(source, condition, callback)
def addReader(self, reader):
if not hasReader(reader):
reads[reader] = self.input_add(reader, INFLAGS, self.callback)
def addWriter(self, writer):
if not hasWriter(writer):
writes[writer] = self.input_add(writer, OUTFLAGS, self.callback)
def removeAll(self):
return self._removeAll(reads, writes)
def removeReader(self, reader):
if hasReader(reader):
gobject.source_remove(reads[reader])
del reads[reader]
def removeWriter(self, writer):
if hasWriter(writer):
gobject.source_remove(writes[writer])
del writes[writer]
doIterationTimer = None
def doIterationTimeout(self, *args):
self.doIterationTimer = None
return 0 # auto-remove
def doIteration(self, delay):
# flush some pending events, return if there was something to do
# don't use the usual "while self.context.pending(): self.context.iteration()"
# idiom because lots of IO (in particular test_tcp's
# ProperlyCloseFilesTestCase) can keep us from ever exiting.
log.msg(channel='system', event='iteration', reactor=self)
if self.__pending():
self.__iteration(0)
return
# nothing to do, must delay
if delay == 0:
return # shouldn't delay, so just return
self.doIterationTimer = gobject.timeout_add(int(delay * 1000),
self.doIterationTimeout)
# This will either wake up from IO or from a timeout.
self.__iteration(1) # block
# note: with the .simulate timer below, delays > 0.1 will always be
# woken up by the .simulate timer
if self.doIterationTimer:
# if woken by IO, need to cancel the timer
gobject.source_remove(self.doIterationTimer)
self.doIterationTimer = None
def crash(self):
self.__crash()
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
self.__run()
def _doReadOrWrite(self, source, condition, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost()),
}):
why = None
didRead = None
if condition & POLL_DISCONNECTED and \
not (condition & gobject.IO_IN):
why = main.CONNECTION_LOST
else:
try:
if condition & gobject.IO_IN:
why = source.doRead()
didRead = source.doRead
if not why and condition & gobject.IO_OUT:
# if doRead caused connectionLost, don't call doWrite
# if doRead is doWrite, don't call it again.
if not source.disconnected and source.doWrite != didRead:
why = source.doWrite()
didRead = source.doWrite # if failed it was in write
except:
why = sys.exc_info()[1]
log.msg('Error In %s' % source)
log.deferr()
if why:
self._disconnectSelectable(source, why, didRead == source.doRead)
def callback(self, source, condition):
log.callWithLogger(source, self._doReadOrWrite, source, condition)
self.simulate() # fire Twisted timers
return 1 # 1=don't auto-remove the source
def simulate(self):
"""Run simulation loops and reschedule callbacks.
"""
global _simtag
if _simtag is not None:
gobject.source_remove(_simtag)
self.runUntilCurrent()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
_simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
components.backwardsCompatImplements(Gtk2Reactor)
class PortableGtkReactor(selectreactor.SelectReactor):
"""Reactor that works on Windows.
input_add is not supported on GTK+ for Win32, apparently.
"""
def crash(self):
import gtk
# mainquit is deprecated in newer versions
if hasattr(gtk, 'main_quit'):
gtk.main_quit()
else:
gtk.mainquit()
def run(self, installSignalHandlers=1):
import gtk
self.startRunning(installSignalHandlers=installSignalHandlers)
self.simulate()
# mainloop is deprecated in newer versions
if hasattr(gtk, 'main'):
gtk.main()
else:
gtk.mainloop()
def simulate(self):
"""Run simulation loops and reschedule callbacks.
"""
global _simtag
if _simtag is not None:
gobject.source_remove(_simtag)
self.iterate()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
_simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
def install(useGtk=True):
"""Configure the twisted mainloop to be run inside the gtk mainloop.
@param useGtk: should glib rather than GTK+ event loop be
used (this will be slightly faster but does not support GUI).
"""
reactor = Gtk2Reactor(useGtk)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
def portableInstall(useGtk=True):
"""Configure the twisted mainloop to be run inside the gtk mainloop.
"""
reactor = PortableGtkReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
if runtime.platform.getType() != 'posix':
install = portableInstall
| gpl-2.0 |
flavour/tldrmp | modules/s3cfg.py | 1 | 73402 | # -*- coding: utf-8 -*-
""" Deployment Settings
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Config"]
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current, URL, TR, TD
from gluon.storage import Storage
class S3Config(Storage):
"""
Deployment Settings Helper Class
"""
def __init__(self):
self.auth = Storage()
self.auth.email_domains = []
self.base = Storage()
self.database = Storage()
# @ToDo: Move to self.ui
self.frontpage = Storage()
self.frontpage.rss = []
self.fin = Storage()
self.L10n = Storage()
self.mail = Storage()
self.msg = Storage()
self.search = Storage()
self.security = Storage()
self.ui = Storage()
self.cap = Storage()
self.gis = Storage()
self.hrm = Storage()
self.inv = Storage()
self.irs = Storage()
self.org = Storage()
self.pr = Storage()
self.proc = Storage()
self.project = Storage()
self.req = Storage()
self.supply = Storage()
self.hms = Storage()
# -------------------------------------------------------------------------
# Template
def get_template(self):
"""
Which deployment template to use for config.py, parser.py, menus.py, etc
http://eden.sahanafoundation.org/wiki/BluePrint/Templates
"""
return self.base.get("template", "default")
def exec_template(self, path):
"""
Execute the template
"""
from gluon.fileutils import read_file
from gluon.restricted import restricted
code = read_file(path)
restricted(code, layer=path)
return
# -------------------------------------------------------------------------
# Theme
def get_theme(self):
"""
Which templates folder to use for views/layout.html
"""
return self.base.get("theme", "default")
def get_base_xtheme(self):
"""
Whether there is a custom Ext theme or simply use the default xtheme-gray
- specified as <themefolder>/xtheme-<filename>.css
"""
return self.base.get("xtheme", None)
# -------------------------------------------------------------------------
def is_cd_version(self):
"""
Whether we're running from a non-writable CD
"""
return self.base.get("cd_version", False)
# -------------------------------------------------------------------------
def get_google_analytics_tracking_id(self):
"""
Google Analytics Key
"""
return self.base.get("google_analytics_tracking_id", None)
# -------------------------------------------------------------------------
def get_youtube_id(self):
"""
List of YouTube IDs for the /default/video page
"""
return self.base.get("youtube_id", [])
# -------------------------------------------------------------------------
# Authentication settings
def get_auth_hmac_key(self):
"""
salt to encrypt passwords - normally randomised during 1st run
"""
return self.auth.get("hmac_key", "akeytochange")
def get_auth_password_min_length(self):
"""
To set the Minimum Password Length
"""
return self.auth.get("password_min_length", int(4))
def get_auth_facebook(self):
"""
Read the FaceBook OAuth settings
- if configured, then it is assumed that FaceBook Authentication is enabled
"""
id = self.auth.get("facebook_id", False)
secret = self.auth.get("facebook_secret", False)
if id and secret:
return dict(id=id, secret=secret)
else:
return False
def get_auth_gmail_domains(self):
""" List of domains which can use GMail SMTP for Authentication """
return self.auth.get("gmail_domains", [])
def get_auth_google(self):
"""
Read the Google OAuth settings
- if configured, then it is assumed that Google Authentication is enabled
"""
id = self.auth.get("google_id", False)
secret = self.auth.get("google_secret", False)
if id and secret:
return dict(id=id, secret=secret)
else:
return False
def get_auth_openid(self):
""" Use OpenID for Authentication """
return self.auth.get("openid", False)
def get_security_self_registration(self):
return self.security.get("self_registration", True)
def get_auth_registration_requires_verification(self):
return self.auth.get("registration_requires_verification", False)
def get_auth_registration_requires_approval(self):
return self.auth.get("registration_requires_approval", False)
def get_auth_always_notify_approver(self):
return self.auth.get("always_notify_approver", True)
def get_auth_login_next(self):
""" Which page to go to after login """
return self.auth.get("login_next", URL(c="default", f="index"))
def get_auth_show_link(self):
return self.auth.get("show_link", True)
def get_auth_registration_link_user_to(self):
"""
Link User accounts to none or more of:
* Staff
* Volunteer
* Member
"""
return self.auth.get("registration_link_user_to", None)
def get_auth_registration_link_user_to_default(self):
"""
Link User accounts to none or more of:
* Staff
* Volunteer
* Member
"""
return self.auth.get("registration_link_user_to_default", None)
def get_auth_opt_in_team_list(self):
return self.auth.get("opt_in_team_list", [])
def get_auth_opt_in_to_email(self):
return self.get_auth_opt_in_team_list() != []
def get_auth_opt_in_default(self):
return self.auth.get("opt_in_default", False)
def get_auth_registration_requests_mobile_phone(self):
return self.auth.get("registration_requests_mobile_phone", False)
def get_auth_registration_mobile_phone_mandatory(self):
" Make the selection of Mobile Phone Mandatory during registration "
return self.auth.get("registration_mobile_phone_mandatory", False)
def get_auth_registration_requests_organisation(self):
" Have the registration form request the Organisation "
return self.auth.get("registration_requests_organisation", False)
def get_auth_admin_sees_organisation(self):
" See Organisations in User Admin"
return self.auth.get("admin_sees_organisation",
self.get_auth_registration_requests_organisation())
def get_auth_registration_organisation_required(self):
" Make the selection of Organisation required during registration "
return self.auth.get("registration_organisation_required", False)
def get_auth_registration_organisation_hidden(self):
" Hide the Organisation field in the registration form unless an email is entered which isn't whitelisted "
return self.auth.get("registration_organisation_hidden", False)
def get_auth_registration_organisation_default(self):
" Default the Organisation during registration "
return self.auth.get("registration_organisation_default", None)
def get_auth_registration_organisation_id_default(self):
" Default the Organisation during registration - will return the organisation_id"
name = self.auth.get("registration_organisation_default", None)
if name:
otable = current.s3db.org_organisation
orow = current.db(otable.name == name).select(otable.id).first()
if orow:
organisation_id = orow.id
else:
organisation_id = otable.insert(name = name)
else:
organisation_id = None
return organisation_id
def get_auth_registration_requests_organisation_group(self):
" Have the registration form request the Organisation Group "
return self.auth.get("registration_requests_organisation_group", False)
def get_auth_registration_organisation_group_required(self):
" Make the selection of Organisation Group required during registration "
return self.auth.get("registration_organisation_group_required", False)
def get_auth_registration_requests_site(self):
" Have the registration form request the Site "
return self.auth.get("registration_requests_site", False)
def get_auth_registration_site_required(self):
" Make the selection of site required during registration "
return self.auth.get("registration_site_required", False)
def get_auth_registration_requests_image(self):
""" Have the registration form request an Image """
return self.auth.get("registration_requests_image", False)
def get_auth_registration_pending(self):
""" Message someone gets when they register & they need approving """
message = self.auth.get("registration_pending", None)
if message:
return current.T(message)
approver = self.get_mail_approver()
if "@" in approver:
m = "Registration is still pending approval from Approver (%s) - please wait until confirmation received." % \
approver
else:
m = "Registration is still pending approval from the system administrator - please wait until confirmation received."
return current.T(m)
def get_auth_registration_pending_approval(self):
""" Message someone gets when they register & they need approving """
message = self.auth.get("registration_pending_approval", None)
if message:
return current.T(message)
approver = self.get_mail_approver()
if "@" in approver:
m = "Thank you for validating your email. Your user account is still pending for approval by the system administrator (%s). You will get a notification by email when your account is activated." % \
approver
else:
m = "Thank you for validating your email. Your user account is still pending for approval by the system administrator. You will get a notification by email when your account is activated."
return current.T(m)
def get_auth_registration_roles(self):
"""
A dictionary of realms, with lists of role UUIDs, to assign to newly-registered users
Use key = 0 to have the roles not restricted to a realm
"""
return self.auth.get("registration_roles", [])
def get_auth_terms_of_service(self):
"""
Force users to accept Terms of Service before registering an account
- uses <template>/views/tos.html
"""
return self.auth.get("terms_of_service", False)
def get_auth_registration_volunteer(self):
""" Redirect the newly-registered user to their volunteer details page """
return self.auth.get("registration_volunteer", False)
def get_auth_record_approval(self):
""" Use record approval (False by default) """
return self.auth.get("record_approval", False)
def get_auth_record_approval_required_for(self):
""" Which tables record approval is required for """
return self.auth.get("record_approval_required_for", [])
def get_auth_realm_entity(self):
""" Hook to determine the owner entity of a record """
return self.auth.get("realm_entity", None)
def get_auth_person_realm_human_resource_site_then_org(self):
"""
Should we set pr_person.realm_entity to that of
hrm_human_resource.site_id$pe_id
"""
return self.auth.get("person_realm_human_resource_site_then_org", False)
def get_auth_person_realm_member_org(self):
"""
Sets pr_person.realm_entity to
organisation.pe_id of member_member
"""
return self.auth.get("person_realm_member_org", False)
def get_auth_role_modules(self):
"""
Which modules are included in the Role Manager
- to assign discrete permissions to via UI
"""
T = current.T
return self.auth.get("role_modules", OrderedDict([
("staff", "Staff"),
("vol", "Volunteers"),
("member", "Members"),
("inv", "Warehouses"),
("asset", "Assets"),
("project", "Projects"),
("survey", "Assessments"),
("irs", "Incidents")
]))
def get_auth_access_levels(self):
"""
Access levels for the Role Manager UI
"""
T = current.T
return self.auth.get("access_levels", OrderedDict([
("reader", "Reader"),
("data_entry", "Data Entry"),
("editor", "Editor"),
("super", "Super Editor")
]))
def get_auth_set_presence_on_login(self):
return self.auth.get("set_presence_on_login", False)
def get_auth_ignore_levels_for_presence(self):
return self.auth.get("ignore_levels_for_presence", ["L0"])
def get_auth_create_unknown_locations(self):
return self.auth.get("create_unknown_locations", False)
def get_auth_show_utc_offset(self):
return self.auth.get("show_utc_offset", True)
def get_security_archive_not_delete(self):
return self.security.get("archive_not_delete", True)
def get_security_audit_read(self):
return self.security.get("audit_read", False)
def get_security_audit_write(self):
return self.security.get("audit_write", False)
def get_security_policy(self):
" Default is Simple Security Policy "
return self.security.get("policy", 1)
def get_security_strict_ownership(self):
"""
Ownership-rule for records without owner:
True = not owned by any user (strict ownership, default)
False = owned by any authenticated user
"""
return self.security.get("strict_ownership", True)
def get_security_map(self):
return self.security.get("map", False)
# -------------------------------------------------------------------------
# Base settings
def get_instance_name(self):
"""
Instance Name - for management scripts. e.g. prod or test
"""
return self.base.get("instance_name", "")
def get_system_name(self):
"""
System Name - for the UI & Messaging
"""
return self.base.get("system_name", current.T("Sahana Eden Humanitarian Management Platform"))
def get_system_name_short(self):
"""
System Name (Short Version) - for the UI & Messaging
"""
return self.base.get("system_name_short", "Sahana Eden")
def get_base_debug(self):
"""
Debug mode: Serve CSS/JS in separate uncompressed files
"""
return self.base.get("debug", False)
def get_base_migrate(self):
""" Whether to allow Web2Py to migrate the SQL database to the new structure """
return self.base.get("migrate", True)
def get_base_fake_migrate(self):
""" Whether to have Web2Py create the .table files to match the expected SQL database structure """
return self.base.get("fake_migrate", False)
def get_base_prepopulate(self):
""" Whether to prepopulate the database &, if so, which set of data to use for this """
return self.base.get("prepopulate", 1)
def get_base_guided_tour(self):
""" Whether the guided tours are enabled """
return self.base.get("guided_tour", False)
def get_base_public_url(self):
"""
The Public URL for the site - for use in email links, etc
"""
return self.base.get("public_url", "http://127.0.0.1:8000")
def get_base_cdn(self):
"""
Should we use CDNs (Content Distribution Networks) to serve some common CSS/JS?
"""
return self.base.get("cdn", False)
def get_base_session_memcache(self):
"""
Should we store sessions in a Memcache service to allow sharing
between multiple instances?
"""
return self.base.get("session_memcache", False)
def get_base_solr_url(self):
"""
URL to connect to solr server
"""
return self.base.get("solr_url", False)
def get_import_callback(self, tablename, callback):
"""
Lookup callback to use for imports in the following order:
- custom [create, update]_onxxxx
- default [create, update]_onxxxx
- custom onxxxx
- default onxxxx
NB: Currently only onaccept is actually used
"""
callbacks = self.base.get("import_callbacks", [])
if tablename in callbacks:
callbacks = callbacks[tablename]
if callback in callbacks:
return callbacks[callback]
get_config = current.s3db.get_config
default = get_config(tablename, callback)
if default:
return default
if callback[:2] != "on":
callback = callback[7:]
if callback in callbacks:
return callbacks[callback]
default = get_config(tablename, callback)
if default:
return default
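# Illustrative sketch of the structure that get_import_callback() above looks
# up; the table name and callback functions here are hypothetical examples:
#
#     settings.base.import_callbacks = {
#         "org_organisation": {
#             "create_onaccept": my_create_onaccept,   # hypothetical function
#             "onaccept": my_onaccept,                 # hypothetical function
#         },
#     }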
# -------------------------------------------------------------------------
# Database settings
def get_database_type(self):
return self.database.get("db_type", "sqlite").lower()
def get_database_string(self):
db_type = self.database.get("db_type", "sqlite").lower()
pool_size = self.database.get("pool_size", 30)
if (db_type == "sqlite"):
db_string = "sqlite://storage.db"
elif (db_type == "mysql"):
db_string = "mysql://%s:%s@%s:%s/%s" % \
(self.database.get("username", "sahana"),
self.database.get("password", "password"),
self.database.get("host", "localhost"),
self.database.get("port", None) or "3306",
self.database.get("database", "sahana"))
elif (db_type == "postgres"):
db_string = "postgres://%s:%s@%s:%s/%s" % \
(self.database.get("username", "sahana"),
self.database.get("password", "password"),
self.database.get("host", "localhost"),
self.database.get("port", None) or "5432",
self.database.get("database", "sahana"))
else:
from gluon import HTTP
raise HTTP(501, body="Database type '%s' not recognised - please correct file models/000_config.py." % db_type)
return (db_string, pool_size)
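# Example derived from the defaults above: with db_type = "postgres" and no
# other overrides, get_database_string() returns
#     ("postgres://sahana:password@localhost:5432/sahana", 30)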
# -------------------------------------------------------------------------
# Finance settings
# @ToDo: Make these customisable per Organisation
# => Move to a Table like hrm_course
def get_fin_currencies(self):
T = current.T
currencies = {
"EUR" :T("Euros"),
"GBP" :T("Great British Pounds"),
"USD" :T("United States Dollars"),
}
return self.fin.get("currencies", currencies)
def get_fin_currency_default(self):
return self.fin.get("currency_default", "USD") # Dollars
def get_fin_currency_writable(self):
return self.fin.get("currency_writable", True)
# -------------------------------------------------------------------------
# GIS (Map) Settings
#
def get_gis_api_bing(self):
""" API key for Bing """
return self.gis.get("api_bing", None)
def get_gis_api_google(self):
"""
API key for Google
- needed for Earth, MapMaker & GeoCoder
- defaults to localhost
"""
return self.gis.get("api_google", "ABQIAAAAgB-1pyZu7pKAZrMGv3nksRTpH3CbXHjuCVmaTc5MkkU4wO1RRhQWqp1VGwrG8yPE2KhLCPYhD7itFw")
def get_gis_api_yahoo(self):
"""
API key for Yahoo
- deprecated
"""
return self.gis.get("api_yahoo", None)
def get_gis_building_name(self):
" Display Building Name when selecting Locations "
return self.gis.get("building_name", True)
def get_gis_check_within_parent_boundaries(self):
"""
Whether location Lat/Lons should be within the boundaries of the parent
"""
return self.gis.get("check_within_parent_boundaries", True)
def get_gis_countries(self):
"""
Which country codes should be accessible to the location selector?
"""
return self.gis.get("countries", [])
def get_gis_display_l0(self):
return self.gis.get("display_L0", False)
def get_gis_display_l1(self):
return self.gis.get("display_L1", True)
def get_gis_duplicate_features(self):
"""
Display duplicate features either side of the International date line?
"""
return self.gis.get("duplicate_features", False)
def get_gis_edit_group(self):
" Edit Location Groups "
return self.gis.get("edit_GR", False)
def get_gis_geocode_imported_addresses(self):
" Should Addresses imported from CSV be passed to a Geocoder to try and automate Lat/Lon? "
return self.gis.get("geocode_imported_addresses", False)
def get_gis_geoserver_url(self):
return self.gis.get("geoserver_url", "")
def get_gis_geoserver_username(self):
return self.gis.get("geoserver_username", "admin")
def get_gis_geoserver_password(self):
return self.gis.get("geoserver_password", "")
def get_gis_latlon_selector(self):
" Display Lat/Lon form fields when selecting Locations "
return self.gis.get("latlon_selector", True)
def get_gis_layer_metadata(self):
" Use CMS to provide Metadata on Map Layers "
return self.has_module("cms") and self.gis.get("layer_metadata", False)
def get_gis_layer_properties(self):
" Display Layer Properties Tool above Map's Layer Tree "
return self.gis.get("layer_properties", True)
def get_gis_layer_tree_base(self):
" Display Base Layers folder in the Map's Layer Tree "
return self.gis.get("layer_tree_base", True)
def get_gis_layer_tree_overlays(self):
" Display Overlays folder in the Map's Layer Tree "
return self.gis.get("layer_tree_overlays", True)
def get_gis_layer_tree_expanded(self):
" Display folders in the Map's Layer Tree Open by default "
return self.gis.get("layer_tree_expanded", True)
def get_gis_layer_tree_radio(self):
" Use a radio button for custom folders in the Map's Layer Tree "
return self.gis.get("layer_tree_radio", False)
def get_gis_layers_label(self):
" Label for the Map's Layer Tree "
return self.gis.get("layers_label", "Layers")
def get_gis_map_height(self):
"""
Height of the Embedded Map
Change this if-required for your theme
NB API can override this in specific modules
"""
return self.gis.get("map_height", 600)
def get_gis_map_width(self):
"""
Width of the Embedded Map
Change this if-required for your theme
NB API can override this in specific modules
"""
return self.gis.get("map_width", 1000)
def get_gis_map_selector(self):
" Display a Map-based tool to select Locations "
return self.gis.get("map_selector", True)
def get_gis_marker_max_height(self):
return self.gis.get("marker_max_height", 35)
def get_gis_marker_max_width(self):
return self.gis.get("marker_max_width", 30)
def get_gis_max_features(self):
"""
The maximum number of features to return in a Map Layer
- more than this will prompt the user to zoom in to load the layer
Lower this number to get extra performance from an overloaded server.
"""
return self.gis.get("max_features", 1000)
def get_gis_legend(self):
"""
Should we display a Legend on the Map?
- set to True to show a GeoExt Legend (default)
- set to False to not show a Legend
- set to "float" to use a floating DIV
"""
return self.gis.get("legend", True)
def get_gis_menu(self):
"""
Should we display a menu of GIS configurations?
- set to False to not show the menu (default)
- set to the label to use for the menu to enable it
e.g. T("Events") or T("Regions")
"""
return self.gis.get("menu", False)
def get_gis_mouse_position(self):
"""
What style of Coordinates for the current Mouse Position
should be shown on the Map?
'normal', 'mgrs' or False
"""
return self.gis.get("mouse_position", "normal")
def get_gis_nav_controls(self):
"""
Should the Map Toolbar display Navigation Controls?
"""
return self.gis.get("nav_controls", True)
def get_gis_label_overlays(self):
"""
Label for the Map Overlays in the Layer Tree
"""
return self.gis.get("label_overlays", "Overlays")
def get_gis_overview(self):
"""
Should the Map display an Overview Map?
"""
return self.gis.get("overview", True)
def get_gis_permalink(self):
"""
Should the Map display a Permalink control?
"""
return self.gis.get("permalink", True)
def get_gis_poi_resources(self):
"""
List of resources (tablenames) to import/export as PoIs from Admin Locations
- KML & OpenStreetMap formats
"""
return self.gis.get("poi_resources",
["cr_shelter", "hms_hospital", "org_office"])
def get_gis_postcode_selector(self):
" Display Postcode form field when selecting Locations "
return self.gis.get("postcode_selector", True)
def get_gis_print_service(self):
"""
URL for a Print Service
"""
return self.gis.get("print_service", "")
def get_gis_simplify_tolerance(self):
"""
Default Tolerance for the Simplification of Polygons
- a lower value means less simplification, which is suitable for higher-resolution local activities
- a higher value is suitable for global views
"""
return self.gis.get("simplify_tolerance", 0.01)
def get_gis_scaleline(self):
"""
Should the Map display a ScaleLine control?
"""
return self.gis.get("scaleline", True)
def get_gis_spatialdb(self):
"""
Does the database have Spatial extensions?
"""
db_type = self.get_database_type()
if db_type != "postgres":
# Only Postgres supported currently
return False
else:
return self.gis.get("spatialdb", False)
def get_gis_toolbar(self):
"""
Should the main Map display a Toolbar?
"""
return self.gis.get("toolbar", True)
def get_gis_zoomcontrol(self):
"""
Should the Map display a Zoom control?
"""
return self.gis.get("zoomcontrol", True)
# -------------------------------------------------------------------------
# L10N Settings
def get_L10n_default_language(self):
return self.L10n.get("default_language", "en")
def get_L10n_display_toolbar(self):
return self.L10n.get("display_toolbar", True)
def get_L10n_languages(self):
return self.L10n.get("languages",
OrderedDict([
("ar", "العربية"),
("zh-cn", "中文 (简体)"),
("zh-tw", "中文 (繁體)"),
("en", "English"),
("fr", "Français"),
("de", "Deutsch"),
("el", "ελληνικά"),
("it", "Italiano"),
("ja", "日本語"),
("ko", "한국어"),
("pt", "Português"),
("pt-br", "Português (Brasil)"),
("ru", "русский"),
("es", "Español"),
("tl", "Tagalog"),
("ur", "اردو"),
("vi", "Tiếng Việt"),
]))
def get_L10n_languages_readonly(self):
return self.L10n.get("languages_readonly", True)
def get_L10n_religions(self):
"""
Religions used in Person Registry
@ToDo: find a better code
http://eden.sahanafoundation.org/ticket/594
"""
T = current.T
return self.L10n.get("religions", {
"none":T("none"),
"christian":T("Christian"),
"muslim":T("Muslim"),
"jewish":T("Jewish"),
"buddhist":T("Buddhist"),
"hindu":T("Hindu"),
"bahai":T("Bahai"),
"other":T("other")
})
def get_L10n_date_format(self):
return self.L10n.get("date_format", "%Y-%m-%d")
def get_L10n_time_format(self):
return self.L10n.get("time_format", "%H:%M")
def get_L10n_datetime_separator(self):
return self.L10n.get("datetime_separator", " ")
def get_L10n_datetime_format(self):
return "%s%s%s" % (self.get_L10n_date_format(),
self.get_L10n_datetime_separator(),
self.get_L10n_time_format()
)
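# Example: with the default date format, time format and separator above,
# get_L10n_datetime_format() returns "%Y-%m-%d %H:%M"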
def get_L10n_utc_offset(self):
return self.L10n.get("utc_offset", "UTC +0000")
def get_L10n_firstDOW(self):
return self.L10n.get("firstDOW", 1)
def get_L10n_lat_lon_format(self):
"""
This is used to format latitude and longitude fields when they are
displayed by eden. The format string may include the following
placeholders:
- %d -- Degrees (integer)
- %m -- Minutes (integer)
- %s -- Seconds (double)
- %f -- Degrees in decimal (double)
"""
return self.L10n.get("lat_lon_display_format", "%f")
def get_L10n_default_country_code(self):
""" Default Telephone Country Code """
return self.L10n.get("default_country_code", 1)
def get_L10n_mandatory_lastname(self):
return self.L10n.get("mandatory_lastname", False)
def get_L10n_thousands_separator(self):
return self.L10n.get("thousands_separator", " ")
def get_L10n_thousands_grouping(self):
return self.L10n.get("thousands_grouping", 3)
def get_L10n_decimal_separator(self):
return self.L10n.get("decimal_separator", ",")
def get_L10n_translate_cms_series(self):
"""
Whether to translate CMS Series names
"""
return self.L10n.get("translate_cms_series", False)
def get_L10n_translate_gis_layer(self):
"""
Whether to translate Layer names
"""
return self.L10n.get("translate_gis_layer", False)
def get_L10n_translate_gis_location(self):
"""
Whether to translate Location names
"""
return self.L10n.get("translate_gis_location", False)
def get_L10n_pootle_url(self):
""" URL for Pootle server """
return self.L10n.get("pootle_url", "http://pootle.sahanafoundation.org/")
def get_L10n_pootle_username(self):
""" Username for Pootle server """
return self.L10n.get("pootle_username", False)
def get_L10n_pootle_password(self):
""" Password for Pootle server """
return self.L10n.get("pootle_password", False)
# -------------------------------------------------------------------------
# PDF settings
def get_paper_size(self):
return self.base.get("paper_size", "A4")
def get_pdf_logo(self):
return self.ui.get("pdf_logo", None)
# Optical Character Recognition (OCR)
def get_pdf_excluded_fields(self, resourcename):
excluded_fields_dict = {
"hms_hospital" : [
"hrm_human_resource",
],
"pr_group" : [
"pr_group_membership",
],
}
excluded_fields =\
excluded_fields_dict.get(resourcename, [])
return excluded_fields
# -------------------------------------------------------------------------
# UI Settings
@staticmethod
def default_formstyle(id, label, widget, comment, hidden=False):
"""
Provide the default Sahana Eden Form Style
Label above the Inputs:
http://uxmovement.com/design-articles/faster-with-top-aligned-labels
Things that need to be looked at for custom formstyles:
* subheadings (s3forms.py)
* S3AddPersonWidget (s3widgets.py)
* S3EmbedComponentWidget (s3widgets.py)
"""
row = []
if hidden:
_class = "hide"
else:
_class = ""
# Label on the 1st row
row.append(TR(TD(label, _class="w2p_fl"),
TD(""),
_id=id + "1",
_class=_class))
# Widget & Comment on the 2nd Row
row.append(TR(widget,
TD(comment, _class="w2p_fc"),
_id=id,
_class=_class))
return tuple(row)
def get_ui_formstyle(self):
return self.ui.get("formstyle", self.default_formstyle)
# -------------------------------------------------------------------------
def get_ui_auth_user_represent(self):
"""
Should the auth_user created_by/modified_by be represented by Name or Email?
- defaults to email
"""
return self.ui.get("auth_user_represent", "email")
def get_ui_confirm(self):
"""
For Delete actions
Workaround for this Bug in Selenium with FF4:
http://code.google.com/p/selenium/issues/detail?id=1604
"""
return self.ui.get("confirm", True)
def get_ui_crud_form(self, tablename):
"""
Get custom crud_forms for different tables
"""
return self.ui.get("crud_form_%s" % tablename, None)
def ui_customize(self, tablename, **attr):
"""
Customize a Controller
"""
customize = self.ui.get("customize_%s" % tablename)
if customize:
return customize(**attr)
else:
return attr
def get_ui_export_formats(self):
"""
Which export formats should we display?
- specify a list of export formats to restrict
"""
return self.ui.get("export_formats",
["have", "kml", "map", "pdf", "rss", "xls", "xml"])
def get_ui_hide_report_filter_options(self):
"""
Show report filter options form by default
"""
return self.ui.get("hide_report_filter_options", False)
def get_ui_hide_report_options(self):
"""
Hide report options form by default
"""
return self.ui.get("hide_report_options", True)
def get_ui_interim_save(self):
""" Render interim-save button in CRUD forms by default """
return self.ui.get("interim_save", False)
def get_ui_label_attachments(self):
"""
Label for attachments tab
"""
return current.T(self.ui.get("label_attachments", "Attachments"))
def get_ui_label_camp(self):
""" 'Camp' instead of 'Shelter'? """
return self.ui.get("camp", False)
def get_ui_label_cluster(self):
""" UN-style deployment? """
return self.ui.get("cluster", False)
def get_ui_label_mobile_phone(self):
"""
Label for the Mobile Phone field
e.g. 'Cell Phone'
"""
return current.T(self.ui.get("label_mobile_phone", "Mobile Phone"))
def get_ui_label_postcode(self):
"""
Label for the Postcode field
e.g. 'ZIP Code'
"""
return current.T(self.ui.get("label_postcode", "Postcode"))
def get_ui_label_read(self):
"""
Label for buttons in list views which lead to a Read-only 'Display' page
"""
return self.ui.get("read_label", "Open")
def get_ui_label_update(self):
"""
Label for buttons in list views which lead to an Editable 'Update' page
"""
return self.ui.get("update_label", "Open")
def get_ui_navigate_away_confirm(self):
return self.ui.get("navigate_away_confirm", True)
def get_ui_search_submit_button(self):
"""
Class for submit buttons in search views
"""
return self.ui.get("search_submit_button", "search-button")
def get_ui_social_buttons(self):
"""
Display social media Buttons in the footer?
- requires support in the Theme
"""
return self.ui.get("social_buttons", False)
def get_ui_summary(self):
"""
Default Summary Page Configuration (can also be
configured per-resource using s3db.configure)
@example:
settings.ui.summary = [
{
"name": "table", # the section name
"label": "Table", # the section label, will
# automatically be translated
"common": False, # show this section on all tabs
"translate": True, # turn automatic label translation on/off
"widgets": [ # list of widgets for this section
{
"method": "datatable", # widget method, either a
# name that resolves into
# a S3Method, or a callable
# to render the widget
"filterable": True, # Whether the widget can
# be filtered by the summary
# filter form
}
]
}
]
"""
return self.ui.get("summary", None)
def get_ui_filter_auto_submit(self):
"""
Time in milliseconds after the last filter option change to
automatically update the filter target(s), set to 0 to disable
"""
return self.ui.get("filter_auto_submit", 800)
def get_ui_report_auto_submit(self):
"""
Time in milliseconds after the last filter option change to
automatically update the filter target(s), set to 0 to disable
"""
return self.ui.get("report_auto_submit", 800)
# =========================================================================
# Messaging
# -------------------------------------------------------------------------
# Mail settings
def get_mail_server(self):
return self.mail.get("server", "127.0.0.1:25")
def get_mail_server_login(self):
return self.mail.get("login", False)
def get_mail_server_tls(self):
"""
Does the Mail Server use TLS?
- default Debian is False
- GMail is True
"""
return self.mail.get("tls", False)
def get_mail_sender(self):
"""
The From Address for all Outbound Emails
"""
return self.mail.get("sender", None)
def get_mail_approver(self):
"""
The default Address to send Requests for New Users to be Approved
OR
UUID of Role of users who should receive Requests for New Users to be Approved
- unless overridden by per-domain entries in auth_organisation
"""
return self.mail.get("approver", "[email protected]")
def get_mail_limit(self):
"""
A daily limit to the number of messages which can be sent
"""
return self.mail.get("limit", None)
# -------------------------------------------------------------------------
# Parser
def get_msg_parser(self):
"""
Which template folder to use to load parser.py
"""
return self.msg.get("parser", "default")
# -------------------------------------------------------------------------
# Twitter
def get_msg_twitter_oauth_consumer_key(self):
return self.msg.get("twitter_oauth_consumer_key", "")
def get_msg_twitter_oauth_consumer_secret(self):
return self.msg.get("twitter_oauth_consumer_secret", "")
# -------------------------------------------------------------------------
# Notifications
def get_msg_notify_subject(self):
"""
Template for the subject line in update notifications.
Available placeholders:
$S = System Name (long)
$s = System Name (short)
$r = Resource Name
Use {} to separate the placeholder from immediately following
identifier characters (like: ${placeholder}text).
"""
return self.msg.get("notify_subject",
"$s %s: $r" % current.T("Update Notification"))
def get_msg_notify_email_format(self):
"""
The preferred email format for update notifications,
"text" or "html".
"""
return self.msg.get("notify_email_format", "text")
def get_msg_notify_renderer(self):
"""
Custom content renderer function for update notifications,
function()
"""
return self.msg.get("notify_renderer", None)
# -------------------------------------------------------------------------
# Outbox settings
def get_msg_max_send_retries(self):
"""
Maximum number of retries to send a message before
it is regarded as permanently failing; set to None
to retry forever.
"""
return self.msg.get("max_send_retries", 9)
# -------------------------------------------------------------------------
def get_search_max_results(self):
"""
The maximum number of results to return in an Autocomplete Search
- more than this will prompt the user to enter a more exact match
Lower this number to get extra performance from an overloaded server.
"""
return self.search.get("max_results", 200)
# -------------------------------------------------------------------------
# Save Search and Subscription
def get_search_save_widget(self):
"""
Enable the Saved Search widget
"""
return self.search.get("save_widget", True)
# -------------------------------------------------------------------------
# Filter Manager Widget
def get_search_filter_manager(self):
""" Enable the filter manager widget """
return self.search.get("filter_manager", True)
def get_search_filter_manager_allow_delete(self):
""" Allow deletion of saved filters """
return self.search.get("filter_manager_allow_delete", True)
def get_search_filter_manager_save(self):
""" Text for saved filter save-button """
return self.search.get("filter_manager_save", None)
def get_search_filter_manager_update(self):
""" Text for saved filter update-button """
return self.search.get("filter_manager_update", None)
def get_search_filter_manager_delete(self):
""" Text for saved filter delete-button """
return self.search.get("filter_manager_delete", None)
def get_search_filter_manager_load(self):
""" Text for saved filter load-button """
return self.search.get("filter_manager_load", None)
# =========================================================================
# Modules
# -------------------------------------------------------------------------
# CAP
def get_cap_identifier_prefix(self):
"""
Prefix to be prepended to identifiers of CAP alerts
"""
return self.cap.get("identifier_prefix", "")
def get_cap_identifier_suffix(self):
"""
Suffix to be appended to identifiers of CAP alerts
"""
return self.cap.get("identifier_suffix", "")
def get_cap_codes(self):
"""
Default codes for CAP alerts
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("codes", [])
def get_cap_event_codes(self):
"""
Default alert codes for CAP info segments
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("event_codes", [])
def get_cap_parameters(self):
"""
Default parameters for CAP info segments
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("parameters", [])
def get_cap_geocodes(self):
"""
Default geocodes.
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("geocodes", [])
def get_cap_base64(self):
"""
Should CAP resources be base64 encoded and embedded in the alert message?
"""
return self.cap.get("base64", False)
def get_cap_languages(self):
"""
Languages for CAP info segments.
This gets filled in the drop-down for selecting languages.
These values should conform to RFC 3066.
For a full list of languages and their codes, see:
http://www.i18nguy.com/unicode/language-identifiers.html
"""
return self.cap.get("languages",
OrderedDict([
("ar", "العربية"),
("en", "English"),
("fr", "Français"),
("pt", "Português"),
("ru", "русский"),
("es", "Español")
]))
def get_cap_priorities(self):
"""
Settings for CAP priorities
Should be an ordered dict of the format
OrderedDict([
("<value>, "<Translated title>", <urgency>, <severity>, <certainty>, <color>),
...
]) """
T = current.T
return self.cap.get("priorities", [
("Urgent", T("Urgent"), "Immediate", "Extreme", "Observed", "red"),
("High", T("High"), "Expected", "Severe", "Observed", "orange"),
("Low", T("Low"), "Expected", "Moderate", "Observed", "green")
])
# -------------------------------------------------------------------------
# Human Resource Management
def get_hrm_staff_label(self):
"""
Label for 'Staff'
e.g. 'Contacts'
"""
return current.T(self.hrm.get("staff_label", "Staff"))
def get_hrm_organisation_label(self):
"""
Label for Organisations in Human Resources
"""
return current.T(self.hrm.get("organisation_label", "Organization"))
def get_hrm_email_required(self):
"""
If set to True then Staff & Volunteers require an email address
"""
return self.hrm.get("email_required", True)
def get_hrm_org_required(self):
"""
If set to True then Staff & Volunteers require an Organisation
"""
return self.hrm.get("org_required", True)
def get_hrm_deletable(self):
"""
If set to True then HRM records can be deleted rather than only marked as obsolete
"""
return self.hrm.get("deletable", True)
def get_hrm_filter_certificates(self):
"""
If set to True then Certificates are filtered by (Root) Organisation
& hence certificates from other Organisations cannot be added to an HR's profile (except by Admins)
"""
return self.hrm.get("filter_certificates", False)
def get_hrm_multiple_job_titles(self):
"""
If set to True then HRs can have multiple Job Titles
"""
return self.hrm.get("multi_job_titles", False)
def get_hrm_show_staff(self):
"""
If set to True then show 'Staff' options when HRM enabled
- needs a separate setting as vol requires hrm, but we may only wish to show Volunteers
"""
return self.hrm.get("show_staff", True)
def get_hrm_skill_types(self):
"""
If set to True then Skill Types are exposed to the UI
- each skill_type needs its own set of competency levels
If set to False then Skill Types are hidden from the UI
- all skills use the same skill_type & hence the same set of competency levels
"""
return self.hrm.get("skill_types", False)
def get_hrm_staff_experience(self):
"""
Whether to use Experience for Staff &, if so, which table to use
- options are: False, "experience"
"""
return self.hrm.get("staff_experience", "experience")
def get_hrm_vol_experience(self):
"""
Whether to use Experience for Volunteers &, if so, which table to use
- options are: False, "experience", "programme" or "both"
"""
return self.hrm.get("vol_experience", "programme")
def get_hrm_show_organisation(self):
"""
Whether Human Resource representations should include the Organisation
"""
return self.hrm.get("show_organisation", False)
def get_hrm_teams(self):
"""
Whether Human Resources should use Teams
& what to call them
"""
return self.hrm.get("teams", "Team")
def get_hrm_use_awards(self):
"""
Whether Volunteers should use Awards
"""
return self.hrm.get("use_awards", True)
def get_hrm_use_certificates(self):
"""
Whether Human Resources should use Certificates
"""
return self.hrm.get("use_certificates", True)
def get_hrm_use_credentials(self):
"""
Whether Human Resources should use Credentials
"""
return self.hrm.get("use_credentials", True)
def get_hrm_use_description(self):
"""
Whether Human Resources should use Physical Description
"""
return self.hrm.get("use_description", True)
def get_hrm_use_education(self):
"""
Whether Human Resources should show Education
"""
return self.hrm.get("use_education", False)
def get_hrm_use_id(self):
"""
Whether Human Resources should use Staff ID
"""
return self.hrm.get("use_id", True)
def get_hrm_use_skills(self):
"""
Whether Human Resources should use Skills
"""
return self.hrm.get("use_skills", True)
def get_hrm_use_trainings(self):
"""
Whether Human Resources should use Trainings
"""
return self.hrm.get("use_trainings", True)
# -------------------------------------------------------------------------
# Inventory Management Settings
#
def get_inv_collapse_tabs(self):
return self.inv.get("collapse_tabs", True)
def get_inv_facility_label(self):
return self.inv.get("facility_label", current.T("Warehouse"))
def get_inv_direct_stock_edits(self):
"""
Can Stock levels be adjusted directly?
- defaults to False
"""
return self.inv.get("direct_stock_edits", False)
def get_inv_send_show_mode_of_transport(self):
"""
Show mode of transport on Sent Shipments
"""
return self.inv.get("show_mode_of_transport", False)
def get_inv_send_show_org(self):
"""
Show Organisation on Sent Shipments
"""
return self.inv.get("send_show_org", True)
def get_inv_send_show_time_in(self):
"""
Show Time In on Sent Shipments
"""
return self.inv.get("send_show_time_in", False)
def get_inv_stock_count(self):
"""
Call Stock Adjustments 'Stock Counts'
"""
return self.inv.get("stock_count", True)
def get_inv_track_pack_values(self):
"""
Whether or not Pack values are tracked
"""
return self.inv.get("track_pack_values", True)
def get_inv_item_status(self):
"""
Item Statuses which can also be Sent Shipment Types
"""
T = current.T
return self.inv.get("item_status", {
0: current.messages["NONE"],
1: T("Dump"),
2: T("Sale"),
3: T("Reject"),
4: T("Surplus")
})
def get_inv_shipment_name(self):
"""
Get the name of Shipments
- currently supported options are:
* shipment
* order
"""
return self.inv.get("shipment_name", "shipment")
def get_inv_shipment_types(self):
"""
Shipment types which are common to both Send & Receive
"""
return self.inv.get("shipment_types", {
0 : current.messages["NONE"],
11: current.T("Internal Shipment"),
})
def get_inv_send_types(self):
"""
Shipment types which are just for Send
"""
return self.inv.get("send_types", {
21: current.T("Distribution"),
})
def get_inv_send_type_default(self):
"""
Which Shipment type is default
"""
return self.inv.get("send_type_default", 0)
def get_inv_recv_types(self):
"""
Shipment types which are just for Receive
"""
T = current.T
return self.inv.get("recv_types", {
#31: T("Other Warehouse"), Same as Internal Shipment
32: T("Donation"),
#33: T("Foreign Donation"),
34: T("Purchase"),
})
def get_inv_send_form_name(self):
return self.inv.get("send_form_name", "Waybill")
def get_inv_send_ref_field_name(self):
return self.inv.get("send_ref_field_name", "Waybill Number")
def get_inv_send_shortname(self):
return self.inv.get("send_shortname", "WB")
def get_inv_recv_form_name(self):
return self.inv.get("recv_form_name", "Goods Received Note")
def get_inv_recv_shortname(self):
return self.inv.get("recv_shortname", "GRN")
# -------------------------------------------------------------------------
# IRS
def get_irs_vehicle(self):
"""
Use Vehicles to respond to Incident Reports
"""
return self.irs.get("vehicle", False)
# -------------------------------------------------------------------------
# Organisation
def get_org_autocomplete(self):
"""
Whether organisation_id fields should use an Autocomplete instead of a dropdown
"""
return self.org.get("autocomplete", False)
def get_org_branches(self):
"""
Whether to support Organisation branches or not
"""
return self.org.get("branches", False)
def get_org_regions(self):
"""
Whether to support Organisation regions or not
"""
return self.org.get("regions", False)
def get_org_site_code_len(self):
"""
Length of auto-generated Codes for Facilities (org_site)
"""
return self.org.get("site_code_len", 10)
def get_org_site_label(self):
"""
Label for site_id fields
"""
return current.T(self.org.get("site_label", "Facility"))
def get_org_site_inv_req_tabs(self):
"""
Whether Sites should have Tabs for Inv/Req
"""
return self.org.get("site_inv_req_tabs", True)
def get_org_site_autocomplete(self):
"""
Whether site_id fields should use an Autocomplete instead of a dropdown
"""
return self.org.get("site_autocomplete", False)
def get_org_site_autocomplete_fields(self):
"""
Which extra fields should be returned in S3SiteAutocompleteWidget
"""
return self.org.get("site_autocomplete_fields", ["instance_type"])
def get_org_site_address_autocomplete(self):
"""
Whether site_id Autocomplete fields should search Address fields as well as name
"""
return self.org.get("site_address_autocomplete", False)
def get_org_site_last_contacted(self):
"""
Whether to display the last_contacted field for a Site
"""
return self.org.get("site_last_contacted", False)
def get_org_summary(self):
"""
Whether to use Summary fields for Organisation/Office:
# National/International staff
"""
return self.org.get("summary", False)
def set_org_dependent_field(self,
tablename=None,
fieldname=None,
enable_field=True):
"""
Enables/Disables optional fields according to a user's Organisation
- must specify either field or tablename/fieldname
(e.g. for virtual fields)
"""
enabled = False
dependent_fields = self.org.get("dependent_fields", None)
if dependent_fields:
org_name_list = dependent_fields.get("%s.%s" % (tablename,
fieldname),
None)
if org_name_list:
auth = current.auth
if auth.s3_has_role(auth.get_system_roles().ADMIN):
# Admins see all fields unless disabled for all orgs in this deployment
enabled = True
else:
s3db = current.s3db
otable = s3db.org_organisation
root_org_id = auth.root_org()
root_org = current.db(otable.id == root_org_id).select(otable.name,
limitby=(0, 1),
cache=s3db.cache
).first()
if root_org:
enabled = root_org.name in org_name_list
if enable_field:
field = current.s3db[tablename][fieldname]
field.readable = enabled
field.writable = enabled
return enabled
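# Illustrative sketch (hypothetical table, field and organisation names) of the
# structure read by set_org_dependent_field() above:
#
#     settings.org.dependent_fields = {
#         "pr_person.middle_name": ["Example Organisation"],
#     }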
# -------------------------------------------------------------------------
# Persons
def get_pr_age_group(self, age):
"""
Function to provide the age group for an age
"""
fn = self.pr.get("age_group", None)
if fn:
group = fn(age)
else:
# Default
if age < 18 :
group = "-17" # "< 18"/" < 18" don't sort correctly
elif age < 25 :
group = "18-24"
elif age < 40:
group = "25-39"
elif age < 60:
group = "40-59"
else:
group = "60+"
return group
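# Examples of the default grouping above: get_pr_age_group(16) returns "-17",
# get_pr_age_group(30) returns "25-39", get_pr_age_group(72) returns "60+"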
def get_pr_request_dob(self):
""" Include Date of Birth in the AddPersonWidget[2] """
return self.pr.get("request_dob", True)
def get_pr_request_gender(self):
""" Include Gender in the AddPersonWidget[2] """
return self.pr.get("request_gender", True)
def get_pr_request_home_phone(self):
""" Include Home Phone in the AddPersonWidget2 """
return self.pr.get("request_home_phone", False)
def get_pr_select_existing(self):
"""
Whether the AddPersonWidget allows selecting existing PRs
- set to True if Persons can be found in multiple contexts
- set to False if just a single context
"""
return self.pr.get("select_existing", True)
def get_pr_import_update_requires_email(self):
"""
During imports, records are only updated if the import
item contains a (matching) email address
"""
return self.pr.get("import_update_requires_email", True)
# -------------------------------------------------------------------------
# Proc
def get_proc_form_name(self):
return self.proc.get("form_name", "Purchase Order")
def get_proc_shortname(self):
return self.proc.get("form_name", "PO")
# -------------------------------------------------------------------------
# Projects
def get_project_mode_3w(self):
"""
Enable 3W mode in the projects module
"""
return self.project.get("mode_3w", False)
def get_project_mode_task(self):
"""
Enable Tasks mode in the projects module
"""
return self.project.get("mode_task", False)
def get_project_mode_drr(self):
"""
Enable DRR extensions in the projects module
"""
return self.project.get("mode_drr", False)
def get_project_activities(self):
"""
Use Activities in Projects
"""
return self.project.get("activities", False)
def get_project_codes(self):
"""
Use Codes in Projects
"""
return self.project.get("codes", False)
def get_project_community(self):
"""
Label project_location as 'Community'
"""
return self.project.get("community", False)
#def get_project_locations_from_countries(self):
# """
# Create a project_location for each country that a Project is
# implemented in
# """
# return self.project.get("locations_from_countries", False)
def get_project_milestones(self):
"""
Use Milestones in Projects
"""
return self.project.get("milestones", False)
def get_project_sectors(self):
"""
Use Sectors in Projects
"""
return self.project.get("sectors", True)
def get_project_theme_percentages(self):
"""
Use Theme Percentages in Projects
"""
return self.project.get("theme_percentages", False)
def get_project_multiple_budgets(self):
"""
Use Multiple Budgets in Projects
"""
return self.project.get("multiple_budgets", False)
def get_project_multiple_organisations(self):
"""
Use Multiple Organisations in Projects
"""
return self.project.get("multiple_organisations", False)
def get_project_organisation_roles(self):
T = current.T
return self.project.get("organisation_roles", {
1: T("Lead Implementer"), # T("Host National Society")
2: T("Partner"), # T("Partner National Society")
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier") # T("Beneficiary")?
})
def get_project_organisation_lead_role(self):
return self.project.get("organisation_lead_role", 1)
# -------------------------------------------------------------------------
# Request Settings
def get_req_req_type(self):
"""
The Types of Request which can be made.
Select one or more from:
* People
* Stock
* Other
tbc: Assets, Shelter, Food
"""
return self.req.get("req_type", ["Stock", "People", "Other"])
def get_req_type_inv_label(self):
return current.T(self.req.get("type_inv_label", "Warehouse Stock"))
def get_req_type_hrm_label(self):
return current.T(self.req.get("type_hrm_label", "People"))
def get_req_requester_label(self):
return current.T(self.req.get("requester_label", "Requester"))
def get_req_requester_optional(self):
return self.req.get("requester_optional", False)
def get_req_requester_is_author(self):
"""
Whether the User Account logging the Request is normally the Requester
"""
return self.req.get("requester_is_author", True)
def get_req_requester_from_site(self):
"""
Whether the Requester has to be a staff of the site making the Request
"""
return self.req.get("requester_from_site", False)
def get_req_requester_to_site(self):
"""
Whether to set the Requester as an HR for the Site if they have no HR record yet & as the Site contact if none exists yet
"""
return self.req.get("requester_to_site", False)
def get_req_date_writable(self):
""" Whether Request Date should be manually editable """
return self.req.get("date_writable", True)
def get_req_status_writable(self):
""" Whether Request Status should be manually editable """
return self.req.get("status_writable", True)
def get_req_item_quantities_writable(self):
""" Whether Item Quantities should be manually editable """
return self.req.get("item_quantities_writable", False)
def get_req_skill_quantities_writable(self):
""" Whether People Quantities should be manually editable """
return self.req.get("skill_quantities_writable", False)
def get_req_multiple_req_items(self):
"""
Can a Request have multiple line items?
- e.g. ICS says that each request should be just for items of a single Type
"""
return self.req.get("multiple_req_items", True)
def get_req_show_quantity_transit(self):
return self.req.get("show_quantity_transit", True)
def get_req_inline_forms(self):
"""
Whether Requests module should use inline forms for Items
"""
return self.req.get("inline_forms", True)
def get_req_prompt_match(self):
"""
Whether a Requester is prompted to match each line item in an Item request
"""
return self.req.get("prompt_match", True)
def get_req_summary(self):
"""
Whether to use Summary Needs for Sites (Office/Facility currently):
"""
return self.req.get("summary", False)
def get_req_use_commit(self):
"""
Whether there is a Commit step in Requests Management
"""
return self.req.get("use_commit", True)
def get_req_commit_value(self):
"""
Whether Donations should have a Value field
"""
return self.req.get("commit_value", False)
def get_req_commit_without_request(self):
"""
Whether to allow Donations to be made without a matching Request
"""
return self.req.get("commit_without_request", False)
def get_req_committer_is_author(self):
""" Whether the User Account logging the Commitment is normally the Committer """
return self.req.get("committer_is_author", True)
def get_req_ask_security(self):
"""
Should Requests ask whether Security is required?
"""
return self.req.get("ask_security", False)
def get_req_ask_transport(self):
"""
Should Requests ask whether Transportation is required?
"""
return self.req.get("ask_transport", False)
def get_req_items_ask_purpose(self):
"""
Should Requests for Items ask for Purpose?
"""
return self.req.get("items_ask_purpose", True)
def get_req_req_crud_strings(self, type = None):
return self.req.get("req_crud_strings") and \
self.req.req_crud_strings.get(type, None)
def get_req_use_req_number(self):
return self.req.get("use_req_number", True)
def get_req_generate_req_number(self):
return self.req.get("generate_req_number", True)
def get_req_form_name(self):
return self.req.get("req_form_name", "Requisition Form")
def get_req_shortname(self):
return self.req.get("req_shortname", "REQ")
def get_req_restrict_on_complete(self):
"""
To restrict adding new commits to the Completed commits.
"""
return self.req.get("req_restrict_on_complete", False)
# -------------------------------------------------------------------------
# Supply
def get_supply_catalog_default(self):
return self.inv.get("catalog_default", "Default")
def get_supply_use_alt_name(self):
return self.supply.get("use_alt_name", True)
# -------------------------------------------------------------------------
# Hospital Registry
def get_hms_track_ctc(self):
return self.hms.get("track_ctc", False)
def get_hms_activity_reports(self):
return self.hms.get("activity_reports", False)
# -------------------------------------------------------------------------
# Active modules list
def has_module(self, module_name):
if not self.modules:
# Provide a minimal list of core modules
_modules = [
"default", # Default
"admin", # Admin
"gis", # GIS
"pr", # Person Registry
"org" # Organization Registry
]
else:
_modules = self.modules
return module_name in _modules
# END =========================================================================
| mit |
jimi-c/ansible | lib/ansible/modules/cloud/ovirt/ovirt_scheduling_policies_facts.py | 2 | 4085 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_scheduling_policies_facts
short_description: Retrieve facts about one or more oVirt scheduling policies
author: "Ondra Machacek (@machacekondra)"
version_added: "2.4"
description:
- "Retrieve facts about one or more oVirt scheduling policies."
notes:
- "This module creates a new top-level C(ovirt_scheduling_policies) fact,
which contains a list of scheduling policies."
options:
id:
description:
- "ID of the scheduling policy."
required: true
name:
description:
- "Name of the scheduling policy, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all scheduling policies with name InClusterUpgrade:
- ovirt_scheduling_policies_facts:
name: InClusterUpgrade
- debug:
var: ovirt_scheduling_policies
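# A further sketch (not part of the original examples): the documented "id"
# option can be used instead of "name" to fetch a single policy; the UUID
# below is a placeholder:
- ovirt_scheduling_policies_facts:
    id: 00000000-0000-0000-0000-000000000000
- debug:
    var: ovirt_scheduling_policies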
'''
RETURN = '''
ovirt_scheduling_policies:
description: "List of dictionaries describing the scheduling policies.
Scheduling policies attributes are mapped to dictionary keys,
all scheduling policies attributes can be found at following
url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
id=dict(default=None),
name=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
system_service = connection.system_service()
sched_policies_service = system_service.scheduling_policies_service()
if module.params['name']:
sched_policies = [
e for e in sched_policies_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
elif module.params['id']:
sched_policies = [
sched_policies_service.service(module.params['id']).get()
]
else:
sched_policies = sched_policies_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_scheduling_policies=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in sched_policies
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_bdist_wininst.py | 53 | 1038 | """Tests for distutils.command.bdist_wininst."""
import unittest
from test.support import run_unittest
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
pkg_pth, dist = self.create_dist()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assertTrue(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| apache-2.0 |
shashank971/edx-platform | common/djangoapps/student/management/commands/assigngroups.py | 170 | 3059 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserTestGroup
import random
import sys
import datetime
from textwrap import dedent
import json
from pytz import UTC
def group_from_value(groups, v):
''' Given groups such as (('a', 0.3), ('b', 0.4), ('c', 0.3)) and a random
value v in [0, 1], return the associated group (in the above case, return
'a' if v < 0.3, 'b' if 0.3 <= v < 0.7, and 'c' if v >= 0.7).
'''
sum = 0
for (g, p) in groups:
sum = sum + p
if sum > v:
return g
return g # For round-off errors
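# A minimal illustration of the weighted pick above (values are arbitrary):
# with groups = (('a', 0.3), ('b', 0.4), ('c', 0.3)),
#   group_from_value(groups, 0.10) -> 'a'
#   group_from_value(groups, 0.50) -> 'b'
#   group_from_value(groups, 0.95) -> 'c'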
class Command(BaseCommand):
help = dedent("""\
Assign users to test groups. Takes a list of groups:
a:0.3,b:0.4,c:0.3 file.txt "Testing something"
Will assign each user to group a, b, or c with
probability 0.3, 0.4, 0.3. Probabilities must
add up to 1.
Will log what happened to file.txt.
""")
def handle(self, *args, **options):
if len(args) != 3:
print "Invalid number of options"
sys.exit(-1)
# Extract groups from string
group_strs = [x.split(':') for x in args[0].split(',')]
groups = [(group, float(value)) for group, value in group_strs]
print "Groups", groups
## Confirm group probabilities add up to 1
total = sum(zip(*groups)[1])
print "Total:", total
if abs(total - 1) > 0.01:
print "Total not 1"
sys.exit(-1)
## Confirm groups don't already exist
for group in dict(groups):
if UserTestGroup.objects.filter(name=group).count() != 0:
print group, "already exists!"
sys.exit(-1)
group_objects = {}
f = open(args[1], "a+")
## Create groups
for group in dict(groups):
utg = UserTestGroup()
utg.name = group
# merge into a single dict: json.dumps() serializes one object, and the
# second positional argument is skipkeys, not another payload
utg.description = json.dumps({"description": args[2],
"time": datetime.datetime.now(UTC).isoformat()})
group_objects[group] = utg
group_objects[group].save()
## Assign groups
users = list(User.objects.all())
count = 0
for user in users:
if count % 1000 == 0:
print count
count = count + 1
v = random.uniform(0, 1)
group = group_from_value(groups, v)
group_objects[group].users.add(user)
f.write(u"Assigned user {name} ({id}) to {group}\n".format(
name=user.username,
id=user.id,
group=group
).encode('utf-8'))
## Save groups
for group in group_objects:
group_objects[group].save()
f.close()
# python manage.py assigngroups summary_test:0.3,skip_summary_test:0.7 log.txt "Do previews of future materials help?"
# python manage.py assigngroups skip_capacitor:0.3,capacitor:0.7 log.txt "Do we show capacitor in linearity tutorial?"
| agpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/db/models/sql/compiler.py | 76 | 14014 | import datetime
from django.conf import settings
from django.db.backends.util import truncate_name, typecast_date, typecast_timestamp
from django.db.models.sql import compiler
from django.db.models.sql.constants import MULTI
from django.utils import six
from django.utils.six.moves import zip, zip_longest
from django.utils import timezone
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(compiler.SQLCompiler):
def get_columns(self, with_aliases=False):
"""
Return the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
# This loop customized for GeoQuery.
for col, field in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = self.get_field_select(field, alias, column)
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
# This loop customized for GeoQuery.
for (table, col), field in self.query.related_select_cols:
r = self.get_field_select(field, table, col)
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
result = []
if opts is None:
opts = self.query.get_meta()
aliases = set()
only_load = self.deferred_to_columns()
seen = self.query.included_inherited_models.copy()
if start_alias:
seen[None] = start_alias
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field))
aliases.add(alias)
continue
# This part of the function is customized for GeoQuery. We
# see if there was any custom selection specified in the
# dictionary, and set up the selection format appropriately.
field_sel = self.get_field_select(field, alias)
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (field_sel, c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = field_sel
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def resolve_columns(self, row, fields=()):
"""
This routine is necessary so that distances and geometries returned
from extra selection SQL get resolved appropriately into Python
objects.
"""
values = []
aliases = list(self.query.extra_select)
# Have to set a starting row number offset that is used for
# determining the correct starting row index -- needed for
# doing pagination with Oracle.
rn_offset = 0
if self.connection.ops.oracle:
if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1
index_start = rn_offset + len(aliases)
# Converting any extra selection values (e.g., geometries and
# distance objects added by GeoQuerySet methods).
values = [self.query.convert_values(v,
self.query.extra_select_fields.get(a, None),
self.connection)
for v, a in zip(row[rn_offset:index_start], aliases)]
if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
# We resolve the rest of the columns if we're on Oracle or if
# the `geo_values` attribute is defined.
for value, field in zip_longest(row[index_start:], fields):
values.append(self.query.convert_values(value, field, self.connection))
else:
values.extend(row[index_start:])
return tuple(values)
#### Routines unique to GeoQuery ####
def get_extra_select_format(self, alias):
sel_fmt = '%s'
if hasattr(self.query, 'custom_select') and alias in self.query.custom_select:
sel_fmt = sel_fmt % self.query.custom_select[alias]
return sel_fmt
def get_field_select(self, field, alias=None, column=None):
"""
Returns the SELECT SQL string for the given field. Figures out
if any custom selection SQL is needed for the column. The `alias`
keyword may be used to manually specify the database table where
the column exists, if not in the model associated with this
`GeoQuery`. Similarly, `column` may be used to specify the exact
column name, rather than using the `column` attribute on `field`.
"""
sel_fmt = self.get_select_format(field)
if field in self.query.custom_select:
field_sel = sel_fmt % self.query.custom_select[field]
else:
field_sel = sel_fmt % self._field_column(field, alias, column)
return field_sel
def get_select_format(self, fld):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
if self.connection.ops.select and hasattr(fld, 'geom_type'):
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = self.connection.ops.select
# Because WKT doesn't contain spatial reference information,
# the SRID is prefixed to the returned WKT to ensure that the
# transformed geometries have an SRID different than that of the
# field -- this is only used by `transform` for Oracle and
# SpatiaLite backends.
if self.query.transformed_srid and ( self.connection.ops.oracle or
self.connection.ops.spatialite ):
sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt)
else:
sel_fmt = '%s'
return sel_fmt
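# Worked sketch of the format above (the ops.select value is hypothetical):
# for a geometry field with connection.ops.select == 'AsText(%s)', the select
# format is 'AsText(%s)'; if transformed_srid is 4326 on Oracle/SpatiaLite it
# becomes "'SRID=4326;'||AsText(%s)"; for non-geometry fields it stays '%s'.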
# Private API utilities, subject to change.
def _field_column(self, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuery` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None: table_alias = self.query.get_meta().db_table
return "%s.%s" % (self.quote_name_unless_alias(table_alias),
self.connection.ops.quote_name(column or field.column))
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
"""
This is overridden for GeoDjango to properly cast date columns, since
`GeoQuery.resolve_columns` is used for spatial values.
See #14648, #16757.
"""
def results_iter(self):
if self.connection.ops.oracle:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if self.connection.ops.oracle:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, GeoSQLCompiler):
"""
This is overridden for GeoDjango to properly cast date columns, since
`GeoQuery.resolve_columns` is used for spatial values.
See #14648, #16757.
"""
def results_iter(self):
if self.connection.ops.oracle:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if self.connection.ops.oracle:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
# Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
| apache-2.0 |
mozillazg/Unidecode | unidecode/x00b.py | 252 | 4132 | data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'e', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'e', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'+', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'rr', # 0x5c
'rh', # 0x5d
'[?]', # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'ng', # 0x99
'c', # 0x9a
'[?]', # 0x9b
'j', # 0x9c
'[?]', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'nn', # 0xa3
't', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'n', # 0xa8
'nnn', # 0xa9
'p', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'lll', # 0xb4
'v', # 0xb5
'[?]', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'+', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'+10+', # 0xf0
'+100+', # 0xf1
'+1000+', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
erkrishna9/odoo | addons/l10n_es/__openerp__.py | 314 | 2772 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008-2010 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# Copyright (c) 2012-2013, Grupo OPENTIA (<http://opentia.com>) Registered EU Trademark.
# Dpto. Consultoría <[email protected]>
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro Manuel Baeza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Spanish Charts of Accounts (PGCE 2008)",
"version" : "4.0",
"author" : "Spanish Localization Team",
'website' : 'https://launchpad.net/openerp-spain',
"category" : "Localization/Account Charts",
"description": """
Spanish charts of accounts (PGCE 2008).
========================================
* Defines the following chart of account templates:
* Spanish general chart of accounts 2008
* Spanish general chart of accounts 2008 for small and medium companies
* Spanish general chart of accounts 2008 for associations
* Defines templates for sale and purchase VAT
* Defines tax code templates
* Defines fiscal positions for Spanish fiscal legislation
""",
"license" : "AGPL-3",
"depends" : ["account", "base_vat", "base_iban"],
"data" : [
"account_type.xml",
"account_chart_template.xml",
"account_account_common.xml",
"account_account_full.xml",
"account_account_pymes.xml",
"account_account_assoc.xml",
"tax_codes_common.xml",
"taxes_common.xml",
"fiscal_templates_common.xml",
"account_chart_template_post.xml",
"l10n_es_wizard.xml",
],
"demo" : [],
'auto_install': False,
"installable": True,
'images': ['images/config_chart_l10n_es.png', 'images/l10n_es_chart.png'],
}
| agpl-3.0 |
CoderSong2015/utFordataType | googletest/googletest/test/gtest_test_utils.py | 20 | 10824 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
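# Precedence sketch (hypothetical values): with BUILD_DIR=/tmp/a in the
# environment and --build_dir=/tmp/b on the command line, the loop above
# leaves _flag_map['build_dir'] == '/tmp/b' (command line beats environment
# variable, which beats the default of os.path.dirname(sys.argv[0])).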
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
sys.stdout.write(message)
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| apache-2.0 |
lewischeng-ms/pox | pox/lib/socketcapture.py | 24 | 5511 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from pox.lib.addresses import *
import pox.lib.packet as pkt
from struct import pack
import time
from struct import pack
import time
class SocketWedge (object):
def __init__ (self, socket):
self._socket = socket
def send (self, string, *args, **kw):
r = self._socket.send(string, *args, **kw)
self._send_out(string, r)
return r
def recv (self, bufsize, *args, **kw):
r = self._socket.recv(bufsize, *args, **kw)
self._recv_out(r)
return r
def __getattr__ (self, n):
return getattr(self._socket, n)
class PCapWriter (object):
def __init__ (self, outstream, socket = None, flush = False,
local_addrs = (None,None,None),
remote_addrs = (None,None,None)):
"""
outstream is the stream to write the PCAP trace to.
Ethernet addresses have to be faked, and it can be convenient to
fake IP and TCP addresses as well. Thus, you can specify local_addrs
or remote_addrs. These are tuples of (EthAddr, IPAddr, TCPPort).
Any item that is None gets a default value.
"""
self._out = outstream
self._flush = flush
if socket is not None:
remote = socket.getpeername()
local = socket.getsockname()
else:
remote = ("1.1.1.1",1)
local = ("0.0.0.0",0)
def create_packet (e1,e2,i1,i2,t1,t2):
e = pkt.ethernet(
src = e1,
dst = e2,
type = pkt.ethernet.IP_TYPE)
i = pkt.ipv4(
srcip = i1,
dstip = i2,
protocol = pkt.ipv4.TCP_PROTOCOL)
t = pkt.tcp(
srcport = t1,
dstport = t2,
off = 5,
win = 1)
t.ACK = True
i.payload = t
e.payload = i
return e
self._c_to_s = create_packet(
local_addrs[0] or EthAddr("\x02" + "\x00" * 5),
remote_addrs[0] or EthAddr("\x02" + "\x11" * 5),
local_addrs[1] or IPAddr(local[0]),
remote_addrs[1] or IPAddr(remote[0]),
local_addrs[2] or local[1],
remote_addrs[2] or remote[1],
)
self._s_to_c = create_packet(
remote_addrs[0] or EthAddr("\x02" + "\x11" * 5),
local_addrs[0] or EthAddr("\x02" + "\x00" * 5),
remote_addrs[1] or IPAddr(remote[0]),
local_addrs[1] or IPAddr(local[0]),
remote_addrs[2] or remote[1],
local_addrs[2] or local[1],
)
outstream.write(pack("IHHiIII",
0xa1b2c3d4, # Magic
2,4, # Version
time.timezone, # TZ offset
0, # Accuracy of timestamps (apparently 0 is OK)
0x7fffFFff, # Snaplen
1 # Ethernet
))
def write (self, outgoing, buf):
if len(buf) == 0: return
e = self._c_to_s if outgoing else self._s_to_c
e2 = self._c_to_s if not outgoing else self._s_to_c
l = len(buf)
e.payload.payload.payload = buf
buf = e.pack()
t = time.time()
ut = t - int(t)
t = int(t)
ut = int(ut * 1000000)
self._out.write(pack("IIII",
t,ut, # Timestamp
len(buf), # Saved size
len(buf), # Original size
))
self._out.write(buf)
if self._flush: self._out.flush()
e.next.next.seq += l
e2.next.next.ack += l
class CaptureSocket (SocketWedge):
"""
Wraps a TCP socket and writes a faked PCAP format trace
"""
def __init__ (self, socket, outstream, close = True,
local_addrs = (None,None,None),
remote_addrs = (None,None,None)):
"""
socket is the socket to be wrapped.
outstream is the stream to write the PCAP trace to.
Ethernet addresses have to be faked, and it can be convenient to
fake IP and TCP addresses as well. Thus, you can specify local_addrs
or remote_addrs. These are tuples of (EthAddr, IPAddr, TCPPort).
Any item that is None gets a default value.
"""
super(CaptureSocket, self).__init__(socket)
self._close = close
self._writer = PCapWriter(outstream, socket=socket,
local_addrs=local_addrs,
remote_addrs=remote_addrs)
def _recv_out (self, buf):
try:
self._writer.write(False, buf)
except Exception:
pass
def _send_out (self, buf, r):
try:
self._writer.write(True, buf[:r])
except Exception:
pass
def close (self, *args, **kw):
if self._close:
try:
self._writer._out.close()
except Exception:
pass
return self._socket.close(*args, **kw)
if __name__ == "__main__":
"""
Test with:
nc -v -v -l 9933
"""
import socket
sock = socket.create_connection(("127.0.0.1",9933))
s = CaptureSocket(sock, file("test.pcap", "w"))
while True:
d = s.recv(1024)
d = d.upper()
import sys
import time
import random
time.sleep(random.random() * 1.5)
sys.stdout.write(d)
s.send(d)
| gpl-3.0 |
emilio/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/pytester.py | 32 | 42622 | """(disabled by default) support for testing pytest and pytest plugins."""
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import six
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
import py
import pytest
from _pytest.main import Session, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace(
"$py.class", ".py"
)
IGNORE_PAM = [ # filenames added when obtaining details about the current user
u"/var/lib/sss/mc/passwd"
]
def pytest_addoption(parser):
parser.addoption(
"--lsof",
action="store_true",
dest="lsof",
default=False,
help=("run FD checks if lsof is available"),
)
parser.addoption(
"--runpytest",
default="inprocess",
dest="runpytest",
choices=("inprocess", "subprocess"),
help=(
"run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"
),
)
def pytest_configure(config):
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith("f") and (
"deleted" not in line
and "mem" not in line
and "txt" not in line
and "cwd" not in line
)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split("\0")
fd = fields[0][1:]
filename = fields[1][1:]
if filename in IGNORE_PAM:
continue
if filename.startswith("/"):
open_files.append((fd, filename))
return open_files
def matching_platform(self):
try:
py.process.cmdexec("lsof -v")
except (py.process.cmdexec.Error, UnicodeDecodeError):
# cmdexec may raise UnicodeDecodeError on Windows systems with
# locale other than English:
# https://bitbucket.org/pytest-dev/py/issues/66
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
error.append("See issue #2366")
item.warn("", "\n".join(error))
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
"python2.7": r"C:\Python27\python.exe",
"python3.4": r"C:\Python34\python.exe",
"python3.5": r"C:\Python35\python.exe",
"python3.6": r"C:\Python36\python.exe",
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
import subprocess
popen = subprocess.Popen(
[str(executable), "--version"],
universal_newlines=True,
stderr=subprocess.PIPE,
)
out, err = popen.communicate()
if name == "jython":
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
elif popen.returncode != 0:
# handle pyenv's 127
executable = None
cache[name] = executable
return executable
@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
hooks.
"""
return PytestArg(request)
class PytestArg(object):
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values):
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
class ParsedCall(object):
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d["_name"]
return "<ParsedCall %r(**%r)>" % (self._name, d)
class HookRecorder(object):
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording each call
before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
values = self.getcalls(name)
assert len(values) == 1, (name, values)
return values[0]
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(
self,
inamepart="",
names="pytest_runtest_logreport pytest_collectreport",
when=None,
):
"""return a testreport whose dotted import path matches"""
values = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, "when", None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
values.append(rep)
if not values:
raise ValueError(
"could not find test report matching %r: "
"no test reports at all!" % (inamepart,)
)
if len(values) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" % (inamepart, values)
)
return values[0]
def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures("pytest_collectreport")
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult(object):
"""The result of running a command.
Attributes:
:ret: the return value
:outlines: list of lines captured from stdout
:errlines: list of lines captured from stderr
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
method
:stderr: :py:class:`LineMatcher` of stderr
:duration: duration in seconds
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
"""Return a dictionary of outcomestring->num from parsing the terminal
output that the test process produced.
"""
for line in reversed(self.outlines):
if "seconds" in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
raise ValueError("Pytest terminal report not found")
def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
"""Assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run.
"""
d = self.parseoutcomes()
obtained = {
"passed": d.get("passed", 0),
"skipped": d.get("skipped", 0),
"failed": d.get("failed", 0),
"error": d.get("error", 0),
}
assert (
obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
)
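# Typical use from a test (sketch; the outcome counts are illustrative):
#   result = testdir.runpytest()
#   result.assert_outcomes(passed=2, skipped=1)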
class CwdSnapshot(object):
def __init__(self):
self.__saved = os.getcwd()
def restore(self):
os.chdir(self.__saved)
class SysModulesSnapshot(object):
def __init__(self, preserve=None):
self.__preserve = preserve
self.__saved = dict(sys.modules)
def restore(self):
if self.__preserve:
self.__saved.update(
(k, m) for k, m in sys.modules.items() if self.__preserve(k)
)
sys.modules.clear()
sys.modules.update(self.__saved)
class SysPathsSnapshot(object):
def __init__(self):
self.__saved = list(sys.path), list(sys.meta_path)
def restore(self):
sys.path[:], sys.meta_path[:] = self.__saved
class Testdir(object):
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
name = request.function.__name__
self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
self.plugins = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
clean this up. It does not remove the temporary directory however so
it can be looked at after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
def __take_sys_modules_snapshot(self):
# some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example
def preserve_module(name):
return name.startswith("zope")
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
assert not hasattr(pluginmanager, "reprec")
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
self.tmpdir.chdir()
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
def to_text(s):
return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)
if args:
source = u"\n".join(to_text(x) for x in args)
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for basename, value in items:
p = self.tmpdir.join(basename).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
source = u"\n".join(to_text(line) for line in source.lines)
p.write(source.strip().encode(encoding), "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
"""Create a new file in the testdir.
ext: The extension the file should use, including the dot, e.g. `.py`.
args: All args will be treated as strings and joined using newlines.
The result will be written as contents to the file. The name of the
file will be based on the test function requesting this fixture.
E.g. "testdir.makefile('.txt', 'line1', 'line2')"
kwargs: Each keyword is the name of a file, while the value of it will
be written as contents of the file.
E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)["pytest"]
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile(".txt", args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
import importlib
except ImportError:
pass
else:
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
configuration
:param arg: a :py:class:`py.path.local` instance of the file
"""
session = Session(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param path: a :py:class:`py.path.local` instance of the file
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: the source code of the test module
:param cmdlineargs: any extra command line arguments to use
:return: :py:class:`HookRecorder` instance of the result
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args: command line arguments to pass to :py:func:`pytest.main`
:param plugins: (keyword-only) extra plugin instances the
``pytest.main()`` instance should use
:return: a :py:class:`HookRecorder` instance
"""
finalizers = []
try:
# When running pytest inline any plugins active in the main test
# process are already imported. So this disables the warning which
# will trigger to say they can no longer be rewritten, which is
# fine as they have already been rewritten.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert_warn_already_imported():
AssertionRewritingHook._warn_already_imported = orig_warn
finalizers.append(revert_warn_already_imported)
AssertionRewritingHook._warn_already_imported = lambda *a: None
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect(object):
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec(object):
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
def runpytest_inprocess(self, *args, **kwargs):
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides.
"""
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec(object):
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec(object):
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
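# Illustrative sketch of typical use from a test that receives the "testdir"
# fixture (the test body below is an assumption for illustration, not part of
# this module):
#   def test_example(testdir):
#       testdir.makepyfile("def test_ok(): assert True")
#       result = testdir.runpytest()
#       result.assert_outcomes(passed=1)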
def _ensure_basetemp(self, args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith("--basetemp"):
# print("basedtemp exists: %s" %(args,))
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
# print("added basetemp: %s" %(args,))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source: the module source
:param funcname: the name of the test function for which to return a
test item
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, (
"%r item not found in module:\n%s\nitems: %s" % (funcname, source, items)
)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source: the source code of the module to collect
:param configargs: any extra arguments to pass to
:py:meth:`parseconfigure`
:param withinit: whether to also write an ``__init__.py`` file to the
same directory to ensure it is a package
"""
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
node = self.getnode(config, path)
return node
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection node
matching the given name.
:param modcol: a module collection node; see :py:meth:`getmodulecol`
:param name: the name of the node to return
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working directory
is in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
filter(None, [str(os.getcwd()), env.get("PYTHONPATH", "")])
)
kw["env"] = env
popen = subprocess.Popen(
cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw
)
popen.stdin.close()
return popen
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
Returns a :py:class:`RunResult`.
"""
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", " ".join(cmdargs))
print(" in:", str(py.path.local()))
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(
cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
)
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
# we cannot use `(sys.executable, script)` because on Windows the
# script is e.g. `pytest.exe`
return (sys.executable, PYTEST_FULLPATH) # noqa
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will be added using the
``-p`` command line option. Additionally ``--basetemp`` is used to put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" so they do not conflict with the normal numbered
pytest location for temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=self.tmpdir
)
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ("-p", plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary
directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),
)
class LineComp(object):
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
"""Assert that lines2 are contained (linearly) in lines1.
Return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher(object):
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing newlines, i.e.
``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output using in any order.
Lines are checked using ``fnmatch.fnmatch``. The argument is a list of
lines which have to occur in the output, in any order.
"""
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2):
"""Check lines exist in the output using ``re.match``, in any order.
The argument is a list of lines which have to occur in the output, in
any order.
"""
self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
def _match_lines_random(self, lines2, match_func):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the output, in
any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or match_func(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1:]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(" ".join((str(x) for x in args)))
@property
def _log_text(self):
return "\n".join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
The argument is a list of lines which have to match and can use glob
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, fnmatch, "fnmatch")
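# A minimal sketch of matching captured output (the line values below are
# assumptions made up for illustration):
#   matcher = LineMatcher(["collected 1 item", "test_mod.py .", "1 passed"])
#   matcher.fnmatch_lines(["collected * item*", "*1 passed*"])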
def re_match_lines(self, lines2):
"""Search captured text for matching lines using ``re.match``.
The argument is a list of lines which have to match using ``re.match``.
If they do not match a pytest.fail() is called.
The matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
def _match_lines(self, lines2, match_func, match_nickname):
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
:param list[str] lines2: list of string patterns to match. The actual
format depends on ``match_func``
:param match_func: a callable ``match_func(line, pattern)`` where line
is the captured line from stdout/stderr and pattern is the matching
pattern
:param str match_nickname: the nickname for the match function that
will be logged to stdout when a match occurs
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif match_func(nextline, line):
self._log("%s:" % match_nickname, repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
| mpl-2.0 |
eyohansa/django | django/contrib/sessions/backends/base.py | 298 | 12046 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=None):
self.modified = self.modified or key in self._session
return self._session.pop(key, default)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values will be set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
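# Illustrative usage from view code (the ``request`` object is an assumption
# here): ``request.session.set_expiry(300)`` expires the session after 300
# seconds of inactivity, while ``request.session.set_expiry(0)`` expires it
# when the browser closes.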
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| bsd-3-clause |
paveu/api_mocker | apimocker/mocker/migrations/0002_auto_20170718_1858.py | 1 | 1459 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-18 18:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mocker', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ResponseContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Create date')),
('update_date', models.DateTimeField(auto_now=True, verbose_name='Update date')),
('destination_address', models.URLField(null=True, verbose_name='Called API')),
('content', models.TextField(null=True, verbose_name='API Response')),
],
options={
'verbose_name': 'API Log',
'verbose_name_plural': 'API Logs',
},
),
migrations.RemoveField(
model_name='mocker',
name='api_log',
),
migrations.DeleteModel(
name='APILog',
),
migrations.AddField(
model_name='responsecontent',
name='mocker',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mocker.Mocker'),
),
]
| mit |
pyrrho314/recipesystem | trunk/astrodata/adutils/irafutil.py | 1 | 6759 |
'''This file contains the following utilities:
joinlines (input, delim=" ", missing="Missing", maxchars=161,
shortest=True)
joinlists (list1, list2, delim=" ", missing="Missing", shortest=True)
atList (input, filenames)
expandlist (input)
'''
import copy
#---------------------------------------------------------------------------
def joinlines (input, delim=" ", missing="Missing",
maxchars=161, shortest=True):
"""Join lines from the input list of files.
This is an implementation of the iraf.proto.joinlines task, with
the following differences: The result is as a list of strings,
returned as the function value, rather than writing to standard
output. There is no verbose mode. No warnings will be printed.
@param input: names of files, separated by commas (and optional
whitespace)
@type input: string
@param delim: delimiter to separate joined lines
@type delim: string
@param missing: string to use for files with fewer lines,
if shortest is False
@type missing: string
@param maxchars: the output strings will be truncated after this length
@type maxchars: int
@param shortest: if True, the number of elements in the function
value will be the smallest number of lines in any input file;
if False, the number of elements will be the largest number of
lines in any input file
@type shortest: Boolean
@return: the contents of the input files
@rtype: list of strings
"""
filenames = input.split (",")
if not filenames[0]: # an empty string?
return filenames
for i in range (len (filenames)):
filenames[i] = filenames[i].strip()
# There will be one element of all_lines for each file in input;
# all_lines[i] will be a list of the lines (with leading and
# trailing whitespace removed) of file i from input.
all_lines = []
first = True
for name in filenames:
fd = open (name)
lines = fd.readlines()
fd.close()
for i in range (len (lines)):
lines[i] = lines[i].strip()
all_lines.append (copy.deepcopy (lines))
numlines = len (lines)
if first:
min_numlines = numlines
max_numlines = numlines
first = False
else:
min_numlines = min (numlines, min_numlines)
max_numlines = max (numlines, max_numlines)
if min_numlines < max_numlines:
if shortest:
numlines = min_numlines
else:
numlines = max_numlines
if len (all_lines[0]) > numlines:
result = all_lines[0][0:numlines]
else:
result = all_lines[0]
for k in range (len (result), numlines):
result.append (missing)
for i in range (1, len (all_lines)):
lines = all_lines[i]
for j in range (len (lines)):
if j >= numlines:
break
result[j] = result[j] + delim + lines[j]
for j in range (len (lines), numlines):
result[j] = result[j] + delim + missing
for j in range (len (result)):
result[j] = result[j][0:maxchars]
return result
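# Example (illustrative, file contents are assumptions): if a.txt holds the
# lines "1", "2", "3" and b.txt holds "x", "y", then
# joinlines("a.txt, b.txt", delim="|") returns ["1|x", "2|y"], while passing
# shortest=False returns ["1|x", "2|y", "3|Missing"].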
#---------------------------------------------------------------------------
def joinlists (list1, list2, delim=" ", missing="Missing", shortest=True):
"""Join corresponding elements from two input lists.
This is similar to the iraf.proto.joinlines task, except that the
input is a pair of lists rather than files (just two input lists),
and the result is as a list of strings, returned as the function
value, rather than writing to standard output. There is no verbose
mode. No warnings will be printed.
@param list1: a list of values
@type list1: list
@param list2: another list of values
@type list2: list
@param delim: delimiter to separate joined elements
@type delim: string
@param missing: string to use for lists with fewer lines,
if shortest is False
@type missing: string
@param shortest: if True, the number of elements in the function
value will be the smaller of the number of lines in either of
the input lists;
if False, the number of elements will be the larger of the
number lines in either input list
@type shortest: Boolean
@return: the contents of the input lists
@rtype: list of strings
"""
len1 = len (list1)
len2 = len (list2)
min_numlines = min (len1, len2)
max_numlines = max (len1, len2)
if min_numlines < max_numlines:
if shortest:
numlines = min_numlines
else:
numlines = max_numlines
else:
numlines = len1
result = []
for i in range (numlines):
if i > len1-1:
result.append (missing + delim + str (list2[i]))
elif i > len2-1:
result.append (str (list1[i]) + delim + missing)
else:
result.append (str (list1[i]) + delim + str (list2[i]))
return result
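# Example (illustrative): joinlists([1, 2, 3], ["a", "b"], delim=",") returns
# ["1,a", "2,b"]; with shortest=False it returns ["1,a", "2,b", "3,Missing"].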
#---------------------------------------------------------------------------
def atList (input, filenames):
"""Either append the current name, or read contents if it's a file.
@param input: one or more names (or @name) separated by commas
@type input: string
@param filenames: (modified in-place) a list of the names extracted
from input, or from the contents of input if it's an '@file'
@type filenames: list
"""
input = input.strip()
if not input:
return
if input[0] == '@' and input.find (',') < 0:
# just a single word, and it begins with '@'
line = irafutil.expandFileName (input[1:]) # expand environment var.
fd = open (line)
lines = fd.readlines()
fd.close()
else:
# one or more words, and the first does not begin with '@'
lines = input.split (',')
for line in lines:
line = line.strip()
if line[0] == '@':
atList (line, filenames)
else:
line = irafutil.expandFileName (line)
filenames.append (line)
#---------------------------------------------------------------------------
def expandlist (input):
"""Convert a string of comma-separated names to a list of names.
@param input: one or more names separated by commas;
a name of the form '@filename' implies that 'filename' is
the name of a file containing names
@type input: string
@return: list of the names in 'input'
@rtype: list of strings
"""
filenames = []
atList (input, filenames)
return filenames
#---------------------------------------------------------------------------
| mpl-2.0 |
sensidev/drf-requests-jwt | drf_requests_jwt/services.py | 1 | 4894 | """
Services.
"""
import json
import logging
import requests
from urllib.parse import urlparse, parse_qs
from drf_requests_jwt import settings
from drf_requests_jwt.backends.utils import build_url
logger = logging.getLogger(__name__)
class HttpRequestService(object):
obtain_jwt_allowed_fail_attempts = settings.DEFAULTS.get('OBTAIN_JWT_ALLOWED_FAIL_ATTEMPTS')
cache_backend_class = settings.DEFAULTS.get('CACHE_BACKEND_CLASS')
def __init__(self, params=None):
super().__init__()
self.cache_backend = self._get_cache_backend()
self.params = params or {}
self.params.update(self._get_params())
self.headers = self._get_headers()
self.url = self._get_url()
self.session = requests.Session()
self.obtain_jwt_fail_attempts = 0
def _get_cache_backend(self):
resolved_backend_class = settings.import_from_string(self.cache_backend_class)
return resolved_backend_class(self._get_jwt_cache_key())
def _get_base_url(self):
raise NotImplementedError
def _get_url_path(self):
raise NotImplementedError
def _get_url(self):
return build_url(base_url=self._get_base_url(), path=self._get_url_path())
def _get_jwt_login_url_path(self):
raise NotImplementedError
def _get_jwt_login_url(self):
return build_url(base_url=self._get_base_url(), path=self._get_jwt_login_url_path())
def _get_username(self):
raise NotImplementedError
def _get_password(self):
raise NotImplementedError
def _get_params(self):
return {}
def _get_headers(self):
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=self._get_jwt_token_from_cache())
}
def get_results_from_all_pages(self):
next_url = self.url
result_list = []
while True:
url_parse = urlparse(next_url)
self.params.update(parse_qs(url_parse.query))
next_url = '{scheme}://{netloc}{path}'.format(
scheme=url_parse.scheme, netloc=url_parse.netloc, path=url_parse.path
)
response = self.session.get(next_url, headers=self.headers, params=self.params)
logger.debug('Request url: {} with params {}'.format(next_url, self.params))
if response.status_code == 200:
response_json = response.json()
next_url = response_json.get('next')
result_list.extend(response_json.get('results', []))
elif response.status_code == 401:
if self._should_update_authorization_header():
self.update_authorization_header()
else:
break
else:
raise Exception('Wrong response status code: {code}, content: {content}'.format(
code=response.status_code,
content=response.content
))
if not bool(next_url):
break
return result_list
def write_results_from_all_pages_to_file(self, filename):
results = self.get_results_from_all_pages()
with open(filename, 'w') as output:
json.dump(results, output)
def update_authorization_header(self):
token = self._get_jwt_token()
self.headers['Authorization'] = 'Bearer {token}'.format(token=token)
def get_deserialized_data(self):
raise NotImplementedError
def _get_jwt_token(self):
payload = {
'username': self._get_username(),
'password': self._get_password()
}
url = self._get_jwt_login_url()
logger.debug('Request url: {}'.format(url))
response = self.session.post(url, data=payload)
if response.status_code == 200:
response_dict = response.json()
token = response_dict.get('access')
self._set_jwt_token_to_cache(token)
logger.debug('Received a fresh JWT token')
return token
else:
self.obtain_jwt_fail_attempts += 1
logger.warning('Attempt to get a JWT token failed')
raise Exception('Wrong response status code: {code}, content: {content}'.format(
code=response.status_code,
content=response.content
))
def _should_update_authorization_header(self):
return self.obtain_jwt_fail_attempts <= self.obtain_jwt_allowed_fail_attempts
def _set_jwt_token_to_cache(self, token):
self.cache_backend.set_jwt(token)
def _get_jwt_token_from_cache(self):
return self.cache_backend.get_jwt()
def _get_jwt_cache_key(self):
return 'jwt-{url}-{username}'.format(url=self._get_base_url(), username=self._get_username())
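# A minimal subclass sketch (the URLs, paths and credentials below are
# assumptions made up for illustration):
#
#   class DeviceRequestService(HttpRequestService):
#       def _get_base_url(self):
#           return 'https://api.example.com'
#       def _get_url_path(self):
#           return '/api/v1/devices/'
#       def _get_jwt_login_url_path(self):
#           return '/api/v1/token/'
#       def _get_username(self):
#           return 'demo-user'
#       def _get_password(self):
#           return 'demo-password'
#
#   devices = DeviceRequestService().get_results_from_all_pages()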
| mit |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/lib2to3/fixes/fix_sys_exc.py | 327 | 1039 | """Fixer for sys.exc_{type, value, traceback}
sys.exc_type -> sys.exc_info()[0]
sys.exc_value -> sys.exc_info()[1]
sys.exc_traceback -> sys.exc_info()[2]
"""
# By Jeff Balogh and Benjamin Peterson
# Local imports
from .. import fixer_base
from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
class FixSysExc(fixer_base.BaseFix):
# This order matches the ordering of sys.exc_info().
exc_info = [u"exc_type", u"exc_value", u"exc_traceback"]
BM_compatible = True
PATTERN = """
power< 'sys' trailer< dot='.' attribute=(%s) > >
""" % '|'.join("'%s'" % e for e in exc_info)
def transform(self, node, results):
sys_attr = results["attribute"][0]
index = Number(self.exc_info.index(sys_attr.value))
call = Call(Name(u"exc_info"), prefix=sys_attr.prefix)
attr = Attr(Name(u"sys"), call)
attr[1].children[0].prefix = results["dot"].prefix
attr.append(Subscript(index))
return Node(syms.power, attr, prefix=node.prefix)
| gpl-2.0 |
MarishaYasko/interactive-stories-stands | InteractiveStands/Lib/copyreg.py | 165 | 6833 | """Helper to provide extensibility for pickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
# The constructor_ob function is a vestige of the old "safe for unpickling"
# mechanism. There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __newobj_ex__(cls, args, kwargs):
"""Used by pickle protocol 4, instead of __newobj__ to allow classes with
keyword-only arguments to be pickled correctly.
"""
return cls.__new__(cls, *args, **kwargs)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, str):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError("code out of range")
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
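# Illustrative use of the registry (the module/class names below are made up;
# codes in the 240-255 range are reserved for private use and never assigned):
#   add_extension('mypackage.models', 'Widget', 240)
#   remove_extension('mypackage.models', 'Widget', 240)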
| mit |
catapult-project/catapult-csm | trace_processor/third_party/cloudstorage/common.py | 129 | 12326 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Helpers shared by cloudstorage_stub and cloudstorage_api."""
__all__ = ['CS_XML_NS',
'CSFileStat',
'dt_str_to_posix',
'local_api_url',
'LOCAL_GCS_ENDPOINT',
'local_run',
'get_access_token',
'get_stored_content_length',
'get_metadata',
'GCSFileStat',
'http_time_to_posix',
'memory_usage',
'posix_time_to_http',
'posix_to_dt_str',
'set_access_token',
'validate_options',
'validate_bucket_name',
'validate_bucket_path',
'validate_file_path',
]
import calendar
import datetime
from email import utils as email_utils
import logging
import os
import re
try:
from google.appengine.api import runtime
except ImportError:
from google.appengine.api import runtime
_GCS_BUCKET_REGEX_BASE = r'[a-z0-9\.\-_]{3,63}'
_GCS_BUCKET_REGEX = re.compile(_GCS_BUCKET_REGEX_BASE + r'$')
_GCS_BUCKET_PATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'$')
_GCS_PATH_PREFIX_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'.*')
_GCS_FULLPATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'/.*')
_GCS_METADATA = ['x-goog-meta-',
'content-disposition',
'cache-control',
'content-encoding']
_GCS_OPTIONS = _GCS_METADATA + ['x-goog-acl']
CS_XML_NS = 'http://doc.s3.amazonaws.com/2006-03-01'
LOCAL_GCS_ENDPOINT = '/_ah/gcs'
_access_token = ''
_MAX_GET_BUCKET_RESULT = 1000
def set_access_token(access_token):
"""Set the shared access token to authenticate with Google Cloud Storage.
When set, the library will always attempt to communicate with the
real Google Cloud Storage with this token even when running on dev appserver.
Note the token could expire so it's up to you to renew it.
When absent, the library will automatically request and refresh a token
on appserver, or when on dev appserver, talk to a Google Cloud Storage
stub.
Args:
access_token: you can get one by running 'gsutil -d ls' and copying the
str after 'Bearer'.
"""
global _access_token
_access_token = access_token
def get_access_token():
"""Returns the shared access token."""
return _access_token
class GCSFileStat(object):
"""Container for GCS file stat."""
def __init__(self,
filename,
st_size,
etag,
st_ctime,
content_type=None,
metadata=None,
is_dir=False):
"""Initialize.
For files, the non optional arguments are always set.
For directories, only filename and is_dir is set.
Args:
filename: a Google Cloud Storage filename of form '/bucket/filename'.
st_size: file size in bytes. long compatible.
etag: hex digest of the md5 hash of the file's content. str.
st_ctime: posix file creation time. float compatible.
content_type: content type. str.
metadata: a str->str dict of user specified options when creating
the file. Possible keys are x-goog-meta-, content-disposition,
content-encoding, and cache-control.
is_dir: True if this represents a directory. False if this is a real file.
"""
self.filename = filename
self.is_dir = is_dir
self.st_size = None
self.st_ctime = None
self.etag = None
self.content_type = content_type
self.metadata = metadata
if not is_dir:
self.st_size = long(st_size)
self.st_ctime = float(st_ctime)
if etag[0] == '"' and etag[-1] == '"':
etag = etag[1:-1]
self.etag = etag
def __repr__(self):
if self.is_dir:
return '(directory: %s)' % self.filename
return (
'(filename: %(filename)s, st_size: %(st_size)s, '
'st_ctime: %(st_ctime)s, etag: %(etag)s, '
'content_type: %(content_type)s, '
'metadata: %(metadata)s)' %
dict(filename=self.filename,
st_size=self.st_size,
st_ctime=self.st_ctime,
etag=self.etag,
content_type=self.content_type,
metadata=self.metadata))
def __cmp__(self, other):
if not isinstance(other, self.__class__):
raise ValueError('Argument to cmp must have the same type. '
'Expect %s, got %s', self.__class__.__name__,
other.__class__.__name__)
if self.filename > other.filename:
return 1
elif self.filename < other.filename:
return -1
return 0
def __hash__(self):
if self.etag:
return hash(self.etag)
return hash(self.filename)
CSFileStat = GCSFileStat
def get_stored_content_length(headers):
"""Return the content length (in bytes) of the object as stored in GCS.
x-goog-stored-content-length should always be present except when called via
the local dev_appserver. Therefore if it is not present we default to the
standard content-length header.
Args:
headers: a dict of headers from the http response.
Returns:
the stored content length.
"""
length = headers.get('x-goog-stored-content-length')
if length is None:
length = headers.get('content-length')
return length
def get_metadata(headers):
"""Get user defined options from HTTP response headers."""
return dict((k, v) for k, v in headers.iteritems()
if any(k.lower().startswith(valid) for valid in _GCS_METADATA))
def validate_bucket_name(name):
"""Validate a Google Storage bucket name.
Args:
name: a Google Storage bucket name with no prefix or suffix.
Raises:
ValueError: if name is invalid.
"""
_validate_path(name)
if not _GCS_BUCKET_REGEX.match(name):
raise ValueError('Bucket should be 3-63 characters long using only a-z,'
'0-9, underscore, dash or dot but got %s' % name)
def validate_bucket_path(path):
"""Validate a Google Cloud Storage bucket path.
Args:
path: a Google Storage bucket path. It should have form '/bucket'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_BUCKET_PATH_REGEX.match(path):
raise ValueError('Bucket should have format /bucket '
'but got %s' % path)
def validate_file_path(path):
"""Validate a Google Cloud Storage file path.
Args:
path: a Google Storage file path. It should have form '/bucket/filename'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_FULLPATH_REGEX.match(path):
raise ValueError('Path should have format /bucket/filename '
'but got %s' % path)
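# For example (illustrative): validate_file_path('/my-bucket/photos/cat.jpg')
# passes silently, while validate_file_path('my-bucket/photos/cat.jpg') or
# validate_file_path('/my-bucket') raises ValueError.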
def _process_path_prefix(path_prefix):
"""Validate and process a Google Cloud Stoarge path prefix.
Args:
path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix'
or '/bucket/' or '/bucket'.
Raises:
ValueError: if path is invalid.
Returns:
a tuple of /bucket and prefix. prefix can be None.
"""
_validate_path(path_prefix)
if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
raise ValueError('Path prefix should have format /bucket, /bucket/, '
'or /bucket/prefix but got %s.' % path_prefix)
bucket_name_end = path_prefix.find('/', 1)
bucket = path_prefix
prefix = None
if bucket_name_end != -1:
bucket = path_prefix[:bucket_name_end]
prefix = path_prefix[bucket_name_end + 1:] or None
return bucket, prefix
def _validate_path(path):
"""Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' %
(path.__class__, path))
def validate_options(options):
"""Validate Google Cloud Storage options.
Args:
options: a str->basestring dict of options to pass to Google Cloud Storage.
Raises:
ValueError: if option is not supported.
TypeError: if option is not of type str or value of an option
is not of type basestring.
"""
if not options:
return
for k, v in options.iteritems():
if not isinstance(k, str):
raise TypeError('option %r should be a str.' % k)
if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS):
raise ValueError('option %s is not supported.' % k)
if not isinstance(v, basestring):
raise TypeError('value %r for option %s should be of type basestring.' %
(v, k))
def http_time_to_posix(http_time):
"""Convert HTTP time format to posix time.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
for http time format.
Args:
http_time: time in RFC 2616 format. e.g.
"Mon, 20 Nov 1995 19:12:08 GMT".
Returns:
A float of secs from unix epoch.
"""
if http_time is not None:
return email_utils.mktime_tz(email_utils.parsedate_tz(http_time))
def posix_time_to_http(posix_time):
"""Convert posix time to HTML header time format.
Args:
posix_time: unix time.
Returns:
A datatime str in RFC 2616 format.
"""
if posix_time:
return email_utils.formatdate(posix_time, usegmt=True)
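# Example (illustrative): posix_time_to_http(0) returns
# 'Thu, 01 Jan 1970 00:00:00 GMT', and http_time_to_posix of that string
# returns 0.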
_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple())
def posix_to_dt_str(posix):
"""Reverse of str_to_datetime.
This is used by GCS stub to generate GET bucket XML response.
Args:
posix: A float of secs from unix epoch.
Returns:
A datetime str.
"""
dt = datetime.datetime.utcfromtimestamp(posix)
dt_str = dt.strftime(_DT_FORMAT)
return dt_str + '.000Z'
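# Round-trip sketch: posix_to_dt_str(dt_str_to_posix('2013-04-12T00:22:27.978Z'))
# gives '2013-04-12T00:22:27.000Z' (the fractional seconds are dropped by
# dt_str_to_posix).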
def local_run():
"""Whether we should hit GCS dev appserver stub."""
server_software = os.environ.get('SERVER_SOFTWARE')
if server_software is None:
return True
if 'remote_api' in server_software:
return False
if server_software.startswith(('Development', 'testutil')):
return True
return False
def local_api_url():
"""Return URL for GCS emulation on dev appserver."""
return 'http://%s%s' % (os.environ.get('HTTP_HOST'), LOCAL_GCS_ENDPOINT)
def memory_usage(method):
"""Log memory usage before and after a method."""
def wrapper(*args, **kwargs):
logging.info('Memory before method %s is %s.',
method.__name__, runtime.memory_usage().current())
result = method(*args, **kwargs)
logging.info('Memory after method %s is %s',
method.__name__, runtime.memory_usage().current())
return result
return wrapper
def _add_ns(tagname):
return '{%(ns)s}%(tag)s' % {'ns': CS_XML_NS,
'tag': tagname}
_T_CONTENTS = _add_ns('Contents')
_T_LAST_MODIFIED = _add_ns('LastModified')
_T_ETAG = _add_ns('ETag')
_T_KEY = _add_ns('Key')
_T_SIZE = _add_ns('Size')
_T_PREFIX = _add_ns('Prefix')
_T_COMMON_PREFIXES = _add_ns('CommonPrefixes')
_T_NEXT_MARKER = _add_ns('NextMarker')
_T_IS_TRUNCATED = _add_ns('IsTruncated')
| bsd-3-clause |
dnidever/noaosourcecatalog | python/nsc_instcal_combine.py | 1 | 27777 | #!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils, coords
import subprocess
import time
from argparse import ArgumentParser
import socket
from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
def add_elements(cat,nnew=300000):
""" Add more elements to a catalog"""
ncat = len(cat)
old = cat.copy()
nnew = utils.gt(nnew,ncat)
cat = np.zeros(ncat+nnew,dtype=old.dtype)
cat[0:ncat] = old
del(old)
return cat
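# Illustrative use: grow a structured array before appending new rows, e.g.
# idstr = add_elements(idstr) returns a copy of idstr padded with extra zeroed
# rows (nnew=300000 by default) while preserving the dtype and existing values.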
def add_cat(obj,totobj,idstr,idcnt,ind1,cat,meta):
""" Add object information from a new meas catalog of matched objects"""
ncat = len(cat)
f = meta['filter'].lower().strip()[0]
# Copy to final structure
obj['ra'][ind1] = cat['RA']
obj['dec'][ind1] = cat['DEC']
obj['raerr'][ind1] += 1.0/cat['RAERR']**2 # total(ra_wt)
obj['decerr'][ind1] += 1.0/cat['DECERR']**2 # total(dec_wt)
obj['pmra'][ind1] += (1.0/cat['RAERR']**2) * meta['mjd']*cat['RA'] # total(wt*mjd*ra)
obj['pmdec'][ind1] += (1.0/cat['DECERR']**2) * meta['mjd']*cat['DEC'] # total(wt*mjd*dec)
obj['mjd'][ind1] += meta['mjd'] # total(mjd)
obj['ndet'][ind1] += 1
# Detection and morphology parameters for this FILTER
obj['ndet'+f][ind1] += 1
obj[f+'asemi'][ind1] += cat['ASEMI']
obj[f+'bsemi'][ind1] += cat['BSEMI']
obj[f+'theta'][ind1] += cat['THETA']
# Good photometry for this FILTER
gdmag, = np.where(cat['MAG_AUTO']<50)
if len(gdmag)>0:
obj[f+'mag'][ind1[gdmag]] += 2.5118864**cat['MAG_AUTO'][gdmag] * (1.0/cat['MAGERR_AUTO'][gdmag]**2)
obj[f+'err'][ind1[gdmag]] += 1.0/cat['MAGERR_AUTO'][gdmag]**2
obj['nphot'+f][ind1[gdmag]] += 1
obj['asemi'][ind1] += cat['ASEMI']
obj['asemierr'][ind1] += cat['ASEMIERR']**2
obj['bsemi'][ind1] += cat['BSEMI']
obj['bsemierr'][ind1] += cat['BSEMIERR']**2
obj['theta'][ind1] += cat['THETA']
obj['thetaerr'][ind1] += cat['THETAERR']**2
obj['fwhm'][ind1] += cat['FWHM'] # in arcsec
obj['flags'][ind1] |= cat['FLAGS'] # OR combine
obj['class_star'][ind1] += cat['CLASS_STAR']
totobj['ra'][ind1] += cat['RA'] * (1.0/cat['RAERR']**2) # total(ra*wt)
totobj['dec'][ind1] += cat['DEC'] * (1.0/cat['DECERR']**2) # total(dec*wt)
totobj['ramjd'][ind1] += (1.0/cat['RAERR']**2) * meta['mjd'] # total(wt_ra*mjd)
totobj['decmjd'][ind1] += (1.0/cat['DECERR']**2) * meta['mjd'] # total(wt_dec*mjd)
totobj['ramjd2'][ind1] += (1.0/cat['RAERR']**2) * meta['mjd']**2 # total(wt_ra*mjd**2)
totobj['decmjd2'][ind1] += (1.0/cat['DECERR']**2) * meta['mjd']**2 # total(wt_dec*mjd**2)
totobj['minmjd'][ind1] = np.minimum( meta['mjd'][0], totobj['minmjd'][ind1] )
totobj['maxmjd'][ind1] = np.maximum( meta['mjd'][0], totobj['maxmjd'][ind1] )
if len(gdmag)>0:
totobj[f+'tot'][ind1[gdmag]] += cat['MAG_AUTO'][gdmag] # sum(mag)
totobj[f+'mag2'][ind1[gdmag]] += np.float64(cat['MAG_AUTO'][gdmag])**2 # sum(mag**2), need dbl to prevent underflow
# Add new elements to IDSTR
if idcnt+ncat > len(idstr):
idstr = add_elements(idstr)
nidstr = len(idstr)
# Add to IDSTR
idstr['measid'][idcnt:idcnt+ncat] = cat['MEASID']
idstr['exposure'][idcnt:idcnt+ncat] = meta['base']
idstr['expnum'][idcnt:idcnt+ncat] = meta['expnum']
idstr['objectid'][idcnt:idcnt+ncat] = obj[ind1]['objectid']
idstr['objectindex'][idcnt:idcnt+ncat] = ind1
idcnt += ncat
return obj,totobj,idstr,idcnt
def loadmeas(metafile,buffdict=None,verbose=False):
if os.path.exists(metafile) is False:
print(metafile+' NOT FOUND')
return np.array([])
meta = fits.getdata(metafile,1)
chmeta = fits.getdata(metafile,2)
fdir = os.path.dirname(metafile)
fbase, ext = os.path.splitext(os.path.basename(metafile))
fbase = fbase[:-5] # remove _meta at end
# Loop over the chip files
cat = None
for j in range(len(chmeta)):
# Check that this chip was astrometrically calibrated
# and falls into the HEALPix region
if chmeta[j]['ngaiamatch'] == 0:
if verbose: print('This chip was not astrometrically calibrated')
# Check that this overlaps the healpix region
inside = True
if buffdict is not None:
vra = chmeta[j]['vra']
vdec = chmeta[j]['vdec']
if (np.max(vra)-np.min(vra)) > 100: # deal with RA=0 wraparound
bd, = np.where(vra>180)
if len(bd)>0: vra[bd] -= 360
if coords.doPolygonsOverlap(buffdict['ra'],buffdict['dec'],vra,vdec) is False:
if verbose: print('This chip does NOT overlap the HEALPix region+buffer')
inside = False
# Check if the chip-level file exists
chfile = fdir+'/'+fbase+'_'+str(chmeta[j]['ccdnum'])+'_meas.fits'
if os.path.exists(chfile) is False:
print(chfile+' NOT FOUND')
# Load this one
if (os.path.exists(chfile) is True) and (inside is True) and (chmeta[j]['ngaiamatch']>1):
# Load the chip-level catalog
cat1 = fits.getdata(chfile,1)
ncat1 = len(cat1)
print(' '+str(ncat1)+' sources')
# Make sure it's in the right format
if len(cat1.dtype.fields) != 32:
if verbose: print(' This catalog does not have the right format. Skipping')
del(cat1)
ncat1 = 0
# Only include sources inside Boundary+Buffer zone
# -use ROI_CUT
# -reproject to tangent plane first so we don't have to deal
# with RA=0 wrapping or pole issues
if buffdict is not None:
lon, lat = coords.rotsphcen(cat1['ra'],cat1['dec'],buffdict['cenra'],buffdict['cendec'],gnomic=True)
ind0, ind1 = utils.roi_cut(buffdict['lon'],buffdict['lat'],lon,lat)
nmatch = len(ind1)
# Only want sources inside this pixel
if nmatch>0:
cat1 = cat1[ind1]
ncat1 = len(cat1)
if verbose: print(' '+str(nmatch)+' sources are inside this pixel')
# Combine the catalogs
if ncat1 > 0:
if cat is None:
dtype_cat = cat1.dtype
cat = np.zeros(np.sum(chmeta['nsources']),dtype=dtype_cat)
catcount = 0
cat[catcount:catcount+ncat1] = cat1
catcount += ncat1
#BOMB1:
if cat is not None: cat=cat[0:catcount] # trim excess
if cat is None: cat=np.array([]) # empty cat
return cat
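# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): loadmeas() expects
# "buffdict" to carry the buffered pixel boundary in both sky and gnomic
# (tangent-plane) coordinates. The helper below only documents the dictionary
# keys that loadmeas() and the __main__ block rely on; the real dictionary is
# built further down from hp.boundaries().
def _buffdict_sketch(cenra, cendec, rabuff, decbuff, lonbuff, latbuff):
    """Bundle a buffered boundary into the layout expected by loadmeas()."""
    return {'cenra': cenra, 'cendec': cendec,
            'rar': utils.minmax(rabuff), 'decr': utils.minmax(decbuff),
            'ra': rabuff, 'dec': decbuff,
            'lon': lonbuff, 'lat': latbuff,
            'lr': utils.minmax(lonbuff), 'br': utils.minmax(latbuff)}
# ----------------------------------------------------------------------------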
# Combine data for one NSC healpix region
if __name__ == "__main__":
parser = ArgumentParser(description='Combine NSC data for one healpix region.')
parser.add_argument('pix', type=str, nargs=1, help='HEALPix pixel number')
parser.add_argument('version', type=str, nargs=1, help='Version number')
parser.add_argument('--nside', type=int, default=128, help='HEALPix Nside')
parser.add_argument('-r','--redo', action='store_true', help='Redo this HEALPIX')
parser.add_argument('--outdir', type=str, default='', help='Output directory')
#parser.add_argument('--filesexist', type=float, default=0.2, help='Time to wait between checking the status of running jobs')
#parser.add_argument('--pixfiles', type=str, default=False, help='IDL program')
args = parser.parse_args()
t0 = time.time()
hostname = socket.gethostname()
host = hostname.split('.')[0]
radeg = np.float64(180.00) / np.pi
# Inputs
pix = int(args.pix[0])
version = args.version[0]   # nargs=1 returns a list
nside = args.nside
redo = args.redo
outdir = args.outdir
# on thing/hulk use
if (host == "thing") or (host == "hulk"):
dir = "/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/mss1/"
localdir = "/d0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
# on gp09 use
if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"):
dir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/net/mss1/"
localdir = "/data0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
t0 = time.time()
# Check if output file already exists
if outdir == '': outdir=dir+'combine/'
subdir = str(int(pix)//1000) # use the thousands to create subdirectory grouping
outfile = outdir+'/'+subdir+'/'+str(pix)+'.fits'
if (os.path.exists(outfile) or os.path.exists(outfile+'.gz')) and not redo:
print(outfile+' EXISTS already and REDO not set')
sys.exit()
print("Combining InstCal SExtractor catalogs for Healpix pixel = "+str(pix))
# Load the list
listfile = localdir+'dnidever/nsc/instcal/'+version+'/nsc_instcal_combine_healpix_list.fits.gz'
if os.path.exists(listfile) is False:
print(listfile+" NOT FOUND")
sys.exit()
healstr = Table(fits.getdata(listfile,1))
index = Table(fits.getdata(listfile,2))
# Find our pixel
ind, = np.where(index['PIX'] == pix)
nind = len(ind)
if nind == 0:
print("No entries for Healpix pixel '"+str(pix)+"' in the list")
sys.exit()
ind = ind[0]
hlist = healstr[index[ind]['LO']:index[ind]['HI']+1]
nlist = len(hlist)
# GET EXPOSURES FOR NEIGHBORING PIXELS AS WELL
# so we can deal with the edge cases
neipix = hp.get_all_neighbours(nside,pix)
for neip in neipix:
ind1, = np.where(index['PIX'] == neip)
nind1 = len(ind1)
if nind1>0:
ind1 = ind1[0]
hlist1 = healstr[index[ind1]['LO']:index[ind1]['HI']+1]
hlist = vstack([hlist,hlist1])
# Use entire exposure files
# Get unique values
u, ui = np.unique(hlist['FILE'],return_index=True)
hlist = hlist[ui]
nhlist = len(hlist)
print(str(nhlist)+' exposures that overlap this pixel and neighbors')
# Get the boundary coordinates
# healpy.boundaries but not sure how to do it in IDL
# pix2vec_ring/nest can optionally return vertices but only 4
# maybe subsample myself between the vectors
# Expand the boundary to include a "buffer" zone
# to deal with edge cases
vecbound = hp.boundaries(nside,pix,step=100)
rabound, decbound = hp.vec2ang(np.transpose(vecbound),lonlat=True)
# Expand the boundary by the buffer size
cenra, cendec = hp.pix2ang(nside,pix,lonlat=True)
# reproject onto tangent plane
lonbound, latbound = coords.rotsphcen(rabound,decbound,cenra,cendec,gnomic=True)
# expand by a fraction, it's not an exact boundary but good enough
buffsize = 10.0/3600. # in deg
radbound = np.sqrt(lonbound**2+latbound**2)
frac = 1.0 + 1.5*np.max(buffsize/radbound)
lonbuff = lonbound*frac
latbuff = latbound*frac
rabuff, decbuff = coords.rotsphcen(lonbuff,latbuff,cenra,cendec,gnomic=True,reverse=True)
if (np.max(rabuff)-np.min(rabuff))>100: # deal with RA=0 wraparound
bd, = np.where(rabuff>180)
if len(bd)>0: rabuff[bd] -= 360.0
buffdict = {'cenra':cenra,'cendec':cendec,'rar':utils.minmax(rabuff),'decr':utils.minmax(decbuff),'ra':rabuff,'dec':decbuff,\
'lon':lonbuff,'lat':latbuff,'lr':utils.minmax(lonbuff),'br':utils.minmax(latbuff)}
# Initialize the ID structure
# this will contain the MeasID, Exposure name, ObjectID
dtype_idstr = np.dtype([('measid',np.str,200),('exposure',np.str,200),('expnum',np.str,200),('objectid',np.str,200),('objectindex',int)])
idstr = np.zeros(1000000,dtype=dtype_idstr)
nidstr = len(idstr)
idcnt = 0
# Initialize the object structure
dtype_obj = np.dtype([('objectid',np.str,100),('pix',int),('ra',np.float64),('dec',np.float64),('raerr',float),('decerr',float),
('pmra',float),('pmdec',float),('pmraerr',float),('pmdecerr',float),('mjd',np.float64),
('deltamjd',float),('ndet',int),('nphot',int),
('ndetu',int),('nphotu',int),('umag',float),('urms',float),('uerr',float),('uasemi',float),('ubsemi',float),('utheta',float),
('ndetg',int),('nphotg',int),('gmag',float),('grms',float),('gerr',float),('gasemi',float),('gbsemi',float),('gtheta',float),
('ndetr',int),('nphotr',int),('rmag',float),('rrms',float),('rerr',float),('rasemi',float),('rbsemi',float),('rtheta',float),
('ndeti',int),('nphoti',int),('imag',float),('irms',float),('ierr',float),('iasemi',float),('ibsemi',float),('itheta',float),
('ndetz',int),('nphotz',int),('zmag',float),('zrms',float),('zerr',float),('zasemi',float),('zbsemi',float),('ztheta',float),
('ndety',int),('nphoty',int),('ymag',float),('yrms',float),('yerr',float),('yasemi',float),('ybsemi',float),('ytheta',float),
('ndetvr',int),('nphotvr',int),('vrmag',float),('vrrms',float),('vrerr',float),('vrasemi',float),('vrbsemi',float),('vrtheta',float),
('asemi',float),('asemierr',float),('bsemi',float),('bsemierr',float),('theta',float),('thetaerr',float),
('fwhm',float),('flags',int),('class_star',float),('ebv',float)])
tags = dtype_obj.names
obj = np.zeros(500000,dtype=dtype_obj)
obj['pix'] = pix
nobj = len(obj)
dtype_totobj = np.dtype([('ra',np.float64),('dec',np.float64),('ramjd',np.float64),('decmjd',np.float64),('ramjd2',np.float64),
('decmjd2',np.float64),('minmjd',np.float64),('maxmjd',np.float64),('umag2',np.float64),('gmag2',np.float64),
('rmag2',np.float64),('imag2',np.float64),('zmag2',np.float64),('ymag2',np.float64),('vrmag2',np.float64),
('utot',np.float64),('gtot',np.float64),('rtot',np.float64),('itot',np.float64),('ztot',np.float64),
('ytot',np.float64),('vrtot',np.float64)])
totags = dtype_totobj.names
totobj = np.zeros(nobj,dtype=dtype_totobj)
totobj['minmjd'] = 999999.0
totobj['maxmjd'] = -999999.0
cnt = 0
# New meta-data format
dtype_meta = np.dtype([('file',np.str,500),('base',np.str,200),('expnum',int),('ra',np.float64),
('dec',np.float64),('dateobs',np.str,100),('mjd',np.float64),('filter',np.str,50),
('exptime',float),('airmass',float),('nsources',int),('fwhm',float),
('nchips',int),('badchip31',bool),('rarms',float),('decrms',float),
('ebv',float),('gaianmatch',int),('zpterm',float),('zptermerr',float),
('zptermsig',float),('refmatch',int)])
# Loop over the exposures
allmeta = None
for i in range(nhlist):
print(str(i+1)+' Loading '+hlist[i]['FILE'])
# Load meta data file first
metafile = hlist[i]['FILE'].replace('_cat','_meta').strip()
if os.path.exists(metafile) is False:
print(metafile+' NOT FOUND')
continue   # skip this exposure (the IDL version used "goto,BOMB" here)
meta = fits.getdata(metafile,1)
t = Time(meta['dateobs'], format='isot', scale='utc')
meta['mjd'] = t.mjd # recompute because some MJD are bad
chmeta = fits.getdata(metafile,2) # chip-level meta-data structure
print(' FILTER='+meta['filter'][0]+' EXPTIME='+str(meta['exptime'][0])+' sec')
# Load the measurement catalog
cat = loadmeas(metafile,buffdict)
ncat = utils.size(cat)
if ncat==0:
print('This exposure does NOT cover the HEALPix pixel')
continue # go to next exposure
# Add metadata to ALLMETA
# Make sure it's in the right format
newmeta = np.zeros(1,dtype=dtype_meta)
# Copy over the meta information
for n in newmeta.dtype.names:
if n.upper() in meta.dtype.names: newmeta[n]=meta[n]
if allmeta is None:
allmeta = newmeta
else:
allmeta = np.hstack((allmeta,newmeta))
# Combine the data
#-----------------
# First catalog
if cnt==0:
ind1 = np.arange(len(cat))
obj['objectid'][ind1] = utils.strjoin( str(pix)+'.', ((np.arange(ncat)+1).astype(np.str)) )
obj,totobj,idstr,idcnt = add_cat(obj,totobj,idstr,idcnt,ind1,cat,meta)
cnt += ncat
# Second and up
else:
# Match new sources to the objects
ind1,ind2,dist = coords.xmatch(obj[0:cnt]['ra'],obj[0:cnt]['dec'],cat['RA'],cat['DEC'],0.5)
nmatch = len(ind1)
print(' '+str(nmatch)+' matched sources')
# Some matches, add data to existing record for these sources
if nmatch>0:
obj,totobj,idstr,idcnt = add_cat(obj,totobj,idstr,idcnt,ind1,cat[ind2],meta)
if nmatch<ncat:
cat = np.delete(cat,ind2)
ncat = len(cat)
else:
cat = np.array([])
ncat = 0
# Some left, add records for these sources
if ncat>0:
print(' '+str(ncat)+' sources left to add')
# Add new elements
if (cnt+ncat)>nobj:
obj = add_elements(obj)
nobj = len(obj)
ind1 = np.arange(ncat)+cnt
obj['objectid'][ind1] = utils.strjoin( str(pix)+'.', ((np.arange(ncat)+1+cnt).astype(np.str)) )
obj,totobj,idstr,idcnt = add_cat(obj,totobj,idstr,idcnt,ind1,cat,meta)
cnt += ncat
# No sources
if cnt==0:
print('No sources in this pixel')
sys.exit()
# Trim off the excess elements
obj = obj[0:cnt]
totobj = totobj[0:cnt]
nobj = len(obj)
print(str(nobj)+' final objects')
idstr = idstr[0:idcnt]
# Make NPHOT from NPHOTX
obj['nphot'] = obj['nphotu']+obj['nphotg']+obj['nphotr']+obj['nphoti']+obj['nphotz']+obj['nphoty']+obj['nphotvr']
# Convert total(mjd*ra) to true proper motion values
# the slope of RA vs. MJD is
# pmra=(total(wt*mjd*ra)/total(wt)-<mjd>*<ra>)/(total(wt*mjd^2)/total(wt)-<mjd>^2)
# we are creating the totals cumulatively as we go
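# Worked check of the slope formula (illustrative, comments only): for two
# detections of equal weight at (mjd, ra) = (0 d, 10.0 deg) and
# (100 d, 10.0 deg + 100*delta), the numerator is
#   <mjd*ra> - <mjd>*<ra> = (0 + 100*(10+100*delta))/2 - 50*(10+50*delta) = 2500*delta
# and the denominator is
#   <mjd**2> - <mjd>**2 = 5000 - 2500 = 2500,
# so the recovered slope is exactly delta deg/day, as expected.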
totobj['ra'] /= obj['raerr'] # wt mean RA (totalrawt/totalwt)
totobj['dec'] /= obj['decerr'] # wt mean DEC (totaldecwt/totalwt)
obj['mjd'] /= obj['ndet'] # mean MJD
totobj['ramjd'] /= obj['raerr'] # wt_ra mean MJD
totobj['decmjd'] /= obj['decerr'] # wt_dec mean MJD
gdet, = np.where(obj['ndet']>1)
if len(gdet)>0:
pmra = (obj['pmra'][gdet]/obj['raerr'][gdet]-totobj['ramjd'][gdet]*totobj['ra'][gdet]) / (totobj['ramjd2'][gdet]/obj['raerr'][gdet]-totobj['ramjd'][gdet]**2) # deg[ra]/day
pmra *= (3600*1e3)*365.2425 # mas/year
pmra *= np.cos(obj['dec'][gdet]/radeg) # mas/year, true angle
pmdec = (obj['pmdec'][gdet]/obj['decerr'][gdet]-totobj['decmjd'][gdet]*totobj['dec'][gdet])/(totobj['decmjd2'][gdet]/obj['decerr'][gdet]-totobj['decmjd'][gdet]**2) # deg/day
pmdec *= (3600*1e3)*365.2425 # mas/year
# Proper motion errors
# pmerr = 1/sqrt( sum(wt*mjd^2) - <mjd>^2 * sum(wt) )
# if wt=1/err^2 with err in degrees, but we are using arcsec
# Need to divide by 3600 for PMDECERR and 3600*cos(dec) for PMRAERR
pmraerr = 1.0/np.sqrt( totobj['ramjd2'][gdet] - totobj['ramjd'][gdet]**2 * obj['raerr'][gdet] )
pmraerr /= (3600*np.cos(totobj['dec'][gdet]/radeg)) # correction for raerr in arcsec
pmraerr *= (3600*1e3)*365.2425 # mas/year
pmraerr *= np.cos(obj['dec'][gdet]/radeg) # mas/year, true angle
pmdecerr = 1.0/np.sqrt( totobj['decmjd2'][gdet] - totobj['decmjd'][gdet]**2 * obj['decerr'][gdet] )
pmdecerr /= 3600 # correction for decerr in arcsec
pmdecerr *= (3600*1e3)*365.2425 # mas/year
obj['pmra'][gdet] = pmra
obj['pmdec'][gdet] = pmdec
obj['pmraerr'][gdet] = pmraerr
obj['pmdecerr'][gdet] = pmdecerr
# sometimes it happens that the denominator is 0.0
# when there are few closely spaced points
# nothing we can do, just mark as bad
bdet, = np.where((obj['ndet']<2) | ~np.isfinite(obj['pmra']))
if len(bdet)>0:
obj['pmra'][bdet] = 999999.0
obj['pmdec'][bdet] = 999999.0
obj['pmraerr'][bdet] = 999999.0
obj['pmdecerr'][bdet] = 999999.0
obj['deltamjd'] = totobj['maxmjd']-totobj['minmjd']
# Average coordinates
obj['ra'] = totobj['ra'] # now stuff in the average coordinates
obj['dec'] = totobj['dec']
obj['raerr'] = np.sqrt(1.0/obj['raerr']) # err in wt mean RA, arcsec
obj['decerr'] = np.sqrt(1.0/obj['decerr']) # err in wt mean DEC, arcsec
# Convert totalwt and totalfluxwt to MAG and ERR
# and average the morphology parameters PER FILTER
filters = ['u','g','r','i','z','y','vr']
for f in filters:
# Get average photometry for objects with photometry in this band
gph, = np.where(obj['nphot'+f]>0)
if len(gph)>0:
newflux = obj[f+'mag'][gph] / obj[f+'err'][gph]
newmag = 2.50*np.log10(newflux)
newerr = np.sqrt(1.0/obj[f+'err'][gph])
obj[f+'mag'][gph] = newmag
obj[f+'err'][gph] = newerr
bdmag, = np.where((obj['nphot'+f]==0) | ~np.isfinite(obj[f+'mag']))
if len(bdmag)>0:
obj[f+'mag'][bdmag] = 99.99
obj[f+'err'][bdmag] = 9.99
# Calculate RMS scatter
# RMS^2 * N = sum(mag^2) - 2*<mag>*sum(mag) + N*<mag>^2
# where <mag> is a weighted average
# RMS = sqrt( sum(mag^2)/N - 2*<mag>*sum(mag)/N + <mag>^2 )
# sum(mag^2) is in the MAG2 column and sum(mag) is in TOT
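# Worked check (illustrative): for two measurements of 20.0 and 20.2 mag with an
# (unweighted) mean of 20.1, sum(mag**2)=808.04 and sum(mag)=40.2, so
#   RMS = sqrt( 808.04/2 - 2*20.1*40.2/2 + 20.1**2 ) = sqrt(0.01) = 0.1 mag,
# i.e. the expected +/-0.1 mag scatter about the mean.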
rms = np.zeros(nobj,float)
gdrms, = np.where(obj['nphot'+f]>1)
ngdrms = len(gdrms)
bdrms, = np.where(obj['nphot'+f]<=1)
nbdrms = len(bdrms)
if ngdrms>0:
rms[gdrms] = np.sqrt( totobj[f+'mag2'][gdrms]/obj['nphot'+f][gdrms] -
2*obj[f+'mag'][gdrms]*totobj[f+'tot'][gdrms]/obj['nphot'+f][gdrms] + np.float64(obj[f+'mag'][gdrms])**2 )
if nbdrms>0: rms[bdrms] = 999999.
obj[f+'rms'] = rms
# Average the morphology parameters PER FILTER
gdet, = np.where(obj['ndet'+f]>0)
ngdet = len(gdet)
bdet, = np.where(obj['ndet'+f]==0)
nbdet = len(bdet)
if ngdet>0:
obj[f+'asemi'][gdet] /= obj['ndet'+f][gdet]
obj[f+'bsemi'][gdet] /= obj['ndet'+f][gdet]
obj[f+'theta'][gdet] /= obj['ndet'+f][gdet]
if nbdet>0:
obj[f+'asemi'][bdet] = 999999.
obj[f+'bsemi'][bdet] = 999999.
obj[f+'theta'][bdet] = 999999.
# Average the morphology parameters; might need a separate counter for that
mtags = ['asemi','bsemi','theta','fwhm','class_star']
gdet, = np.where(obj['ndet']>0)
ngdet = len(gdet)
bdet, = np.where(obj['ndet']==0)
nbdet = len(bdet)
for m in mtags:
# Divide by the number of detections
if ngdet>0: obj[m][gdet] /= obj['ndet'][gdet]
if nbdet>0: obj[m][bdet] = 999999. # no good detections
# Get the average error
metags = ['asemierr','bsemierr','thetaerr']
for m in metags:
# Just take the sqrt to complete the addition in quadrature
if ngdet>0: obj[m][gdet] = np.sqrt(obj[m][gdet]) / obj['ndet'][gdet]
if nbdet>0: obj[m][bdet] = 999999. # no good detections
# Add E(B-V)
print('Getting E(B-V)')
sfd = SFDQuery()
c = SkyCoord(obj['ra'],obj['dec'],frame='icrs',unit='deg')
#c = SkyCoord('05h00m00.00000s','+30d00m00.0000s', frame='icrs')
ebv = sfd(c)
obj['ebv'] = ebv
# ONLY INCLUDE OBJECTS WITH AVERAGE RA/DEC
# WITHIN THE BOUNDARY OF THE HEALPIX PIXEL!!!
ipring = hp.pixelfunc.ang2pix(nside,obj['ra'],obj['dec'],lonlat=True)
ind1, = np.where(ipring == pix)
nmatch = len(ind1)
if nmatch==0:
print('None of the final objects fall inside the pixel')
sys.exit()
# Get trimmed objects and indices
objtokeep = np.zeros(nobj,bool) # boolean to keep or trim objects
objtokeep[ind1] = True
if nmatch<nobj:
trimind = np.arange(nobj)
trimind = np.delete(trimind,ind1)
#trimind = utils.remove_indices(trimind,ind1)
trimobj = obj[trimind] # trimmed objects
newobjindex = np.zeros(nobj,int)-1 # new indices
newobjindex[ind1] = np.arange(nmatch)
# Keep the objects inside the Healpix
obj = obj[ind1]
print(str(nmatch)+' final objects fall inside the pixel')
# Remove trimmed objects from IDSTR
totrim, = np.where(objtokeep[idstr['objectindex']]==0) #using old index
if len(totrim)>0:
# Trim objects
idstr = np.delete(idstr,totrim)
#idstr = utils.remove_indices(idstr,totrim)
# Update IDSTR.objectindex
old_idstr_objectindex = idstr['objectindex']
idstr['objectindex'] = newobjindex[old_idstr_objectindex]
# Create final summary structure from ALLMETA
# get exposures that are in IDSTR
# sometimes EXPNUM numbers have the leading 0s removed
# and sometimes not, so convert to int to match
dum, uiexpnum = np.unique(idstr['expnum'].astype(int),return_index=True)
uexpnum = idstr[uiexpnum]['expnum'].astype(int)
nuexpnum = len(uexpnum)
ind1,ind2 = utils.match(allmeta['expnum'].astype(int),uexpnum)
nmatch = len(ind1)
sumstr = Table(allmeta[ind1])
col_nobj = Column(name='nobjects', dtype=np.int, length=len(sumstr))
col_healpix = Column(name='healpix', dtype=np.int, length=len(sumstr))
sumstr.add_columns([col_nobj, col_healpix])
sumstr['nobjects'] = 0
sumstr['healpix'] = pix
# get number of objects per exposure
expnum = idstr['expnum'].astype(int)
siexp = np.argsort(expnum)
expnum = expnum[siexp]
if nuexpnum>1:
brklo, = np.where(expnum != np.roll(expnum,1))
nbrk = len(brklo)
brkhi = np.hstack((brklo[1:nbrk],len(expnum)))
numobjexp = brkhi-brklo   # brkhi marks the start of the next group, so no +1
else:
numobjexp=len(expnum)
ind1,ind2 = utils.match(sumstr['expnum'].astype(int),uexpnum)
nmatch = len(ind1)
sumstr['nobjects'][ind1] = numobjexp
# Write the output file
print('Writing combined catalog to '+outfile)
if os.path.exists(outdir) is False: os.mkdir(outdir)
if os.path.exists(outdir+'/'+subdir) is False: os.mkdir(outdir+'/'+subdir)
if os.path.exists(outfile): os.remove(outfile)
sumstr.write(outfile) # first, summary table
# append other fits binary tables
hdulist = fits.open(outfile)
hdu = fits.table_to_hdu(Table(obj)) # second, catalog
hdulist.append(hdu)
hdu = fits.table_to_hdu(Table(idstr)) # third, ID table
hdulist.append(hdu)
hdulist.writeto(outfile,overwrite=True)
hdulist.close()
if os.path.exists(outfile+'.gz'): os.remove(outfile+'.gz')
ret = subprocess.call(['gzip',outfile]) # compress final catalog
dt = time.time()-t0
print('dt = '+str(dt)+' sec.')
| mit |
sbt9uc/osf.io | framework/mongo/utils.py | 11 | 1888 | # -*- coding: utf-8 -*-
import re
import httplib as http
import pymongo
from modularodm.exceptions import ValidationValueError
from framework.exceptions import HTTPError
# MongoDB forbids field names that begin with "$" or contain ".". These
# utilities map to and from Mongo field names.
mongo_map = {
'.': '__!dot!__',
'$': '__!dollar!__',
}
def to_mongo(item):
for key, value in mongo_map.items():
item = item.replace(key, value)
return item
def to_mongo_key(item):
return to_mongo(item).strip().lower()
def from_mongo(item):
for key, value in mongo_map.items():
item = item.replace(value, key)
return item
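# A minimal illustration of the mapping round trip (doctest-style, not part of
# the public API):
#
#     >>> to_mongo('user.name$first')
#     'user__!dot!__name__!dollar!__first'
#     >>> from_mongo(to_mongo('user.name$first'))
#     'user.name$first'
#     >>> to_mongo_key('  Page.Title ')
#     'page__!dot!__title'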
sanitize_pattern = re.compile(r'<\/?[^>]+>')
def sanitized(value):
if value != sanitize_pattern.sub('', value):
raise ValidationValueError('Unsanitary string')
def unique_on(*groups):
"""Decorator for subclasses of `StoredObject`. Add a unique index on each
group of keys provided.
:param *groups: List of lists of keys to be indexed
"""
def wrapper(cls):
cls.__indices__ = getattr(cls, '__indices__', [])
cls.__indices__.extend([
{
'key_or_list': [
(key, pymongo.ASCENDING)
for key in group
],
'unique': True,
}
for group in groups
])
return cls
return wrapper
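# Illustrative usage of unique_on (hypothetical model shown only for clarity):
#
#     @unique_on(['node', 'name'])
#     class Tag(StoredObject):
#         ...
#
# leaves Tag.__indices__ holding
#     [{'key_or_list': [('node', pymongo.ASCENDING),
#                       ('name', pymongo.ASCENDING)],
#       'unique': True}]
# which the storage layer can hand to pymongo index creation.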
def get_or_http_error(Model, pk, allow_deleted=False):
instance = Model.load(pk)
if not allow_deleted and getattr(instance, 'is_deleted', False):
raise HTTPError(http.GONE, data=dict(
message_long="This resource has been deleted"
))
if not instance:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long="No resource with that primary key could be found"
))
else:
return instance
| apache-2.0 |
holmes/intellij-community | python/helpers/epydoc/apidoc.py | 90 | 92479 | # epydoc -- API Documentation Classes
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <[email protected]>
# URL: <http://epydoc.sf.net>
#
# $Id: apidoc.py 1675 2008-01-29 17:12:56Z edloper $
"""
Classes for encoding API documentation about Python programs.
These classes are used as a common representation for combining
information derived from introspection and from parsing.
The API documentation for a Python program is encoded using a graph of
L{APIDoc} objects, each of which encodes information about a single
Python variable or value. C{APIDoc} has two direct subclasses:
L{VariableDoc}, for documenting variables; and L{ValueDoc}, for
documenting values. The C{ValueDoc} class is subclassed further, to
define the different pieces of information that should be recorded
about each value type:
G{classtree: APIDoc}
The distinction between variables and values is intentionally made
explicit. This allows us to distinguish information about a variable
itself (such as whether it should be considered 'public' in its
containing namespace) from information about the value it contains
(such as what type the value has). This distinction is also important
because several variables can contain the same value: each variable
should be described by a separate C{VariableDoc}; but we only need one
C{ValueDoc}, since they share a single value.
@todo: Add a cache to canonical name lookup?
"""
__docformat__ = 'epytext en'
######################################################################
## Imports
######################################################################
import types, re, os.path, pickle
from epydoc import log
import epydoc
import __builtin__
from epydoc.compat import * # Backwards compatibility
from epydoc.util import decode_with_backslashreplace, py_src_filename
import epydoc.markup.pyval_repr
######################################################################
# Dotted Names
######################################################################
class DottedName:
"""
A sequence of identifiers, separated by periods, used to name a
Python variable, value, or argument. The identifiers that make up
a dotted name can be accessed using the indexing operator:
>>> name = DottedName('epydoc', 'api_doc', 'DottedName')
>>> print name
epydoc.api_doc.DottedName
>>> name[1]
'api_doc'
"""
UNREACHABLE = "??"
_IDENTIFIER_RE = re.compile("""(?x)
(%s | # UNREACHABLE marker, or..
(script-)? # Prefix: script (not a module)
\w+ # Identifier (yes, identifiers starting with a
# digit are allowed. See SF bug #1649347)
'?) # Suffix: submodule that is shadowed by a var
(-\d+)? # Suffix: unreachable vals with the same name
$"""
% re.escape(UNREACHABLE))
class InvalidDottedName(ValueError):
"""
An exception raised by the DottedName constructor when one of
its arguments is not a valid dotted name.
"""
_ok_identifiers = set()
"""A cache of identifier strings that have been checked against
_IDENTIFIER_RE and found to be acceptable."""
def __init__(self, *pieces, **options):
"""
Construct a new dotted name from the given sequence of pieces,
each of which can be either a C{string} or a C{DottedName}.
Each piece is divided into a sequence of identifiers, and
these sequences are combined together (in order) to form the
identifier sequence for the new C{DottedName}. If a piece
contains a string, then it is divided into substrings by
splitting on periods, and each substring is checked to see if
it is a valid identifier.
As an optimization, C{pieces} may also contain a single tuple
of values. In that case, that tuple will be used as the
C{DottedName}'s identifiers; it will I{not} be checked to
see if it's valid.
@kwparam strict: if true, then raise an L{InvalidDottedName}
if the given name is invalid.
"""
if len(pieces) == 1 and isinstance(pieces[0], tuple):
self._identifiers = pieces[0] # Optimization
return
if len(pieces) == 0:
raise DottedName.InvalidDottedName('Empty DottedName')
self._identifiers = []
for piece in pieces:
if isinstance(piece, DottedName):
self._identifiers += piece._identifiers
elif isinstance(piece, basestring):
for subpiece in piece.split('.'):
if piece not in self._ok_identifiers:
if not self._IDENTIFIER_RE.match(subpiece):
if options.get('strict'):
raise DottedName.InvalidDottedName(
'Bad identifier %r' % (piece,))
else:
log.warning("Identifier %r looks suspicious; "
"using it anyway." % piece)
self._ok_identifiers.add(piece)
self._identifiers.append(subpiece)
else:
raise TypeError('Bad identifier %r: expected '
'DottedName or str' % (piece,))
self._identifiers = tuple(self._identifiers)
def __repr__(self):
idents = [`ident` for ident in self._identifiers]
return 'DottedName(' + ', '.join(idents) + ')'
def __str__(self):
"""
Return the dotted name as a string formed by joining its
identifiers with periods:
>>> print DottedName('epydoc', 'api_doc', 'DottedName')
epydoc.api_doc.DottedName
"""
return '.'.join(self._identifiers)
def __add__(self, other):
"""
Return a new C{DottedName} whose identifier sequence is formed
by adding C{other}'s identifier sequence to C{self}'s.
"""
if isinstance(other, (basestring, DottedName)):
return DottedName(self, other)
else:
return DottedName(self, *other)
def __radd__(self, other):
"""
Return a new C{DottedName} whose identifier sequence is formed
by adding C{self}'s identifier sequence to C{other}'s.
"""
if isinstance(other, (basestring, DottedName)):
return DottedName(other, self)
else:
return DottedName(*(list(other)+[self]))
def __getitem__(self, i):
"""
Return the C{i}th identifier in this C{DottedName}. If C{i} is
a non-empty slice, then return a C{DottedName} built from the
identifiers selected by the slice. If C{i} is an empty slice,
return an empty list (since empty C{DottedName}s are not valid).
"""
if isinstance(i, types.SliceType):
pieces = self._identifiers[i.start:i.stop]
if pieces: return DottedName(pieces)
else: return []
else:
return self._identifiers[i]
def __hash__(self):
return hash(self._identifiers)
def __cmp__(self, other):
"""
Compare this dotted name to C{other}. Two dotted names are
considered equal if their identifier subsequences are equal.
Ordering between dotted names is lexicographic, in order of
identifier from left to right.
"""
if not isinstance(other, DottedName):
return -1
return cmp(self._identifiers, other._identifiers)
def __len__(self):
"""
Return the number of identifiers in this dotted name.
"""
return len(self._identifiers)
def container(self):
"""
Return the DottedName formed by removing the last identifier
from this dotted name's identifier sequence. If this dotted
name only has one name in its identifier sequence, return
C{None} instead.
"""
if len(self._identifiers) == 1:
return None
else:
return DottedName(*self._identifiers[:-1])
def dominates(self, name, strict=False):
"""
Return true if this dotted name is equal to a prefix of
C{name}. If C{strict} is true, then also require that
C{self!=name}.
>>> DottedName('a.b').dominates(DottedName('a.b.c.d'))
True
"""
len_self = len(self._identifiers)
len_name = len(name._identifiers)
if (len_self > len_name) or (strict and len_self == len_name):
return False
# The following is redundant (the first clause is implied by
# the second), but is done as an optimization.
return ((self._identifiers[0] == name._identifiers[0]) and
self._identifiers == name._identifiers[:len_self])
def contextualize(self, context):
"""
If C{self} and C{context} share a common ancestor, then return
a name for C{self}, relative to that ancestor. If they do not
share a common ancestor (or if C{context} is C{UNKNOWN}), then
simply return C{self}.
This is used to generate shorter versions of dotted names in
cases where users can infer the intended target from the
context.
@type context: L{DottedName}
@rtype: L{DottedName}
"""
if context is UNKNOWN or not context or len(self) <= 1:
return self
if self[0] == context[0]:
return self[1:].contextualize(context[1:])
else:
return self
# Find the first index where self & context differ.
for i in range(min(len(context), len(self))):
if self._identifiers[i] != context._identifiers[i]:
first_difference = i
break
else:
first_difference = i+1
# Strip off anything before that index.
if first_difference == 0:
return self
elif first_difference == len(self):
return self[-1:]
else:
return self[first_difference:]
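# Doctest-style sketch of contextualize(), consistent with the logic above
# (illustrative only):
#
#     >>> print DottedName('epydoc', 'apidoc', 'DottedName').contextualize(
#     ...           DottedName('epydoc', 'apidoc'))
#     DottedName
#     >>> print DottedName('os', 'path').contextualize(DottedName('epydoc'))
#     os.path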
######################################################################
# UNKNOWN Value
######################################################################
class _Sentinel:
"""
A unique value that won't compare equal to any other value. This
class is used to create L{UNKNOWN}.
"""
def __init__(self, name):
self.name = name
def __repr__(self):
return '<%s>' % self.name
def __nonzero__(self):
raise ValueError('Sentinel value <%s> can not be used as a boolean' %
self.name)
UNKNOWN = _Sentinel('UNKNOWN')
"""A special value used to indicate that a given piece of
information about an object is unknown. This is used as the
default value for all instance variables."""
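# Usage note (illustrative): UNKNOWN is a sentinel, so attribute tests should use
# identity, e.g. "if doc.docstring is not UNKNOWN:". Truth-value tests such as
# "if doc.docstring:" are deliberately unsafe -- _Sentinel.__nonzero__ raises a
# ValueError to catch exactly that mistake.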
######################################################################
# API Documentation Objects: Abstract Base Classes
######################################################################
class APIDoc(object):
"""
API documentation information for a single element of a Python
program. C{APIDoc} itself is an abstract base class; subclasses
are used to specify what information should be recorded about each
type of program element. In particular, C{APIDoc} has two direct
subclasses, C{VariableDoc} for documenting variables and
C{ValueDoc} for documenting values; and the C{ValueDoc} class is
subclassed further for different value types.
Each C{APIDoc} subclass specifies the set of attributes that
should be used to record information about the corresponding
program element type. The default value for each attribute is
stored in the class; these default values can then be overridden
with instance variables. Most attributes use the special value
L{UNKNOWN} as their default value, to indicate that the correct
value for that attribute has not yet been determined. This makes
it easier to merge two C{APIDoc} objects that are documenting the
same element (in particular, to merge information about an element
that was derived from parsing with information that was derived
from introspection).
For all attributes with boolean values, use only the constants
C{True} and C{False} to designate true and false. In particular,
do I{not} use other values that evaluate as true or false, such as
C{2} or C{()}. This restriction makes it easier to handle
C{UNKNOWN} values. For example, to test if a boolean attribute is
C{True} or C{UNKNOWN}, use 'C{attrib in (True, UNKNOWN)}' or
'C{attrib is not False}'.
Two C{APIDoc} objects describing the same object can be X{merged},
using the method L{merge_and_overwrite(other)}. After two
C{APIDoc}s are merged, any changes to one will be reflected in the
other. This is accomplished by setting the two C{APIDoc} objects
to use a shared instance dictionary. See the documentation for
L{merge_and_overwrite} for more information, and some important
caveats about hashing.
"""
#{ Docstrings
docstring = UNKNOWN
"""@ivar: The documented item's docstring.
@type: C{string} or C{None}"""
docstring_lineno = UNKNOWN
"""@ivar: The line number on which the documented item's docstring
begins.
@type: C{int}"""
#} end of "docstrings" group
#{ Information Extracted from Docstrings
descr = UNKNOWN
"""@ivar: A description of the documented item, extracted from its
docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
summary = UNKNOWN
"""@ivar: A summary description of the documented item, extracted from
its docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
other_docs = UNKNOWN
"""@ivar: A flag indicating if the entire L{docstring} body (except tags
if any) is entirely included in the L{summary}.
@type: C{bool}"""
metadata = UNKNOWN
"""@ivar: Metadata about the documented item, extracted from fields in
its docstring. I{Currently} this is encoded as a list of tuples
C{(field, arg, descr)}. But that may change.
@type: C{(str, str, L{ParsedDocstring<markup.ParsedDocstring>})}"""
extra_docstring_fields = UNKNOWN
"""@ivar: A list of new docstring fields tags that are defined by the
documented item's docstring. These new field tags can be used by
this item or by any item it contains.
@type: L{DocstringField <epydoc.docstringparser.DocstringField>}"""
#} end of "information extracted from docstrings" group
#{ Source Information
docs_extracted_by = UNKNOWN # 'parser' or 'introspecter' or 'both'
"""@ivar: Information about where the information contained by this
C{APIDoc} came from. Can be one of C{'parser'},
C{'introspector'}, or C{'both'}.
@type: C{str}"""
#} end of "source information" group
def __init__(self, **kwargs):
"""
Construct a new C{APIDoc} object. Keyword arguments may be
used to initialize the new C{APIDoc}'s attributes.
@raise TypeError: If a keyword argument is specified that does
not correspond to a valid attribute for this (sub)class of
C{APIDoc}.
"""
if epydoc.DEBUG:
for key in kwargs:
if key[0] != '_' and not hasattr(self.__class__, key):
raise TypeError('%s got unexpected arg %r' %
(self.__class__.__name__, key))
self.__dict__.update(kwargs)
def _debug_setattr(self, attr, val):
"""
Modify an C{APIDoc}'s attribute. This is used when
L{epydoc.DEBUG} is true, to make sure we don't accidentally
set any inappropriate attributes on C{APIDoc} objects.
@raise AttributeError: If C{attr} is not a valid attribute for
this (sub)class of C{APIDoc}. (C{attr} is considered a
valid attribute iff C{self.__class__} defines an attribute
with that name.)
"""
# Don't intercept special assignments like __class__, or
# assignments to private variables.
if attr.startswith('_'):
return object.__setattr__(self, attr, val)
if not hasattr(self, attr):
raise AttributeError('%s does not define attribute %r' %
(self.__class__.__name__, attr))
self.__dict__[attr] = val
if epydoc.DEBUG:
__setattr__ = _debug_setattr
def __repr__(self):
return '<%s>' % self.__class__.__name__
def pp(self, doublespace=0, depth=5, exclude=(), include=()):
"""
Return a pretty-printed string representation for the
information contained in this C{APIDoc}.
"""
return pp_apidoc(self, doublespace, depth, exclude, include)
__str__ = pp
def specialize_to(self, cls):
"""
Change C{self}'s class to C{cls}. C{cls} must be a subclass
of C{self}'s current class. For example, if a generic
C{ValueDoc} was created for a value, and it is determined that
the value is a routine, you can update its class with:
>>> valdoc.specialize_to(RoutineDoc)
"""
if not issubclass(cls, self.__class__):
raise ValueError('Can not specialize to %r' % cls)
# Update the class.
self.__class__ = cls
# Update the class of any other apidoc's in the mergeset.
if self.__mergeset is not None:
for apidoc in self.__mergeset:
apidoc.__class__ = cls
# Re-initialize self, in case the subclass constructor does
# any special processing on its arguments.
self.__init__(**self.__dict__)
__has_been_hashed = False
"""True iff L{self.__hash__()} has ever been called."""
def __hash__(self):
self.__has_been_hashed = True
return id(self.__dict__)
def __cmp__(self, other):
if not isinstance(other, APIDoc): return -1
if self.__dict__ is other.__dict__: return 0
name_cmp = cmp(self.canonical_name, other.canonical_name)
if name_cmp == 0: return -1
else: return name_cmp
def is_detailed(self):
"""
Does this object deserve a box with extra details?
@return: True if the object needs extra details, else False.
@rtype: C{bool}
"""
if self.other_docs is True:
return True
if self.metadata is not UNKNOWN:
return bool(self.metadata)
__mergeset = None
"""The set of all C{APIDoc} objects that have been merged with
this C{APIDoc} (using L{merge_and_overwrite()}). Each C{APIDoc}
in this set shares a common instance dictionary (C{__dict__})."""
def merge_and_overwrite(self, other, ignore_hash_conflict=False):
"""
Combine C{self} and C{other} into a X{merged object}, such
that any changes made to one will affect the other. Any
attributes that C{other} had before merging will be discarded.
This is accomplished by copying C{self.__dict__} over
C{other.__dict__} and C{self.__class__} over C{other.__class__}.
Care must be taken with this method, since it modifies the
hash value of C{other}. To help avoid the problems that this
can cause, C{merge_and_overwrite} will raise an exception if
C{other} has ever been hashed, unless C{ignore_hash_conflict}
is True. Note that adding C{other} to a dictionary, set, or
similar data structure will implicitly cause it to be hashed.
If you do set C{ignore_hash_conflict} to True, then any
existing data structures that rely on C{other}'s hash staying
constant may become corrupted.
@return: C{self}
@raise ValueError: If C{other} has ever been hashed.
"""
# If we're already merged, then there's nothing to do.
if (self.__dict__ is other.__dict__ and
self.__class__ is other.__class__): return self
if other.__has_been_hashed and not ignore_hash_conflict:
raise ValueError("%r has already been hashed! Merging it "
"would cause its has value to change." % other)
# If other was itself already merged with anything,
# then we need to merge those too.
a,b = (self.__mergeset, other.__mergeset)
mergeset = (self.__mergeset or [self]) + (other.__mergeset or [other])
other.__dict__.clear()
for apidoc in mergeset:
#if apidoc is self: pass
apidoc.__class__ = self.__class__
apidoc.__dict__ = self.__dict__
self.__mergeset = mergeset
# Sanity checks.
assert self in mergeset and other in mergeset
for apidoc in mergeset:
assert apidoc.__dict__ is self.__dict__
# Return self.
return self
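# Illustrative sketch of a typical merge (assumed usage in the doc builder):
#
#     parse_doc.merge_and_overwrite(introspect_doc)
#
# After the call both APIDocs share a single __dict__ (and __class__), so a
# later assignment like parse_doc.docstring = "..." is immediately visible
# through introspect_doc as well; this is also why an already-hashed `other`
# is refused by default.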
def apidoc_links(self, **filters):
"""
Return a list of all C{APIDoc}s that are directly linked from
this C{APIDoc} (i.e., are contained or pointed to by one or
more of this C{APIDoc}'s attributes.)
Keyword argument C{filters} can be used to selectively exclude
certain categories of attribute value. For example, using
C{includes=False} will exclude variables that were imported
from other modules; and C{subclasses=False} will exclude
subclasses. The filter categories currently supported by
epydoc are:
- C{imports}: Imported variables.
- C{packages}: Containing packages for modules.
- C{submodules}: Contained submodules for packages.
- C{bases}: Bases for classes.
- C{subclasses}: Subclasses for classes.
- C{variables}: All variables.
- C{private}: Private variables.
- C{overrides}: Points from class variables to the variables
they override. This filter is False by default.
"""
return []
def reachable_valdocs(root, **filters):
"""
Return a list of all C{ValueDoc}s that can be reached, directly or
indirectly from the given root list of C{ValueDoc}s.
@param filters: A set of filters that can be used to prevent
C{reachable_valdocs} from following specific link types when
looking for C{ValueDoc}s that can be reached from the root
set. See C{APIDoc.apidoc_links} for a more complete
description.
"""
apidoc_queue = list(root)
val_set = set()
var_set = set()
while apidoc_queue:
api_doc = apidoc_queue.pop()
if isinstance(api_doc, ValueDoc):
val_set.add(api_doc)
else:
var_set.add(api_doc)
apidoc_queue.extend([v for v in api_doc.apidoc_links(**filters)
if v not in val_set and v not in var_set])
return val_set
######################################################################
# Variable Documentation Objects
######################################################################
class VariableDoc(APIDoc):
"""
API documentation information about a single Python variable.
@note: The only time a C{VariableDoc} will have its own docstring
is if that variable was created using an assignment statement, and
that assignment statement had a docstring-comment or was followed
by a pseudo-docstring.
"""
#{ Basic Variable Information
name = UNKNOWN
"""@ivar: The name of this variable in its containing namespace.
@type: C{str}"""
container = UNKNOWN
"""@ivar: API documentation for the namespace that contains this
variable.
@type: L{ValueDoc}"""
canonical_name = UNKNOWN
"""@ivar: A dotted name that serves as a unique identifier for
this C{VariableDoc}. It should be formed by concatenating
the C{VariableDoc}'s C{container} with its C{name}.
@type: L{DottedName}"""
value = UNKNOWN
"""@ivar: The API documentation for this variable's value.
@type: L{ValueDoc}"""
#}
#{ Information Extracted from Docstrings
type_descr = UNKNOWN
"""@ivar: A description of the variable's expected type, extracted from
its docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
#} end of "information extracted from docstrings" group
#{ Information about Imported Variables
imported_from = UNKNOWN
"""@ivar: The fully qualified dotted name of the variable that this
variable's value was imported from. This attribute should only
be defined if C{is_instvar} is true.
@type: L{DottedName}"""
is_imported = UNKNOWN
"""@ivar: Was this variable's value imported from another module?
(Exception: variables that are explicitly included in __all__ have
C{is_imported} set to C{False}, even if they are in fact
imported.)
@type: C{bool}"""
#} end of "information about imported variables" group
#{ Information about Variables in Classes
is_instvar = UNKNOWN
"""@ivar: If true, then this variable is an instance variable; if false,
then this variable is a class variable. This attribute should
only be defined if the containing namespace is a class
@type: C{bool}"""
overrides = UNKNOWN # [XXX] rename -- don't use a verb.
"""@ivar: The API documentation for the variable that is overridden by
this variable. This attribute should only be defined if the
containing namespace is a class.
@type: L{VariableDoc}"""
#} end of "information about variables in classes" group
#{ Flags
is_alias = UNKNOWN
"""@ivar: Is this variable an alias for another variable with the same
value? If so, then this variable will be dispreferred when
assigning canonical names.
@type: C{bool}"""
is_public = UNKNOWN
"""@ivar: Is this variable part of its container's public API?
@type: C{bool}"""
#} end of "flags" group
def __init__(self, **kwargs):
APIDoc.__init__(self, **kwargs)
if self.is_public is UNKNOWN and self.name is not UNKNOWN:
self.is_public = (not self.name.startswith('_') or
self.name.endswith('_'))
def __repr__(self):
if self.canonical_name is not UNKNOWN:
return '<%s %s>' % (self.__class__.__name__, self.canonical_name)
if self.name is not UNKNOWN:
return '<%s %s>' % (self.__class__.__name__, self.name)
else:
return '<%s>' % self.__class__.__name__
def _get_defining_module(self):
if self.container is UNKNOWN:
return UNKNOWN
return self.container.defining_module
defining_module = property(_get_defining_module, doc="""
A read-only property that can be used to get the variable's
defining module. This is defined as the defining module
of the variable's container.""")
def apidoc_links(self, **filters):
# nb: overrides filter is *False* by default.
if (filters.get('overrides', False) and
(self.overrides not in (None, UNKNOWN))):
overrides = [self.overrides]
else:
overrides = []
if self.value in (None, UNKNOWN):
return []+overrides
else:
return [self.value]+overrides
def is_detailed(self):
pval = super(VariableDoc, self).is_detailed()
if pval or self.value in (None, UNKNOWN):
return pval
if (self.overrides not in (None, UNKNOWN) and
isinstance(self.value, RoutineDoc)):
return True
if isinstance(self.value, GenericValueDoc):
# [XX] This is a little hackish -- we assume that the
# summary lines will have SUMMARY_REPR_LINELEN chars,
# that len(name) of those will be taken up by the name,
# and that 3 of those will be taken up by " = " between
# the name & val. Note that if any docwriter uses a
# different formula for maxlen for this, then it will
# not get the right value for is_detailed().
maxlen = self.value.SUMMARY_REPR_LINELEN-3-len(self.name)
return (not self.value.summary_pyval_repr(maxlen).is_complete)
else:
return self.value.is_detailed()
######################################################################
# Value Documentation Objects
######################################################################
class ValueDoc(APIDoc):
"""
API documentation information about a single Python value.
"""
canonical_name = UNKNOWN
"""@ivar: A dotted name that serves as a unique identifier for
this C{ValueDoc}'s value. If the value can be reached using a
single sequence of identifiers (given the appropriate imports),
then that sequence of identifiers is used as its canonical name.
If the value can be reached by multiple sequences of identifiers
(i.e., if it has multiple aliases), then one of those sequences of
identifiers is used. If the value cannot be reached by any
sequence of identifiers (e.g., if it was used as a base class but
then its variable was deleted), then its canonical name will start
with C{'??'}. If necessary, a dash followed by a number will be
appended to the end of a non-reachable identifier to make its
canonical name unique.
When possible, canonical names are chosen when new C{ValueDoc}s
are created. However, this is sometimes not possible. If a
canonical name can not be chosen when the C{ValueDoc} is created,
then one will be assigned by L{assign_canonical_names()
<docbuilder.assign_canonical_names>}.
@type: L{DottedName}"""
#{ Value Representation
pyval = UNKNOWN
"""@ivar: A pointer to the actual Python object described by this
C{ValueDoc}. This is used to display the value (e.g., when
describing a variable.) Use L{pyval_repr()} to generate a
plaintext string representation of this value.
@type: Python object"""
parse_repr = UNKNOWN
"""@ivar: A text representation of this value, extracted from
parsing its source code. This representation may not accurately
reflect the actual value (e.g., if the value was modified after
the initial assignment).
@type: C{unicode}"""
REPR_MAXLINES = 5
"""@cvar: The maximum number of lines of text that should be
generated by L{pyval_repr()}. If the string representation does
not fit in this number of lines, an ellipsis marker (...) will
be placed at the end of the formatted representation."""
REPR_LINELEN = 75
"""@cvar: The maximum number of characters for lines of text that
should be generated by L{pyval_repr()}. Any lines that exceed
this number of characters will be line-wrapped; the S{crarr}
symbol will be used to indicate that the line was wrapped."""
SUMMARY_REPR_LINELEN = 75
"""@cvar: The maximum number of characters for the single-line
text representation generated by L{summary_pyval_repr()}. If
the value's representation does not fit in this number of
characters, an ellipsis marker (...) will be placed at the end
of the formatted representation."""
REPR_MIN_SCORE = 0
"""@cvar: The minimum score that a value representation based on
L{pyval} should have in order to be used instead of L{parse_repr}
as the canonical representation for this C{ValueDoc}'s value.
@see: L{epydoc.markup.pyval_repr}"""
#} end of "value representation" group
#{ Context
defining_module = UNKNOWN
"""@ivar: The documentation for the module that defines this
value. This is used, e.g., to lookup the appropriate markup
language for docstrings. For a C{ModuleDoc},
C{defining_module} should be C{self}.
@type: L{ModuleDoc}"""
#} end of "context group"
#{ Information about Imported Variables
proxy_for = None # [xx] in progress.
"""@ivar: If C{proxy_for} is not None, then this value was
imported from another file. C{proxy_for} is the dotted name of
the variable that this value was imported from. If that
variable is documented, then its C{value} may contain more
complete API documentation about this value. The C{proxy_for}
attribute is used by the source code parser to link imported
values to their source values (in particular, for base
classes). When possible, these proxy C{ValueDoc}s are replaced
by the imported value's C{ValueDoc} by
L{link_imports()<docbuilder.link_imports>}.
@type: L{DottedName}"""
#} end of "information about imported variables" group
#: @ivar:
#: This is currently used to extract values from __all__, etc, in
#: the docparser module; maybe I should specialize
#: process_assignment and extract it there? Although, for __all__,
#: it's not clear where I'd put the value, since I just use it to
#: set private/public/imported attribs on other vars (that might not
#: exist yet at the time.)
toktree = UNKNOWN
def __repr__(self):
if self.canonical_name is not UNKNOWN:
return '<%s %s>' % (self.__class__.__name__, self.canonical_name)
else:
return '<%s %s>' % (self.__class__.__name__,
self.summary_pyval_repr().to_plaintext(None))
def __setstate__(self, state):
self.__dict__ = state
def __getstate__(self):
"""
State serializer for the pickle module. This is necessary
because sometimes the C{pyval} attribute contains an
un-pickleable value.
"""
# Construct our pickled dictionary. Maintain this dictionary
# as a private attribute, so we can reuse it later, since
# merged objects need to share a single dictionary.
if not hasattr(self, '_ValueDoc__pickle_state'):
# Make sure __pyval_repr & __summary_pyval_repr are cached:
self.pyval_repr(), self.summary_pyval_repr()
# Construct the dictionary; leave out 'pyval'.
self.__pickle_state = self.__dict__.copy()
self.__pickle_state['pyval'] = UNKNOWN
if not isinstance(self, GenericValueDoc):
assert self.__pickle_state != {}
# Return the pickle state.
return self.__pickle_state
#{ Value Representation
def pyval_repr(self):
"""
Return a formatted representation of the Python object
described by this C{ValueDoc}. This representation may
include data from introspection or parsing, and is authoritative
as 'the best way to represent a Python value.' Any lines that
go beyond L{REPR_LINELEN} characters will be wrapped; and if
the representation as a whole takes more than L{REPR_MAXLINES}
lines, then it will be truncated (with an ellipsis marker).
This function will never return L{UNKNOWN} or C{None}.
@rtype: L{ColorizedPyvalRepr}
"""
# Use self.__pyval_repr to cache the result.
if not hasattr(self, '_ValueDoc__pyval_repr'):
self.__pyval_repr = epydoc.markup.pyval_repr.colorize_pyval(
self.pyval, self.parse_repr, self.REPR_MIN_SCORE,
self.REPR_LINELEN, self.REPR_MAXLINES, linebreakok=True)
return self.__pyval_repr
def summary_pyval_repr(self, max_len=None):
"""
Return a single-line formatted representation of the Python
object described by this C{ValueDoc}. This representation may
include data from introspection or parsing, and is authoritative
as 'the best way to summarize a Python value.' If the
representation takes more than L{SUMMARY_REPR_LINELEN}
characters, then it will be truncated (with an ellipsis
marker). This function will never return L{UNKNOWN} or
C{None}.
@rtype: L{ColorizedPyvalRepr}
"""
# If max_len is specified, then do *not* cache the result.
if max_len is not None:
return epydoc.markup.pyval_repr.colorize_pyval(
self.pyval, self.parse_repr, self.REPR_MIN_SCORE,
max_len, maxlines=1, linebreakok=False)
# Use self.__summary_pyval_repr to cache the result.
if not hasattr(self, '_ValueDoc__summary_pyval_repr'):
self.__summary_pyval_repr = epydoc.markup.pyval_repr.colorize_pyval(
self.pyval, self.parse_repr, self.REPR_MIN_SCORE,
self.SUMMARY_REPR_LINELEN, maxlines=1, linebreakok=False)
return self.__summary_pyval_repr
#} end of "value representation" group
def apidoc_links(self, **filters):
return []
class GenericValueDoc(ValueDoc):
"""
API documentation about a 'generic' value, i.e., one that does not
have its own docstring or any information other than its value and
parse representation. C{GenericValueDoc}s do not get assigned
canonical names.
"""
canonical_name = None
def is_detailed(self):
return (not self.summary_pyval_repr().is_complete)
class NamespaceDoc(ValueDoc):
"""
API documentation information about a single Python namespace
value. (I.e., a module or a class).
"""
#{ Information about Variables
variables = UNKNOWN
"""@ivar: The contents of the namespace, encoded as a
dictionary mapping from identifiers to C{VariableDoc}s. This
dictionary contains all names defined by the namespace,
including imported variables, aliased variables, and variables
inherited from base classes (once L{inherit_docs()
<epydoc.docbuilder.inherit_docs>} has added them).
@type: C{dict} from C{string} to L{VariableDoc}"""
sorted_variables = UNKNOWN
"""@ivar: A list of all variables defined by this
namespace, in sorted order. The elements of this list should
exactly match the values of L{variables}. The sort order for
this list is defined as follows:
- Any variables listed in a C{@sort} docstring field are
listed in the order given by that field.
- These are followed by any variables that were found while
parsing the source code, in the order in which they were
defined in the source file.
- Finally, any remaining variables are listed in
alphabetical order.
@type: C{list} of L{VariableDoc}"""
sort_spec = UNKNOWN
"""@ivar: The order in which variables should be listed,
encoded as a list of names. Any variables whose names are not
included in this list should be listed alphabetically,
following the variables that are included.
@type: C{list} of C{str}"""
group_specs = UNKNOWN
"""@ivar: The groups that are defined by this namespace's
docstrings. C{group_specs} is encoded as an ordered list of
tuples C{(group_name, elt_names)}, where C{group_name} is the
name of a group and C{elt_names} is a list of element names in
that group. (An element can be a variable or a submodule.) A
'*' in an element name will match any string of characters.
@type: C{list} of C{(str,list)}"""
variable_groups = UNKNOWN
"""@ivar: A dictionary specifying what group each
variable belongs to. The keys of the dictionary are group
names, and the values are lists of C{VariableDoc}s. The order
that groups should be listed in should be taken from
L{group_specs}.
@type: C{dict} from C{str} to C{list} of L{VariableDoc}"""
#} end of group "information about variables"
def __init__(self, **kwargs):
kwargs.setdefault('variables', {})
APIDoc.__init__(self, **kwargs)
assert self.variables is not UNKNOWN
def is_detailed(self):
return True
def apidoc_links(self, **filters):
variables = filters.get('variables', True)
imports = filters.get('imports', True)
private = filters.get('private', True)
if variables and imports and private:
return self.variables.values() # list the common case first.
elif not variables:
return []
elif not imports and not private:
return [v for v in self.variables.values() if
v.is_imported != True and v.is_public != False]
elif not private:
return [v for v in self.variables.values() if
v.is_public != False]
elif not imports:
return [v for v in self.variables.values() if
v.is_imported != True]
assert 0, 'this line should be unreachable'
def init_sorted_variables(self):
"""
Initialize the L{sorted_variables} attribute, based on the
L{variables} and L{sort_spec} attributes. This should usually
be called after all variables have been added to C{variables}
(including any inherited variables for classes).
"""
unsorted = self.variables.copy()
self.sorted_variables = []
# Add any variables that are listed in sort_spec
if self.sort_spec is not UNKNOWN:
unused_idents = set(self.sort_spec)
for ident in self.sort_spec:
if ident in unsorted:
self.sorted_variables.append(unsorted.pop(ident))
unused_idents.discard(ident)
elif '*' in ident:
regexp = re.compile('^%s$' % ident.replace('*', '(.*)'))
# sort within matching group?
for name, var_doc in unsorted.items():
if regexp.match(name):
self.sorted_variables.append(unsorted.pop(name))
unused_idents.discard(ident)
for ident in unused_idents:
if ident not in ['__all__', '__docformat__', '__path__']:
log.warning("@sort: %s.%s not found" %
(self.canonical_name, ident))
# Add any remaining variables in alphabetical order.
var_docs = unsorted.items()
var_docs.sort()
for name, var_doc in var_docs:
self.sorted_variables.append(var_doc)
def init_variable_groups(self):
"""
Initialize the L{variable_groups} attribute, based on the
L{sorted_variables} and L{group_specs} attributes.
"""
if self.sorted_variables is UNKNOWN:
self.init_sorted_variables()
assert len(self.sorted_variables) == len(self.variables)
elts = [(v.name, v) for v in self.sorted_variables]
self._unused_groups = dict([(n,set(i)) for (n,i) in self.group_specs])
self.variable_groups = self._init_grouping(elts)
def group_names(self):
"""
Return a list of the group names defined by this namespace, in
the order in which they should be listed, with no duplicates.
"""
name_list = ['']
name_set = set()
for name, spec in self.group_specs:
if name not in name_set:
name_set.add(name)
name_list.append(name)
return name_list
def _init_grouping(self, elts):
"""
        Divide a given list of APIDoc objects into groups, as
specified by L{self.group_specs}.
@param elts: A list of tuples C{(name, apidoc)}.
@return: A list of tuples C{(groupname, elts)}, where
C{groupname} is the name of a group and C{elts} is a list of
C{APIDoc}s in that group. The first tuple has name C{''}, and
is used for ungrouped elements. The remaining tuples are
listed in the order that they appear in C{self.group_specs}.
Within each tuple, the elements are listed in the order that
        they appear in C{elts}.
"""
# Make the common case fast.
if len(self.group_specs) == 0:
return {'': [elt[1] for elt in elts]}
        ungrouped = dict(elts)
groups = {}
for elt_name, elt_doc in elts:
for (group_name, idents) in self.group_specs:
group = groups.setdefault(group_name, [])
unused_groups = self._unused_groups[group_name]
for ident in idents:
if re.match('^%s$' % ident.replace('*', '(.*)'), elt_name):
unused_groups.discard(ident)
if elt_name in ungrouped:
group.append(ungrouped.pop(elt_name))
else:
log.warning("%s.%s in multiple groups" %
(self.canonical_name, elt_name))
# Convert ungrouped from an unordered set to an ordered list.
groups[''] = [elt_doc for (elt_name, elt_doc) in elts
if elt_name in ungrouped]
return groups
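    # Hypothetical example, for illustration only: with
    # group_specs == [('Accessors', ['get_*'])] and
    # elts == [('get_x', d1), ('get_y', d2), ('other', d3)],
    # the grouping computed for elts would be roughly
    # {'Accessors': [d1, d2], '': [d3]}, where '' collects the
    # ungrouped elements.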
def report_unused_groups(self):
"""
Issue a warning for any @group items that were not used by
L{_init_grouping()}.
"""
for (group, unused_idents) in self._unused_groups.items():
for ident in unused_idents:
log.warning("@group %s: %s.%s not found" %
(group, self.canonical_name, ident))
class ModuleDoc(NamespaceDoc):
"""
API documentation information about a single module.
"""
#{ Information about the Module
filename = UNKNOWN
"""@ivar: The name of the file that defines the module.
@type: C{string}"""
docformat = UNKNOWN
"""@ivar: The markup language used by docstrings in this module.
@type: C{string}"""
#{ Information about Submodules
submodules = UNKNOWN
"""@ivar: Modules contained by this module (if this module
is a package). (Note: on rare occasions, a module may have a
submodule that is shadowed by a variable with the same name.)
@type: C{list} of L{ModuleDoc}"""
submodule_groups = UNKNOWN
"""@ivar: A dictionary specifying what group each
submodule belongs to. The keys of the dictionary are group
names, and the values are lists of C{ModuleDoc}s. The order
that groups should be listed in should be taken from
L{group_specs}.
@type: C{dict} from C{str} to C{list} of L{ModuleDoc}"""
#{ Information about Packages
package = UNKNOWN
"""@ivar: API documentation for the module's containing package.
@type: L{ModuleDoc}"""
is_package = UNKNOWN
"""@ivar: True if this C{ModuleDoc} describes a package.
@type: C{bool}"""
path = UNKNOWN
"""@ivar: If this C{ModuleDoc} describes a package, then C{path}
contains a list of directories that constitute its path (i.e.,
the value of its C{__path__} variable).
@type: C{list} of C{str}"""
#{ Information about Imported Variables
imports = UNKNOWN
"""@ivar: A list of the source names of variables imported into
this module. This is used to construct import graphs.
@type: C{list} of L{DottedName}"""
#}
def apidoc_links(self, **filters):
val_docs = NamespaceDoc.apidoc_links(self, **filters)
if (filters.get('packages', True) and
self.package not in (None, UNKNOWN)):
val_docs.append(self.package)
if (filters.get('submodules', True) and
self.submodules not in (None, UNKNOWN)):
val_docs += self.submodules
return val_docs
def init_submodule_groups(self):
"""
Initialize the L{submodule_groups} attribute, based on the
L{submodules} and L{group_specs} attributes.
"""
if self.submodules in (None, UNKNOWN):
return
self.submodules = sorted(self.submodules,
key=lambda m:m.canonical_name)
elts = [(m.canonical_name[-1], m) for m in self.submodules]
self.submodule_groups = self._init_grouping(elts)
def select_variables(self, group=None, value_type=None, public=None,
imported=None, detailed=None):
"""
Return a specified subset of this module's L{sorted_variables}
list. If C{value_type} is given, then only return variables
whose values have the specified type. If C{group} is given,
then only return variables that belong to the specified group.
@require: The L{sorted_variables}, L{variable_groups}, and
L{submodule_groups} attributes must be initialized before
this method can be used. See L{init_sorted_variables()},
L{init_variable_groups()}, and L{init_submodule_groups()}.
@param value_type: A string specifying the value type for
which variables should be returned. Valid values are:
- 'class' - variables whose values are classes or types.
- 'function' - variables whose values are functions.
- 'other' - variables whose values are not classes,
exceptions, types, or functions.
@type value_type: C{string}
@param group: The name of the group for which variables should
be returned. A complete list of the groups defined by
this C{ModuleDoc} is available in the L{group_names}
instance variable. The first element of this list is
always the special group name C{''}, which is used for
variables that do not belong to any group.
@type group: C{string}
@param detailed: If True (False), return only the variables
deserving (not deserving) a detailed informative box.
If C{None}, don't care.
@type detailed: C{bool}
"""
if (self.sorted_variables is UNKNOWN or
self.variable_groups is UNKNOWN):
raise ValueError('sorted_variables and variable_groups '
'must be initialized first.')
if group is None: var_list = self.sorted_variables
else:
var_list = self.variable_groups.get(group, self.sorted_variables)
# Public/private filter (Count UNKNOWN as public)
if public is True:
var_list = [v for v in var_list if v.is_public is not False]
elif public is False:
var_list = [v for v in var_list if v.is_public is False]
# Imported filter (Count UNKNOWN as non-imported)
if imported is True:
var_list = [v for v in var_list if v.is_imported is True]
elif imported is False:
var_list = [v for v in var_list if v.is_imported is not True]
# Detailed filter
if detailed is True:
var_list = [v for v in var_list if v.is_detailed() is True]
elif detailed is False:
var_list = [v for v in var_list if v.is_detailed() is not True]
# [xx] Modules are not currently included in any of these
# value types.
if value_type is None:
return var_list
elif value_type == 'class':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, ClassDoc))]
elif value_type == 'function':
return [var_doc for var_doc in var_list
if isinstance(var_doc.value, RoutineDoc)]
elif value_type == 'other':
return [var_doc for var_doc in var_list
if not isinstance(var_doc.value,
(ClassDoc, RoutineDoc, ModuleDoc))]
else:
raise ValueError('Bad value type %r' % value_type)
class ClassDoc(NamespaceDoc):
"""
API documentation information about a single class.
"""
#{ Information about Base Classes
bases = UNKNOWN
"""@ivar: API documentation for the class's base classes.
@type: C{list} of L{ClassDoc}"""
#{ Information about Subclasses
subclasses = UNKNOWN
"""@ivar: API documentation for the class's known subclasses.
@type: C{list} of L{ClassDoc}"""
#}
def apidoc_links(self, **filters):
val_docs = NamespaceDoc.apidoc_links(self, **filters)
if (filters.get('bases', True) and
self.bases not in (None, UNKNOWN)):
val_docs += self.bases
if (filters.get('subclasses', True) and
self.subclasses not in (None, UNKNOWN)):
val_docs += self.subclasses
return val_docs
def is_type(self):
if self.canonical_name == DottedName('type'): return True
if self.bases is UNKNOWN: return False
for base in self.bases:
if isinstance(base, ClassDoc) and base.is_type():
return True
return False
def is_exception(self):
if self.canonical_name == DottedName('Exception'): return True
if self.bases is UNKNOWN: return False
for base in self.bases:
if isinstance(base, ClassDoc) and base.is_exception():
return True
return False
def is_newstyle_class(self):
if self.canonical_name == DottedName('object'): return True
if self.bases is UNKNOWN: return False
for base in self.bases:
if isinstance(base, ClassDoc) and base.is_newstyle_class():
return True
return False
def mro(self, warn_about_bad_bases=False):
if self.is_newstyle_class():
return self._c3_mro(warn_about_bad_bases)
else:
return self._dfs_bases([], set(), warn_about_bad_bases)
def _dfs_bases(self, mro, seen, warn_about_bad_bases):
if self in seen: return mro
mro.append(self)
seen.add(self)
if self.bases is not UNKNOWN:
for base in self.bases:
if isinstance(base, ClassDoc) and base.proxy_for is None:
base._dfs_bases(mro, seen, warn_about_bad_bases)
elif warn_about_bad_bases:
self._report_bad_base(base)
return mro
def _c3_mro(self, warn_about_bad_bases):
"""
Compute the class precedence list (mro) according to C3.
@seealso: U{http://www.python.org/2.3/mro.html}
"""
bases = [base for base in self.bases if isinstance(base, ClassDoc)]
if len(bases) != len(self.bases) and warn_about_bad_bases:
for base in self.bases:
if (not isinstance(base, ClassDoc) or
base.proxy_for is not None):
self._report_bad_base(base)
w = [warn_about_bad_bases]*len(bases)
return self._c3_merge([[self]] + map(ClassDoc._c3_mro, bases, w) +
[list(bases)])
def _report_bad_base(self, base):
if not isinstance(base, ClassDoc):
if not isinstance(base, GenericValueDoc):
base_name = base.canonical_name
elif base.parse_repr is not UNKNOWN:
base_name = base.parse_repr
else:
base_name = '%r' % base
log.warning("%s's base %s is not a class" %
(self.canonical_name, base_name))
elif base.proxy_for is not None:
log.warning("No information available for %s's base %s" %
(self.canonical_name, base.proxy_for))
def _c3_merge(self, seqs):
"""
Helper function for L{_c3_mro}.
"""
res = []
while 1:
nonemptyseqs=[seq for seq in seqs if seq]
if not nonemptyseqs: return res
for seq in nonemptyseqs: # find merge candidates among seq heads
cand = seq[0]
nothead=[s for s in nonemptyseqs if cand in s[1:]]
if nothead: cand=None #reject candidate
else: break
            if not cand: raise ValueError("Inconsistent hierarchy")
res.append(cand)
for seq in nonemptyseqs: # remove cand
if seq[0] == cand: del seq[0]
def select_variables(self, group=None, value_type=None, inherited=None,
public=None, imported=None, detailed=None):
"""
Return a specified subset of this class's L{sorted_variables}
list. If C{value_type} is given, then only return variables
whose values have the specified type. If C{group} is given,
then only return variables that belong to the specified group.
If C{inherited} is True, then only return inherited variables;
if C{inherited} is False, then only return local variables.
@require: The L{sorted_variables} and L{variable_groups}
attributes must be initialized before this method can be
used. See L{init_sorted_variables()} and
L{init_variable_groups()}.
@param value_type: A string specifying the value type for
which variables should be returned. Valid values are:
- 'instancemethod' - variables whose values are
instance methods.
- 'classmethod' - variables whose values are class
methods.
- 'staticmethod' - variables whose values are static
methods.
- 'properties' - variables whose values are properties.
- 'class' - variables whose values are nested classes
(including exceptions and types).
- 'instancevariable' - instance variables. This includes
any variables that are explicitly marked as instance
variables with docstring fields; and variables with
docstrings that are initialized in the constructor.
- 'classvariable' - class variables. This includes any
variables that are not included in any of the above
categories.
@type value_type: C{string}
@param group: The name of the group for which variables should
be returned. A complete list of the groups defined by
this C{ClassDoc} is available in the L{group_names}
instance variable. The first element of this list is
always the special group name C{''}, which is used for
variables that do not belong to any group.
@type group: C{string}
@param inherited: If C{None}, then return both inherited and
local variables; if C{True}, then return only inherited
variables; if C{False}, then return only local variables.
@param detailed: If True (False), return only the variables
deserving (not deserving) a detailed informative box.
If C{None}, don't care.
@type detailed: C{bool}
"""
if (self.sorted_variables is UNKNOWN or
self.variable_groups is UNKNOWN):
raise ValueError('sorted_variables and variable_groups '
'must be initialized first.')
if group is None: var_list = self.sorted_variables
else: var_list = self.variable_groups[group]
# Public/private filter (Count UNKNOWN as public)
if public is True:
var_list = [v for v in var_list if v.is_public is not False]
elif public is False:
var_list = [v for v in var_list if v.is_public is False]
# Inherited filter (Count UNKNOWN as non-inherited)
if inherited is None: pass
elif inherited:
var_list = [v for v in var_list if v.container != self]
else:
var_list = [v for v in var_list if v.container == self ]
# Imported filter (Count UNKNOWN as non-imported)
if imported is True:
var_list = [v for v in var_list if v.is_imported is True]
elif imported is False:
var_list = [v for v in var_list if v.is_imported is not True]
# Detailed filter
if detailed is True:
var_list = [v for v in var_list if v.is_detailed() is True]
elif detailed is False:
var_list = [v for v in var_list if v.is_detailed() is not True]
if value_type is None:
return var_list
elif value_type == 'method':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, RoutineDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'instancemethod':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, RoutineDoc) and
not isinstance(var_doc.value, ClassMethodDoc) and
not isinstance(var_doc.value, StaticMethodDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'classmethod':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, ClassMethodDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'staticmethod':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, StaticMethodDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'property':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, PropertyDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'class':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, ClassDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'instancevariable':
return [var_doc for var_doc in var_list
if var_doc.is_instvar is True]
elif value_type == 'classvariable':
return [var_doc for var_doc in var_list
if (var_doc.is_instvar in (False, UNKNOWN) and
not isinstance(var_doc.value,
(RoutineDoc, ClassDoc, PropertyDoc)))]
else:
raise ValueError('Bad value type %r' % value_type)
class RoutineDoc(ValueDoc):
"""
API documentation information about a single routine.
"""
#{ Signature
posargs = UNKNOWN
"""@ivar: The names of the routine's positional arguments.
If an argument list contains \"unpacking\" arguments, then
their names will be specified using nested lists. E.g., if
a function's argument list is C{((x1,y1), (x2,y2))}, then
posargs will be C{[['x1','y1'], ['x2','y2']]}.
@type: C{list}"""
posarg_defaults = UNKNOWN
"""@ivar: API documentation for the positional arguments'
default values. This list has the same length as C{posargs}, and
each element of C{posarg_defaults} describes the corresponding
argument in C{posargs}. For positional arguments with no default,
    C{posarg_defaults} will contain None.
@type: C{list} of C{ValueDoc} or C{None}"""
vararg = UNKNOWN
"""@ivar: The name of the routine's vararg argument, or C{None} if
it has no vararg argument.
@type: C{string} or C{None}"""
kwarg = UNKNOWN
"""@ivar: The name of the routine's keyword argument, or C{None} if
it has no keyword argument.
@type: C{string} or C{None}"""
lineno = UNKNOWN # used to look up profiling info from pstats.
"""@ivar: The line number of the first line of the function's
signature. For Python functions, this is equal to
C{func.func_code.co_firstlineno}. The first line of a file
is considered line 1.
@type: C{int}"""
#} end of "signature" group
#{ Decorators
decorators = UNKNOWN
"""@ivar: A list of names of decorators that were applied to this
routine, in the order that they are listed in the source code.
(I.e., in the reverse of the order that they were applied in.)
@type: C{list} of C{string}"""
#} end of "decorators" group
#{ Information Extracted from Docstrings
arg_descrs = UNKNOWN
"""@ivar: A list of descriptions of the routine's
arguments. Each element of this list is a tuple C{(args,
descr)}, where C{args} is a list of argument names; and
C{descr} is a L{ParsedDocstring
<epydoc.markup.ParsedDocstring>} describing the argument(s)
    specified by C{args}.
@type: C{list}"""
arg_types = UNKNOWN
"""@ivar: Descriptions of the expected types for the
routine's arguments, encoded as a dictionary mapping from
argument names to type descriptions.
@type: C{dict} from C{string} to L{ParsedDocstring
<epydoc.markup.ParsedDocstring>}"""
return_descr = UNKNOWN
"""@ivar: A description of the value returned by this routine.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
return_type = UNKNOWN
"""@ivar: A description of expected type for the value
returned by this routine.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
exception_descrs = UNKNOWN
"""@ivar: A list of descriptions of exceptions
that the routine might raise. Each element of this list is a
    tuple C{(exc, descr)}, where C{exc} is a string containing the
exception name; and C{descr} is a L{ParsedDocstring
<epydoc.markup.ParsedDocstring>} describing the circumstances
under which the exception specified by C{exc} is raised.
@type: C{list}"""
#} end of "information extracted from docstrings" group
callgraph_uid = None
"""@ivar: L{DotGraph}.uid of the call graph for the function.
@type: C{str}"""
def is_detailed(self):
if super(RoutineDoc, self).is_detailed():
return True
if self.arg_descrs not in (None, UNKNOWN) and self.arg_descrs:
return True
if self.arg_types not in (None, UNKNOWN) and self.arg_types:
return True
if self.return_descr not in (None, UNKNOWN):
return True
if self.exception_descrs not in (None, UNKNOWN) and self.exception_descrs:
return True
if (self.decorators not in (None, UNKNOWN)
and [ d for d in self.decorators
if d not in ('classmethod', 'staticmethod') ]):
return True
return False
def all_args(self):
"""
@return: A list of the names of all arguments (positional,
vararg, and keyword), in order. If a positional argument
consists of a tuple of names, then that tuple will be
flattened.
"""
if self.posargs is UNKNOWN:
return UNKNOWN
all_args = _flatten(self.posargs)
if self.vararg not in (None, UNKNOWN):
all_args.append(self.vararg)
if self.kwarg not in (None, UNKNOWN):
all_args.append(self.kwarg)
return all_args
def _flatten(lst, out=None):
"""
Return a flattened version of C{lst}.
"""
if out is None: out = []
for elt in lst:
if isinstance(elt, (list,tuple)):
_flatten(elt, out)
else:
out.append(elt)
return out
class ClassMethodDoc(RoutineDoc): pass
class StaticMethodDoc(RoutineDoc): pass
class PropertyDoc(ValueDoc):
"""
API documentation information about a single property.
"""
#{ Property Access Functions
fget = UNKNOWN
"""@ivar: API documentation for the property's get function.
@type: L{RoutineDoc}"""
fset = UNKNOWN
"""@ivar: API documentation for the property's set function.
@type: L{RoutineDoc}"""
fdel = UNKNOWN
"""@ivar: API documentation for the property's delete function.
@type: L{RoutineDoc}"""
#}
#{ Information Extracted from Docstrings
type_descr = UNKNOWN
"""@ivar: A description of the property's expected type, extracted
from its docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
#} end of "information extracted from docstrings" group
def apidoc_links(self, **filters):
val_docs = []
if self.fget not in (None, UNKNOWN): val_docs.append(self.fget)
if self.fset not in (None, UNKNOWN): val_docs.append(self.fset)
if self.fdel not in (None, UNKNOWN): val_docs.append(self.fdel)
return val_docs
def is_detailed(self):
if super(PropertyDoc, self).is_detailed():
return True
if self.fget not in (None, UNKNOWN) and self.fget.pyval is not None:
return True
if self.fset not in (None, UNKNOWN) and self.fset.pyval is not None:
return True
if self.fdel not in (None, UNKNOWN) and self.fdel.pyval is not None:
return True
return False
######################################################################
## Index
######################################################################
class DocIndex:
"""
    [xx] This note is out of date. This index cannot be used to access
    objects that are not reachable from the root level. The open
    question is what to do with a name that is not in a root value's
    namespace. Three cases:
      - entirely outside the root set (e.g. os.path)
      - inside the root set but not examined (e.g. a submodule that we
        didn't look at)
      - the container of the current object was not examined
An index of all the C{APIDoc} objects that can be reached from a
root set of C{ValueDoc}s.
The members of this index can be accessed by dotted name. In
particular, C{DocIndex} defines two mappings, accessed via the
L{get_vardoc()} and L{get_valdoc()} methods, which can be used to
access C{VariableDoc}s or C{ValueDoc}s respectively by name. (Two
separate mappings are necessary because a single name can be used
to refer to both a variable and to the value contained by that
variable.)
Additionally, the index defines two sets of C{ValueDoc}s:
\"reachable C{ValueDoc}s\" and \"contained C{ValueDoc}s\". The
X{reachable C{ValueDoc}s} are defined as the set of all
C{ValueDoc}s that can be reached from the root set by following
I{any} sequence of pointers to C{ValueDoc}s or C{VariableDoc}s.
The X{contained C{ValueDoc}s} are defined as the set of all
C{ValueDoc}s that can be reached from the root set by following
only the C{ValueDoc} pointers defined by non-imported
C{VariableDoc}s. For example, if the root set contains a module
C{m}, then the contained C{ValueDoc}s includes the C{ValueDoc}s
for any functions, variables, or classes defined in that module,
as well as methods and variables defined in classes defined in the
module. The reachable C{ValueDoc}s includes all of those
C{ValueDoc}s, as well as C{ValueDoc}s for any values imported into
the module, and base classes for classes defined in the module.
"""
def __init__(self, root):
"""
Create a new documentation index, based on the given root set
        of C{ValueDoc}s. If any C{APIDoc} reachable from the root
        set does not have a canonical name, then it will be assigned
        one.
@param root: A list of C{ValueDoc}s.
"""
for apidoc in root:
if apidoc.canonical_name in (None, UNKNOWN):
raise ValueError("All APIdocs passed to DocIndexer "
"must already have canonical names.")
# Initialize the root items list. We sort them by length in
# ascending order. (This ensures that variables will shadow
# submodules when appropriate.)
        # When two elements have the same name, list them in alphabetical order:
# this is needed by the check for duplicates below.
self.root = sorted(root,
key=lambda d: (len(d.canonical_name), d.canonical_name))
"""The list of C{ValueDoc}s to document.
@type: C{list}"""
# Drop duplicated modules
# [xx] maybe what causes duplicates should be fixed instead.
# If fixed, adjust the sort here above: sorting by names will not
# be required anymore
i = 1
while i < len(self.root):
if self.root[i-1] is self.root[i]:
del self.root[i]
else:
i += 1
self.mlclasses = self._get_module_classes(self.root)
"""A mapping from class names to L{ClassDoc}. Contains
classes defined at module level for modules in L{root}
and which can be used as fallback by L{find()} if looking
in containing namespaces fails.
@type: C{dict} from C{str} to L{ClassDoc} or C{list}"""
self.callers = None
"""A dictionary mapping from C{RoutineDoc}s in this index
to lists of C{RoutineDoc}s for the routine's callers.
This dictionary is initialized by calling
L{read_profiling_info()}.
        @type: C{dict} from L{RoutineDoc} to C{list} of L{RoutineDoc}"""
self.callees = None
"""A dictionary mapping from C{RoutineDoc}s in this index
to lists of C{RoutineDoc}s for the routine's callees.
This dictionary is initialized by calling
L{read_profiling_info()}.
        @type: C{dict} from L{RoutineDoc} to C{list} of L{RoutineDoc}"""
self._funcid_to_doc = {}
"""A mapping from C{profile} function ids to corresponding
C{APIDoc} objects. A function id is a tuple of the form
C{(filename, lineno, funcname)}. This is used to update
the L{callers} and L{callees} variables."""
self._container_cache = {}
"""A cache for the L{container()} method, to increase speed."""
self._get_cache = {}
"""A cache for the L{get_vardoc()} and L{get_valdoc()} methods,
to increase speed."""
#////////////////////////////////////////////////////////////
# Lookup methods
#////////////////////////////////////////////////////////////
# [xx]
# Currently these only work for things reachable from the
# root... :-/ I might want to change this so that imported
# values can be accessed even if they're not contained.
# Also, I might want canonical names to not start with ??
# if the thing is a top-level imported module..?
def get_vardoc(self, name):
"""
Return the C{VariableDoc} with the given name, or C{None} if this
index does not contain a C{VariableDoc} with the given name.
"""
var, val = self._get(name)
return var
def get_valdoc(self, name):
"""
Return the C{ValueDoc} with the given name, or C{None} if this
index does not contain a C{ValueDoc} with the given name.
"""
var, val = self._get(name)
return val
def _get(self, name):
"""
A helper function that's used to implement L{get_vardoc()}
and L{get_valdoc()}.
"""
# Convert name to a DottedName, if necessary.
if not isinstance(name, DottedName):
name = DottedName(name)
# Check if the result is cached.
val = self._get_cache.get(name)
if val is not None: return val
# Look for an element in the root set whose name is a prefix
# of `name`. If we can't find one, then return None.
for root_valdoc in self.root:
if root_valdoc.canonical_name.dominates(name):
# Starting at the root valdoc, walk down the variable/
# submodule chain until we find the requested item.
var_doc = None
val_doc = root_valdoc
for identifier in name[len(root_valdoc.canonical_name):]:
if val_doc is None: break
var_doc, val_doc = self._get_from(val_doc, identifier)
else:
# If we found it, then return.
if var_doc is not None or val_doc is not None:
self._get_cache[name] = (var_doc, val_doc)
return var_doc, val_doc
# We didn't find it.
self._get_cache[name] = (None, None)
return None, None
def _get_from(self, val_doc, identifier):
if isinstance(val_doc, NamespaceDoc):
child_var = val_doc.variables.get(identifier)
if child_var is not None:
child_val = child_var.value
if child_val is UNKNOWN: child_val = None
return child_var, child_val
# If that fails, then see if it's a submodule.
if (isinstance(val_doc, ModuleDoc) and
val_doc.submodules is not UNKNOWN):
for submodule in val_doc.submodules:
if submodule.canonical_name[-1] == identifier:
var_doc = None
val_doc = submodule
if val_doc is UNKNOWN: val_doc = None
return var_doc, val_doc
return None, None
def find(self, name, context):
"""
Look for an C{APIDoc} named C{name}, relative to C{context}.
Return the C{APIDoc} if one is found; otherwise, return
C{None}. C{find} looks in the following places, in order:
- Function parameters (if one matches, return C{None})
- All enclosing namespaces, from closest to furthest.
- If C{name} starts with C{'self'}, then strip it off and
look for the remaining part of the name using C{find}
- Builtins
- Parameter attributes
- Classes at module level (if the name is not ambiguous)
@type name: C{str} or L{DottedName}
@type context: L{APIDoc}
"""
if isinstance(name, basestring):
name = re.sub(r'\(.*\)$', '', name.strip())
if re.match('^([a-zA-Z_]\w*)(\.[a-zA-Z_]\w*)*$', name):
name = DottedName(name)
else:
return None
elif not isinstance(name, DottedName):
raise TypeError("'name' should be a string or DottedName")
if context is None or context.canonical_name is None:
container_name = []
else:
container_name = context.canonical_name
# Check for the name in all containing namespaces, starting
# with the closest one.
for i in range(len(container_name), -1, -1):
relative_name = container_name[:i]+name
# Is `name` the absolute name of a documented value?
# (excepting GenericValueDoc values.)
val_doc = self.get_valdoc(relative_name)
if (val_doc is not None and
not isinstance(val_doc, GenericValueDoc)):
return val_doc
# Is `name` the absolute name of a documented variable?
var_doc = self.get_vardoc(relative_name)
if var_doc is not None: return var_doc
# If the name begins with 'self', then try stripping that off
# and see if we can find the variable.
if name[0] == 'self':
doc = self.find('.'.join(name[1:]), context)
if doc is not None: return doc
# Is it the name of a builtin?
if len(name)==1 and hasattr(__builtin__, name[0]):
return None
# Is it a parameter's name or an attribute of a parameter?
if isinstance(context, RoutineDoc):
all_args = context.all_args()
if all_args is not UNKNOWN and name[0] in all_args:
return None
# Is this an object directly contained by any module?
doc = self.mlclasses.get(name[-1])
if isinstance(doc, APIDoc):
return doc
elif isinstance(doc, list):
log.warning("%s is an ambiguous name: it may be %s" % (
name[-1],
", ".join([ "'%s'" % d.canonical_name for d in doc ])))
# Drop this item so that the warning is reported only once.
# fail() will fail anyway.
del self.mlclasses[name[-1]]
def _get_module_classes(self, docs):
"""
Gather all the classes defined in a list of modules.
        People very often refer to classes by class name only, even
        if the classes are not imported into the namespace. Linking
        to such classes will fail if we look for them only in nested
        namespaces. Allow them to be retrieved by name alone.
@param docs: containers of the objects to collect
@type docs: C{list} of C{APIDoc}
        @return: mapping from object name to the object(s) with that name
@rtype: C{dict} from C{str} to L{ClassDoc} or C{list}
"""
classes = {}
for doc in docs:
if not isinstance(doc, ModuleDoc):
continue
for var in doc.variables.values():
if not isinstance(var.value, ClassDoc):
continue
val = var.value
if val in (None, UNKNOWN) or val.defining_module is not doc:
continue
if val.canonical_name in (None, UNKNOWN):
continue
name = val.canonical_name[-1]
vals = classes.get(name)
if vals is None:
classes[name] = val
elif not isinstance(vals, list):
classes[name] = [ vals, val ]
else:
vals.append(val)
return classes
#////////////////////////////////////////////////////////////
# etc
#////////////////////////////////////////////////////////////
def reachable_valdocs(self, **filters):
"""
Return a list of all C{ValueDoc}s that can be reached,
directly or indirectly from this C{DocIndex}'s root set.
@param filters: A set of filters that can be used to prevent
C{reachable_valdocs} from following specific link types
when looking for C{ValueDoc}s that can be reached from the
root set. See C{APIDoc.apidoc_links} for a more complete
description.
"""
return reachable_valdocs(self.root, **filters)
def container(self, api_doc):
"""
Return the C{ValueDoc} that contains the given C{APIDoc}, or
C{None} if its container is not in the index.
"""
# Check if the result is cached.
val = self._container_cache.get(api_doc)
if val is not None: return val
if isinstance(api_doc, GenericValueDoc):
self._container_cache[api_doc] = None
return None # [xx] unknown.
if isinstance(api_doc, VariableDoc):
self._container_cache[api_doc] = api_doc.container
return api_doc.container
if len(api_doc.canonical_name) == 1:
self._container_cache[api_doc] = None
return None
elif isinstance(api_doc, ModuleDoc) and api_doc.package is not UNKNOWN:
self._container_cache[api_doc] = api_doc.package
return api_doc.package
else:
parent = self.get_valdoc(api_doc.canonical_name.container())
self._container_cache[api_doc] = parent
return parent
#////////////////////////////////////////////////////////////
# Profiling information
#////////////////////////////////////////////////////////////
def read_profiling_info(self, profile_stats):
"""
Initialize the L{callers} and L{callees} variables, given a
        C{Stats} object from the C{pstats} module.
@warning: This method uses undocumented data structures inside
of C{profile_stats}.
"""
if self.callers is None: self.callers = {}
if self.callees is None: self.callees = {}
# The Stat object encodes functions using `funcid`s, or
# tuples of (filename, lineno, funcname). Create a mapping
# from these `funcid`s to `RoutineDoc`s.
self._update_funcid_to_doc(profile_stats)
for callee, (cc, nc, tt, ct, callers) in profile_stats.stats.items():
callee = self._funcid_to_doc.get(callee)
if callee is None: continue
for caller in callers:
caller = self._funcid_to_doc.get(caller)
if caller is None: continue
self.callers.setdefault(callee, []).append(caller)
self.callees.setdefault(caller, []).append(callee)
def _update_funcid_to_doc(self, profile_stats):
"""
        Update the dictionary mapping from C{pstats.Stats} function ids to
        C{RoutineDoc}s. C{pstats.Stats} function ids are tuples of
C{(filename, lineno, funcname)}.
"""
# Maps (filename, lineno, funcname) -> RoutineDoc
for val_doc in self.reachable_valdocs():
# We only care about routines.
if not isinstance(val_doc, RoutineDoc): continue
# Get the filename from the defining module.
module = val_doc.defining_module
if module is UNKNOWN or module.filename is UNKNOWN: continue
# Normalize the filename.
filename = os.path.abspath(module.filename)
try: filename = py_src_filename(filename)
except: pass
# Look up the stat_func_id
funcid = (filename, val_doc.lineno, val_doc.canonical_name[-1])
if funcid in profile_stats.stats:
self._funcid_to_doc[funcid] = val_doc
######################################################################
## Pretty Printing
######################################################################
def pp_apidoc(api_doc, doublespace=0, depth=5, exclude=(), include=(),
backpointers=None):
"""
@return: A multiline pretty-printed string representation for the
given C{APIDoc}.
@param doublespace: If true, then extra lines will be
inserted to make the output more readable.
@param depth: The maximum depth that pp_apidoc will descend
into descendent VarDocs. To put no limit on
depth, use C{depth=-1}.
@param exclude: A list of names of attributes whose values should
not be shown.
@param backpointers: For internal use.
"""
pyid = id(api_doc.__dict__)
if backpointers is None: backpointers = {}
if (hasattr(api_doc, 'canonical_name') and
api_doc.canonical_name not in (None, UNKNOWN)):
name = '%s for %s' % (api_doc.__class__.__name__,
api_doc.canonical_name)
elif getattr(api_doc, 'name', None) not in (UNKNOWN, None):
if (getattr(api_doc, 'container', None) not in (UNKNOWN, None) and
getattr(api_doc.container, 'canonical_name', None)
not in (UNKNOWN, None)):
name ='%s for %s' % (api_doc.__class__.__name__,
api_doc.container.canonical_name+
api_doc.name)
else:
name = '%s for %s' % (api_doc.__class__.__name__, api_doc.name)
else:
name = api_doc.__class__.__name__
if pyid in backpointers:
return '%s [%s] (defined above)' % (name, backpointers[pyid])
if depth == 0:
if hasattr(api_doc, 'name') and api_doc.name is not None:
return '%s...' % api_doc.name
else:
return '...'
backpointers[pyid] = len(backpointers)
s = '%s [%s]' % (name, backpointers[pyid])
# Only print non-empty fields:
if include:
fields = [field for field in dir(api_doc)
if field in include]
else:
fields = [field for field in api_doc.__dict__.keys()
if (getattr(api_doc, field) is not UNKNOWN
and field not in exclude)]
fields.sort()
for field in fields:
fieldval = getattr(api_doc, field)
if doublespace: s += '\n |'
s += '\n +- %s' % field
if (isinstance(fieldval, types.ListType) and
len(fieldval)>0 and
isinstance(fieldval[0], APIDoc)):
s += _pp_list(api_doc, fieldval, doublespace, depth,
exclude, include, backpointers,
(field is fields[-1]))
elif (isinstance(fieldval, types.DictType) and
len(fieldval)>0 and
isinstance(fieldval.values()[0], APIDoc)):
s += _pp_dict(api_doc, fieldval, doublespace,
depth, exclude, include, backpointers,
(field is fields[-1]))
elif isinstance(fieldval, APIDoc):
s += _pp_apidoc(api_doc, fieldval, doublespace, depth,
exclude, include, backpointers,
(field is fields[-1]))
else:
s += ' = ' + _pp_val(api_doc, fieldval, doublespace,
depth, exclude, include, backpointers)
return s
def _pp_list(api_doc, items, doublespace, depth, exclude, include,
backpointers, is_last):
line1 = (is_last and ' ') or '|'
s = ''
for item in items:
line2 = ((item is items[-1]) and ' ') or '|'
joiner = '\n %s %s ' % (line1, line2)
if doublespace: s += '\n %s |' % line1
s += '\n %s +- ' % line1
valstr = _pp_val(api_doc, item, doublespace, depth, exclude, include,
backpointers)
s += joiner.join(valstr.split('\n'))
return s
def _pp_dict(api_doc, dict, doublespace, depth, exclude, include,
backpointers, is_last):
items = dict.items()
items.sort()
line1 = (is_last and ' ') or '|'
s = ''
for item in items:
line2 = ((item is items[-1]) and ' ') or '|'
joiner = '\n %s %s ' % (line1, line2)
if doublespace: s += '\n %s |' % line1
s += '\n %s +- ' % line1
valstr = _pp_val(api_doc, item[1], doublespace, depth, exclude,
include, backpointers)
s += joiner.join(('%s => %s' % (item[0], valstr)).split('\n'))
return s
def _pp_apidoc(api_doc, val, doublespace, depth, exclude, include,
backpointers, is_last):
line1 = (is_last and ' ') or '|'
s = ''
if doublespace: s += '\n %s | ' % line1
s += '\n %s +- ' % line1
joiner = '\n %s ' % line1
childstr = pp_apidoc(val, doublespace, depth-1, exclude,
include, backpointers)
return s + joiner.join(childstr.split('\n'))
def _pp_val(api_doc, val, doublespace, depth, exclude, include, backpointers):
from epydoc import markup
if isinstance(val, APIDoc):
return pp_apidoc(val, doublespace, depth-1, exclude,
include, backpointers)
elif isinstance(val, markup.ParsedDocstring):
valrepr = `val.to_plaintext(None)`
if len(valrepr) < 40: return valrepr
else: return valrepr[:37]+'...'
else:
valrepr = repr(val)
if len(valrepr) < 40: return valrepr
else: return valrepr[:37]+'...'
| apache-2.0 |
kabrapratik28/Stanford_courses | cs224n/assignment1/q4_sentiment.py | 1 | 8150 | #!/usr/bin/env python
import argparse
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import itertools
from utils.treebank import StanfordSentiment
import utils.glove as glove
from q3_sgd import load_saved_params, sgd
# We will use sklearn here because it will run faster than implementing
# ourselves. However, for other parts of this assignment you must implement
# the functions yourself!
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
def getArguments():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--pretrained", dest="pretrained", action="store_true",
help="Use pretrained GloVe vectors.")
group.add_argument("--yourvectors", dest="yourvectors", action="store_true",
help="Use your vectors from q3.")
return parser.parse_args()
def getSentenceFeatures(tokens, wordVectors, sentence):
"""
Obtain the sentence feature for sentiment analysis by averaging its
word vectors
"""
# Implement computation for the sentence features given a sentence.
# Inputs:
# tokens -- a dictionary that maps words to their indices in
# the word vector list
# wordVectors -- word vectors (each row) for all tokens
# sentence -- a list of words in the sentence of interest
# Output:
# - sentVector: feature vector for the sentence
sentVector = np.zeros((wordVectors.shape[1],))
### YOUR CODE HERE
for word in sentence:
index = tokens[word]
sentVector += wordVectors[index]
sentVector /= len(sentence)
### END YOUR CODE
assert sentVector.shape == (wordVectors.shape[1],)
return sentVector
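# Illustrative example (hypothetical numbers): with
# tokens == {'the': 0, 'movie': 1},
# wordVectors == np.array([[1., 3.], [3., 5.]]) and
# sentence == ['the', 'movie'], the sentence feature is the element-wise
# mean of the two word vectors, i.e. np.array([2., 4.]).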
def getRegularizationValues():
"""Try different regularizations
Return a sorted list of values to try.
"""
values = None # Assign a list of floats in the block below
### YOUR CODE HERE
values = [100, 10, 1, 0, 1e-1, 5e-1, 1e-2, 5e-2,
1e-3, 5e-3, 1e-4, 5e-4, 1e-5, 5e-5, 1e-6]
### END YOUR CODE
return sorted(values)
def chooseBestModel(results):
"""Choose the best model based on parameter tuning on the dev set
Arguments:
results -- A list of python dictionaries of the following format:
{
"reg": regularization,
"clf": classifier,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy
}
Returns:
Your chosen result dictionary.
"""
bestResult = None
### YOUR CODE HERE
currBestValue = -1.0
for each_result in results:
if each_result["dev"] > currBestValue:
currBestValue = each_result["dev"]
bestResult = each_result
### END YOUR CODE
return bestResult
def accuracy(y, yhat):
""" Precision for classifier """
assert(y.shape == yhat.shape)
return np.sum(y == yhat) * 100.0 / y.size
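# For example (illustrative): accuracy(np.array([0, 1, 2, 2]),
# np.array([0, 1, 1, 2])) == 75.0, since 3 of the 4 predictions match.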
def plotRegVsAccuracy(regValues, results, filename):
""" Make a plot of regularization vs accuracy """
plt.plot(regValues, [x["train"] for x in results])
plt.plot(regValues, [x["dev"] for x in results])
plt.xscale('log')
plt.xlabel("regularization")
plt.ylabel("accuracy")
plt.legend(['train', 'dev'], loc='upper left')
plt.savefig(filename)
def outputConfusionMatrix(features, labels, clf, filename):
""" Generate a confusion matrix """
pred = clf.predict(features)
cm = confusion_matrix(labels, pred, labels=range(5))
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Reds)
plt.colorbar()
classes = ["- -", "-", "neut", "+", "+ +"]
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(filename)
def outputPredictions(dataset, features, labels, clf, filename):
""" Write the predictions to file """
pred = clf.predict(features)
with open(filename, "w") as f:
print >> f, "True\tPredicted\tText"
for i in xrange(len(dataset)):
print >> f, "%d\t%d\t%s" % (
labels[i], pred[i], " ".join(dataset[i][0]))
def main(args):
""" Train a model to do sentiment analyis"""
# Load the dataset
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
if args.yourvectors:
_, wordVectors, _ = load_saved_params()
wordVectors = np.concatenate(
(wordVectors[:nWords,:], wordVectors[nWords:,:]),
axis=1)
elif args.pretrained:
wordVectors = glove.loadWordVectors(tokens)
dimVectors = wordVectors.shape[1]
# Load the train set
trainset = dataset.getTrainSentences()
nTrain = len(trainset)
trainFeatures = np.zeros((nTrain, dimVectors))
trainLabels = np.zeros((nTrain,), dtype=np.int32)
for i in xrange(nTrain):
words, trainLabels[i] = trainset[i]
trainFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare dev set features
devset = dataset.getDevSentences()
nDev = len(devset)
devFeatures = np.zeros((nDev, dimVectors))
devLabels = np.zeros((nDev,), dtype=np.int32)
for i in xrange(nDev):
words, devLabels[i] = devset[i]
devFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare test set features
testset = dataset.getTestSentences()
nTest = len(testset)
testFeatures = np.zeros((nTest, dimVectors))
testLabels = np.zeros((nTest,), dtype=np.int32)
for i in xrange(nTest):
words, testLabels[i] = testset[i]
testFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# We will save our results from each run
results = []
regValues = getRegularizationValues()
for reg in regValues:
print "Training for reg=%f" % reg
# Note: add a very small number to regularization to please the library
clf = LogisticRegression(C=1.0/(reg + 1e-12))
clf.fit(trainFeatures, trainLabels)
# Test on train set
pred = clf.predict(trainFeatures)
trainAccuracy = accuracy(trainLabels, pred)
print "Train accuracy (%%): %f" % trainAccuracy
# Test on dev set
pred = clf.predict(devFeatures)
devAccuracy = accuracy(devLabels, pred)
print "Dev accuracy (%%): %f" % devAccuracy
# Test on test set
# Note: always running on test is poor style. Typically, you should
# do this only after validation.
pred = clf.predict(testFeatures)
testAccuracy = accuracy(testLabels, pred)
print "Test accuracy (%%): %f" % testAccuracy
results.append({
"reg": reg,
"clf": clf,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy})
# Print the accuracies
print ""
print "=== Recap ==="
print "Reg\t\tTrain\tDev\tTest"
for result in results:
print "%.2E\t%.3f\t%.3f\t%.3f" % (
result["reg"],
result["train"],
result["dev"],
result["test"])
print ""
bestResult = chooseBestModel(results)
print "Best regularization value: %0.2E" % bestResult["reg"]
print "Test accuracy (%%): %f" % bestResult["test"]
# do some error analysis
if args.pretrained:
plotRegVsAccuracy(regValues, results, "q4_reg_v_acc.png")
outputConfusionMatrix(devFeatures, devLabels, bestResult["clf"],
"q4_dev_conf.png")
outputPredictions(devset, devFeatures, devLabels, bestResult["clf"],
"q4_dev_pred.txt")
if __name__ == "__main__":
main(getArguments())
| apache-2.0 |
xzturn/tensorflow | tensorflow/python/kernel_tests/proto/encode_proto_op_test_base.py | 26 | 8489 | # =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Table-driven test for encode_proto op.
It tests that encode_proto is a lossless inverse of decode_proto
(for the specified fields).
"""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.ops import array_ops
class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto encoding ops."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""EncodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(EncodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
def testBadSizesShape(self):
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError,
r'Invalid shape for field double_value.')
else:
expected_error = (ValueError,
r'Shape must be at least rank 2 but is rank 0')
with self.assertRaisesRegexp(*expected_error):
self.evaluate(
self._encode_module.encode_proto(
sizes=1,
values=[np.double(1.0)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
def testBadInputs(self):
# Invalid field name
with self.assertRaisesOpError('Unknown field: non_existent_field'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['non_existent_field']))
# Incorrect types.
with self.assertRaisesOpError('Incompatible type for field double_value.'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
# Incorrect shapes of sizes.
for sizes_value in 1, np.array([[[0, 0]]]):
with self.assertRaisesOpError(
r'sizes should be batch_size \+ \[len\(field_names\)\]'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=sizes_value,
values=[np.array([[0.0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
else:
with self.cached_session():
sizes = array_ops.placeholder(dtypes.int32)
values = array_ops.placeholder(dtypes.float64)
self._encode_module.encode_proto(
sizes=sizes,
values=[values],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']).eval(feed_dict={
sizes: sizes_value,
values: [[0.0]]
})
# Inconsistent shapes of values.
with self.assertRaisesOpError('Values must match up to the last dimension'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[np.array([[0.0]]),
np.array([[0], [0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']))
else:
with self.cached_session():
values1 = array_ops.placeholder(dtypes.float64)
values2 = array_ops.placeholder(dtypes.int32)
(self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[values1, values2],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']).eval(feed_dict={
values1: [[0.0]],
values2: [[0], [0]]
}))
def _testRoundtrip(self, in_bufs, message_type, fields):
field_names = [f.name for f in fields]
out_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=out_types)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
in_obj = test_example_pb2.TestValue()
in_obj.ParseFromString(in_buf)
out_obj = test_example_pb2.TestValue()
out_obj.ParseFromString(out_buf)
# Check that the deserialized objects are identical.
self.assertEqual(in_obj, out_obj)
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtrip(self, case):
in_bufs = [value.SerializeToString() for value in case.values]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtripPacked(self, case):
# Now try with the packed serialization.
# We test the packed representations by loading the same test cases using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the test
# message using the packed version of the proto.
in_bufs = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(
value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
| apache-2.0 |
lepricon49/headphones | lib/bs4/builder/__init__.py | 73 | 11234 | from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
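# Illustrative sketch, not part of the upstream module: how the
# cdata_list_attributes table above is applied. Splitting is done by
# TreeBuilder._replace_cdata_list_attribute_values(); the tag and attribute
# values below are invented for the example.
def _example_split_class_attribute():
    builder = HTMLTreeBuilder()
    attrs = builder._replace_cdata_list_attribute_values(
        'p', {'class': 'foo bar', 'id': 'main'})
    # 'class' is listed under '*' above, so it is split into a list;
    # 'id' is not a CDATA-list attribute and stays a single string.
    return attrs  # {'class': ['foo', 'bar'], 'id': 'main'}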
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
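# Illustrative sketch, not part of the upstream module: once the imports above
# have run, builder_registry holds whichever parser classes could be loaded.
# BeautifulSoup performs this lookup internally; a caller could do the same.
def _example_pick_builder():
    # Returns the most recently registered builder advertising the 'html'
    # feature (lxml if installed, otherwise html5lib or the stdlib parser),
    # or None if nothing matches.
    return builder_registry.lookup(HTML)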
| gpl-3.0 |
fw1121/genomics | bcftbx/Pipeline.py | 1 | 28839 | #!/usr/bin/env python
#
# Pipeline.py: classes for running scripts iteratively
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# Pipeline.py
#
#########################################################################
"""
Classes for running scripts iteratively over a collection of data files.
The essential classes are:
* Job: wrapper for setting up, submitting and monitoring running
scripts
* PipelineRunner: queue and run a script multiple times on a standard set
of inputs
* SolidPipelineRunner: subclass of PipelineRunner specifically for
running on SOLiD data (i.e. pairs of csfasta/qual files)
There are also some useful methods:
* GetSolidDataFiles: collect csfasta/qual file pairs from a specific
directory
* GetSolidPairedEndFile: collect csfasta/qual file pairs for paired
end data
* GetFastqFiles: collect fastq files from a specific directory
* GetFastqGzFiles: collect gzipped fastq files
The PipelineRunners depend on the JobRunner instances (created from
classes in the JobRunner module) to interface with the job management
system. So typical usage might look like:
>>> import JobRunner
>>> import Pipeline
>>> runner = JobRunner.GEJobRunner() # to use Grid Engine
>>> pipeline = Pipeline.PipelineRunner(runner)
>>> pipeline.queueJob(...)
>>> pipeline.run()
"""
#######################################################################
# Module metadata
#######################################################################
__version__ = "0.1.3"
#######################################################################
# Import modules that this module depends on
#######################################################################
import sys
import os
import re
import time
import Queue
import logging
#######################################################################
# Class definitions
#######################################################################
# Job: container for a script run
class Job:
"""Wrapper class for setting up, submitting and monitoring running scripts
Set up a job by creating a Job instance specifying the name, working directory,
script file to execute, and arguments to be supplied to the script.
The job is started by invoking the 'start' method; its status can be checked
with the 'isRunning' method, and terminated and restarted using the 'terminate'
and 'restart' methods respectively.
Information about the job can also be accessed via its properties. The following
properties record the original parameters supplied on instantiation:
name
working_dir
script
args
label
group_label
Additional information is set once the job has started or stopped running:
job_id The id number for the running job returned by the JobRunner
log The log file for the job (relative to working_dir)
start_time The start time (seconds since the epoch)
end_time The end time (seconds since the epoch)
The Job class uses a JobRunner instance (which supplies the necessary methods for
starting, stopping and monitoring) for low-level job interactions.
"""
def __init__(self,runner,name,dirn,script,args,label=None,group=None):
"""Create an instance of Job.
Arguments:
runner: a JobRunner instance supplying job control methods
name: name to give the running job
dirn: directory to run the script in
script: script file to submit, either a full path, relative path to dirn, or
must be on the user's PATH in the environment where jobs are executed
args: Python list with the arguments to supply to the script when it is
submitted
label: (optional) arbitrary string to use as an identifier for the job
group: (optional) arbitrary string to use as a 'group' identifier;
assign the same 'group' label to multiple jobs to indicate they're
related
"""
self.name = name
self.working_dir = dirn
self.script = script
self.args = args
self.label = label
self.group_label = group
self.job_id = None
self.log = None
self.submitted = False
self.failed = False
self.terminated = False
self.start_time = None
self.end_time = None
self.home_dir = os.getcwd()
self.__finished = False
self.__runner = runner
# Time interval to use when checking for job start (seconds)
# Can be floating point number e.g. 0.1 (= 100ms)
self.__poll_interval = 1
# Time out period to use before giving up on job submission
# (seconds)
self.__timeout = 3600
def start(self):
"""Start the job running
Returns:
Id for job
"""
if not self.submitted and not self.__finished:
self.job_id = self.__runner.run(self.name,self.working_dir,self.script,self.args)
self.submitted = True
self.start_time = time.time()
if self.job_id is None:
# Failed to submit correctly
logging.warning("Job submission failed")
self.failed = True
self.__finished = True
self.end_time = self.start_time
return self.job_id
self.submitted = True
self.start_time = time.time()
self.log = self.__runner.logFile(self.job_id)
# Wait for evidence that the job has started
logging.debug("Waiting for job to start")
time_waiting = 0
while not self.__runner.isRunning(self.job_id) and not os.path.exists(self.log):
time.sleep(self.__poll_interval)
time_waiting += self.__poll_interval
if time_waiting > self.__timeout:
# Waited too long for job to start, give up
logging.error("Timed out waiting for job to start")
self.failed = True
self.__finished = True
self.end_time = self.start_time
return self.job_id
logging.debug("Job %s started (%s)" % (self.job_id,
time.asctime(time.localtime(self.start_time))))
# Also report queue (for GE jobs only)
try:
logging.debug("Queue: %s" % self.__runner.queue(self.job_id))
except AttributeError:
pass
return self.job_id
def terminate(self):
"""Terminate a running job
"""
if self.isRunning():
self.__runner.terminate(self.job_id)
self.terminated = True
self.end_time = time.time()
def restart(self):
"""Restart the job
Terminates the job (if still running) and restarts"""
# Terminate running job
if self.isRunning():
self.terminate()
while self.isRunning():
time.sleep(self.__poll_interval)
# Reset flags
self.__finished = False
self.submitted = False
self.terminated = False
self.start_time = None
self.end_time = None
# Resubmit
return self.start()
def isRunning(self):
"""Check if job is still running
"""
if not self.submitted:
return False
if not self.__finished:
if not self.__runner.isRunning(self.job_id):
self.end_time = time.time()
self.__finished = True
return not self.__finished
def errorState(self):
"""Check if the job is in an error state
"""
return self.__runner.errorState(self.job_id)
def status(self):
"""Return descriptive string indicating job status
"""
if self.__finished:
if self.terminated:
return "Terminated"
else:
return "Finished"
elif self.submitted:
if self.terminated:
return "Running pending termination"
else:
return "Running"
else:
return "Waiting"
def wait(self):
"""Wait for job to complete
Block calling process until the job has finished running.
"""
while self.isRunning():
time.sleep(1)
return
@property
def runner(self):
"""Return the JobRunner instance associated with the Job
"""
return self.__runner
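# Illustrative sketch, not part of the upstream module: driving a single Job
# directly. 'runner' is any JobRunner instance (the module docstring uses
# JobRunner.GEJobRunner()); the script and argument names are invented.
def _example_run_single_job(runner):
    job = Job(runner, 'qc_sample1', '/data/run1', 'qc.sh', ['sample1.fastq'])
    job.start()
    job.wait()
    return job.status()  # "Finished", or "Terminated" if it was stopped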
# PipelineRunner: class to set up and run multiple jobs
class PipelineRunner:
"""Class to run and manage multiple concurrent jobs.
PipelineRunner enables multiple jobs to be queued via the 'queueJob' method. The
pipeline is then started using the 'run' method - this starts each job up to a
    specified maximum of concurrent jobs, and then monitors their progress. As jobs
finish, pending jobs are started until all jobs have completed.
Example usage:
>>> p = PipelineRunner()
>>> p.queueJob('/home/foo','foo.sh','bar.in')
... Queue more jobs ...
>>> p.run()
By default the pipeline runs in 'blocking' mode, i.e. 'run' doesn't return until all
jobs have been submitted and have completed; see the 'run' method for details of
how to operate the pipeline in non-blocking mode.
The invoking subprogram can also specify functions that will be called when a job
completes ('jobCompletionHandler'), and when a group completes
('groupCompletionHandler'). These can perform any specific actions that are required
such as sending notification email, setting file ownerships and permissions etc.
"""
def __init__(self,runner,max_concurrent_jobs=4,poll_interval=30,jobCompletionHandler=None,
groupCompletionHandler=None):
"""Create new PipelineRunner instance.
Arguments:
runner: a JobRunner instance
max_concurrent_jobs: maximum number of jobs that the script will allow to run
at one time (default = 4)
poll_interval: time interval (in seconds) between checks on the queue status
(only used when pipeline is run in 'blocking' mode)
"""
# Parameters
self.__runner = runner
self.max_concurrent_jobs = max_concurrent_jobs
self.poll_interval = poll_interval
# Groups
self.groups = []
self.njobs_in_group = {}
# Queue of jobs to run
self.jobs = Queue.Queue()
# Subset that are currently running
self.running = []
# Subset that have completed
self.completed = []
# Callback functions
self.handle_job_completion = jobCompletionHandler
self.handle_group_completion = groupCompletionHandler
def queueJob(self,working_dir,script,script_args,label=None,group=None):
"""Add a job to the pipeline.
The job will be queued and executed once the pipeline's 'run' method has been
executed.
Arguments:
working_dir: directory to run the job in
script: script file to run
script_args: arguments to be supplied to the script at run time
label: (optional) arbitrary string to use as an identifier in the job name
group: (optional) arbitrary string to use as a 'group' identifier;
assign the same 'group' label to multiple jobs to indicate they're
related
"""
job_name = os.path.splitext(os.path.basename(script))[0]+'.'+str(label)
if group:
if group not in self.groups:
# New group label
self.groups.append(group)
self.njobs_in_group[group] = 1
else:
self.njobs_in_group[group] += 1
self.jobs.put(Job(self.__runner,job_name,working_dir,script,script_args,
label,group))
logging.debug("Added job: now %d jobs in pipeline" % self.jobs.qsize())
def nWaiting(self):
"""Return the number of jobs still waiting to be started
"""
return self.jobs.qsize()
def nRunning(self):
"""Return the number of jobs currently running
"""
return len(self.running)
def nCompleted(self):
"""Return the number of jobs that have completed
"""
return len(self.completed)
def isRunning(self):
"""Check whether the pipeline is still running
Returns True if the pipeline is still running (i.e. has either
running jobs, waiting jobs or both) and False otherwise.
"""
# First update the pipeline status
self.update()
# Return the status
return (self.nWaiting() > 0 or self.nRunning() > 0)
def run(self,blocking=True):
"""Execute the jobs in the pipeline
Each job previously added to the pipeline by 'queueJob' will be
started and checked periodically for termination.
By default 'run' operates in 'blocking' mode, so it doesn't return
until all jobs have been submitted and have finished executing.
To run in non-blocking mode, set the 'blocking' argument to False.
In this mode the pipeline starts and returns immediately; it is
the responsibility of the calling subprogram to then periodically
check the status of the pipeline, e.g.
>>> p = PipelineRunner()
>>> p.queueJob('/home/foo','foo.sh','bar.in')
>>> p.run()
>>> while p.isRunning():
>>> time.sleep(30)
"""
logging.debug("PipelineRunner: started")
logging.debug("Blocking mode : %s" % blocking)
# Report set up
print "Initially %d jobs waiting, %d running, %d finished" % \
(self.nWaiting(),self.nRunning(),self.nCompleted())
# Initial update sets the jobs running
self.update()
if blocking:
while self.isRunning():
# Pipeline is still executing so wait
time.sleep(self.poll_interval)
# Pipeline has finished
print "Pipeline completed"
def update(self):
"""Update the pipeline
The 'update' method checks and updates the status of running jobs,
and submits any waiting jobs if space is available.
"""
# Flag to report updated status
updated_status = False
# Look for running jobs that have completed
for job in self.running[::-1]:
if not job.isRunning():
# Job has completed
self.running.remove(job)
self.completed.append(job)
updated_status = True
print "Job has completed: %s: %s %s (%s)" % (
job.job_id,
job.name,
os.path.basename(job.working_dir),
time.asctime(time.localtime(job.end_time)))
# Invoke callback on job completion
if self.handle_job_completion:
self.handle_job_completion(job)
# Check for completed group
if job.group_label is not None:
jobs_in_group = []
for check_job in self.completed:
if check_job.group_label == job.group_label:
jobs_in_group.append(check_job)
if self.njobs_in_group[job.group_label] == len(jobs_in_group):
# All jobs in group have completed
print "Group '%s' has completed" % job.group_label
# Invoke callback on group completion
if self.handle_group_completion:
self.handle_group_completion(job.group_label,jobs_in_group)
else:
# Job is running, check it's not in an error state
if job.errorState():
# Terminate jobs in error state
logging.warning("Terminating job %s in error state" % job.job_id)
job.terminate()
# Submit new jobs to GE queue
while not self.jobs.empty() and self.nRunning() < self.max_concurrent_jobs:
next_job = self.jobs.get()
next_job.start()
self.running.append(next_job)
updated_status = True
print "Job has started: %s: %s %s (%s)" % (
next_job.job_id,
next_job.name,
os.path.basename(next_job.working_dir),
time.asctime(time.localtime(next_job.start_time)))
if self.jobs.empty():
logging.debug("PipelineRunner: all jobs now submitted")
# Report
if updated_status:
print "Currently %d jobs waiting, %d running, %d finished" % \
(self.nWaiting(),self.nRunning(),self.nCompleted())
def report(self):
"""Return a report of the pipeline status
"""
# Pipeline status
if self.nRunning() > 0:
status = "RUNNING"
elif self.nWaiting() > 0:
status = "WAITING"
else:
status = "COMPLETED"
report = "Pipeline status at %s: %s\n\n" % (time.asctime(),status)
# Report directories
dirs = []
for job in self.completed:
if job.working_dir not in dirs:
dirs.append(job.working_dir)
for dirn in dirs:
report += "\t%s\n" % dirn
# Report jobs waiting
if self.nWaiting() > 0:
report += "\n%d jobs waiting to run\n" % self.nWaiting()
# Report jobs running
if self.nRunning() > 0:
report += "\n%d jobs running:\n" % self.nRunning()
for job in self.running:
report += "\t%s\t%s\t%s\n" % (job.label,job.log,job.working_dir)
# Report completed jobs
if self.nCompleted() > 0:
report += "\n%d jobs completed:\n" % self.nCompleted()
for job in self.completed:
report += "\t%s\t%s\t%s\t%.1fs\t[%s]\n" % (job.label,
job.log,
job.working_dir,
(job.end_time - job.start_time),
job.status())
return report
def __del__(self):
"""Deal with deletion of the pipeline
If the pipeline object is deleted while still running
then terminate all running jobs.
"""
# Empty the queue
while not self.jobs.empty():
self.jobs.get()
# Terminate the running jobs
for job in self.running:
logging.debug("Terminating job %s" % job.job_id)
print "Terminating job %s" % job.job_id
try:
job.terminate()
except Exception, ex:
logging.error("Failed to terminate job %s: %s" % (job.job_id,ex))
class SolidPipelineRunner(PipelineRunner):
"""Class to run and manage multiple jobs for Solid data pipelines
Subclass of PipelineRunner specifically for dealing with scripts
that take Solid data (i.e. csfasta/qual file pairs).
Defines the addDir method in addition to all methods already defined
in the base class; use this method one or more times to specify
directories with data to run the script on. The SOLiD data file pairs
in each specified directory will be located automatically.
For example:
solid_pipeline = SolidPipelineRunner('qc.sh')
solid_pipeline.addDir('/path/to/datadir')
solid_pipeline.run()
"""
def __init__(self,runner,script,max_concurrent_jobs=4,poll_interval=30):
PipelineRunner.__init__(self,runner)
self.script = script
def addDir(self,dirn):
logging.debug("Add dir: %s" % dirn)
run_data = GetSolidDataFiles(dirn)
for data in run_data:
self.queueJob(dirn,self.script,data)
#######################################################################
# Module Functions
#######################################################################
def GetSolidDataFiles(dirn,pattern=None,file_list=None):
"""Return list of csfasta/qual file pairs in target directory
Note that files with names ending in '_T_F3' will be rejected
as these are assumed to come from the preprocess filtering stage.
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of (csfasta,qual) file pair tuples.
"""
data_files = []
if file_list is not None:
        # Use supplied file list
logging.debug("Ignoring dirn argument and using supplied file list")
all_files = file_list
else:
# Check directory exists
if not os.path.isdir(dirn):
logging.error("'%s' not a directory: unable to collect SOLiD files" % dirn)
return []
# Gather data files
logging.debug("Collecting csfasta/qual file pairs in %s" % dirn)
all_files = os.listdir(dirn)
all_files.sort()
# Regular expression pattern
if pattern is not None:
regex = re.compile(pattern)
# Look for csfasta and matching qual files
for filen in all_files:
logging.debug("Examining file %s" % filen)
root = os.path.splitext(filen)[0]
ext = os.path.splitext(filen)[1]
if ext == ".qual":
qual = filen
# Reject names ending with "_T_F3"
try:
i = root.rindex('_T_F3')
logging.debug("Rejecting %s" % qual)
continue
except ValueError:
# Name is okay, ignore
pass
# Match csfasta names which don't have "_QV" in them
try:
i = root.rindex('_QV')
csfasta = root[:i]+root[i+3:]+".csfasta"
except ValueError:
# QV not in name, try to match whole name
csfasta = root+".csfasta"
if csfasta in all_files:
# If a regex pattern is specified then also filter on it
if pattern is None or regex.search(csfasta):
data_files.append((csfasta,qual))
else:
logging.critical("Unable to get csfasta for %s" % filen)
# Done - return file pairs
return data_files
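# Illustrative sketch, not part of the upstream module: the pairing performed
# by GetSolidDataFiles, using the file_list argument so no directory access is
# needed. The file names are invented.
def _example_solid_pairing():
    names = ['solid0123_sample_F3.csfasta',
             'solid0123_sample_F3_QV.qual',
             'solid0123_sample_T_F3.csfasta',
             'solid0123_sample_T_F3_QV.qual']
    # The '_T_F3' pair is rejected as preprocess-filter output, leaving:
    # [('solid0123_sample_F3.csfasta', 'solid0123_sample_F3_QV.qual')]
    return GetSolidDataFiles('.', file_list=names)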
def GetSolidPairedEndFiles(dirn,pattern=None,file_list=None):
"""Return list of csfasta/qual file pairs for paired end data
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of (csfasta_F3,qual_F3,csfasta_F5,qual_F5) tuples.
"""
# Get list of pairs
file_pairs = GetSolidDataFiles(dirn,pattern=pattern,file_list=file_list)
if not file_pairs:
return []
# Now match pairs of pairs: files with the same name except for
# 'F3' and 'F5' or 'F5-BC'
logging.debug("Matching F3 csfasta/qual file pairs with F5 counterparts")
key_list = []
matched_files = dict()
for pair in file_pairs:
# Remove _F3, _F5 and _F5-BC components from csfasta to
# use as a key
key = pair[0].replace('_F3','').replace('_F5-BC','').replace('_F5','')
logging.debug("Key: %s for %s" % (key,pair))
if key in key_list:
# Already has an entry
matched_files[key].append(pair)
else:
# New key
key_list.append(key)
matched_files[key] = [pair]
# Check pairs of pairs
data_files = []
for key in key_list:
if len(matched_files[key]) != 2:
logging.debug("discarding pairs: %s" % matched_files[key])
else:
# Look for F3 and F5s and put into order
try:
matched_files[key][0][0].index('F5')
# F5 pair are first set
f3_index = 1
f5_index = 0
except ValueError:
# F3 pair are first set
f3_index = 0
f5_index = 1
# Pull out files and append to list in the
# correct order (F3 then F5)
csfasta_f3 = matched_files[key][f3_index][0]
qual_f3 = matched_files[key][f3_index][1]
csfasta_f5 = matched_files[key][f5_index][0]
qual_f5 = matched_files[key][f5_index][1]
data_files.append((csfasta_f3,qual_f3,csfasta_f5,qual_f5))
# Done - return file list
return data_files
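# Illustrative sketch, not part of the upstream module: paired-end matching.
# F3 and F5-BC pairs sharing a base name come back as one 4-element tuple,
# always ordered F3 csfasta, F3 qual, F5 csfasta, F5 qual. Names are invented.
def _example_solid_paired_end():
    names = ['run1_sample_F3.csfasta',
             'run1_sample_F3_QV.qual',
             'run1_sample_F5-BC.csfasta',
             'run1_sample_F5-BC_QV.qual']
    # Returns [('run1_sample_F3.csfasta', 'run1_sample_F3_QV.qual',
    #           'run1_sample_F5-BC.csfasta', 'run1_sample_F5-BC_QV.qual')]
    return GetSolidPairedEndFiles('.', file_list=names)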
def GetFastqFiles(dirn,pattern=None,file_list=None):
"""Return list of fastq files in target directory
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of single-element tuples, one for each fastq file found.
"""
data_files = []
if file_list is not None:
        # Use supplied file list
logging.debug("Ignoring dirn argument and using supplied file list")
all_files = file_list
else:
# Check directory exists
if not os.path.isdir(dirn):
logging.error("'%s' not a directory: unable to collect fastq files" % dirn)
return []
# Gather data files
logging.debug("Collecting fastq files in %s" % dirn)
all_files = os.listdir(dirn)
all_files.sort()
# Regular expression pattern
if pattern is not None:
regex = re.compile(pattern)
# Look for csfasta and matching qual files
for filen in all_files:
logging.debug("Examining file %s" % filen)
root = os.path.splitext(filen)[0]
ext = os.path.splitext(filen)[1]
if ext == ".fastq":
# If a regex pattern is specified then also filter on it
if pattern is None or regex.search(root):
data_files.append((filen,))
# Done - return file list
return data_files
def GetFastqGzFiles(dirn,pattern=None,file_list=None):
"""Return list of fastq.gz files in target directory
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of single-element tuples, one for each fastq.gz file found.
"""
data_files = []
if file_list is not None:
        # Use supplied file list
logging.debug("Ignoring dirn argument and using supplied file list")
all_files = file_list
else:
# Check directory exists
if not os.path.isdir(dirn):
logging.error("'%s' not a directory: unable to collect fastq.gz files" % dirn)
return []
# Gather data files
logging.debug("Collecting fastq.gz files in %s" % dirn)
all_files = os.listdir(dirn)
all_files.sort()
# Regular expression pattern
if pattern is not None:
regex = re.compile(pattern)
# Look for .fastq.gz
for filen in all_files:
logging.debug("Examining file %s" % filen)
if filen.split('.')[-1] == "gz":
# Ends with gz
try:
if filen.split('.')[-2] == "fastq":
# If a regex pattern is specified then also filter on it
if pattern is None or regex.search(filen.split('.')[-3]):
data_files.append((filen,))
except IndexError:
# Ignore
pass
# Done - return file list
return data_files
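# Illustrative sketch, not part of the upstream module: the pattern argument
# shared by the collector functions above is a regular expression applied to
# the base file name, e.g. to select only R1 reads. Names are invented.
def _example_collect_r1_fastqs():
    names = ['sample1_R1.fastq', 'sample1_R2.fastq', 'notes.txt']
    # Returns [('sample1_R1.fastq',)]
    return GetFastqFiles('.', pattern=r'_R1$', file_list=names)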
| artistic-2.0 |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/wtforms/meta.py | 79 | 3822 | from wtforms.utils import WebobInputWrapper
from wtforms import i18n
class DefaultMeta(object):
"""
This is the default Meta class which defines all the default values and
therefore also the 'API' of the class Meta interface.
"""
# -- Basic form primitives
def bind_field(self, form, unbound_field, options):
"""
bind_field allows potential customization of how fields are bound.
The default implementation simply passes the options to
:meth:`UnboundField.bind`.
:param form: The form.
:param unbound_field: The unbound field.
:param options:
A dictionary of options which are typically passed to the field.
:return: A bound field
"""
return unbound_field.bind(form=form, **options)
def wrap_formdata(self, form, formdata):
"""
wrap_formdata allows doing custom wrappers of WTForms formdata.
The default implementation detects webob-style multidicts and wraps
        them, otherwise passes formdata back unchanged.
:param form: The form.
:param formdata: Form data.
:return: A form-input wrapper compatible with WTForms.
"""
if formdata is not None and not hasattr(formdata, 'getlist'):
if hasattr(formdata, 'getall'):
return WebobInputWrapper(formdata)
else:
raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")
return formdata
def render_field(self, field, render_kw):
"""
render_field allows customization of how widget rendering is done.
The default implementation calls ``field.widget(field, **render_kw)``
"""
other_kw = getattr(field, 'render_kw', None)
if other_kw is not None:
render_kw = dict(other_kw, **render_kw)
return field.widget(field, **render_kw)
# -- CSRF
csrf = False
csrf_field_name = 'csrf_token'
csrf_secret = None
csrf_context = None
csrf_class = None
def build_csrf(self, form):
"""
Build a CSRF implementation. This is called once per form instance.
The default implementation builds the class referenced to by
:attr:`csrf_class` with zero arguments. If `csrf_class` is ``None``,
will instead use the default implementation
:class:`wtforms.csrf.session.SessionCSRF`.
:param form: The form.
:return: A CSRF implementation.
"""
if self.csrf_class is not None:
return self.csrf_class()
from wtforms.csrf.session import SessionCSRF
return SessionCSRF()
# -- i18n
locales = False
cache_translations = True
translations_cache = {}
def get_translations(self, form):
"""
Override in subclasses to provide alternate translations factory.
See the i18n documentation for more.
:param form: The form.
:return: An object that provides gettext() and ngettext() methods.
"""
locales = self.locales
if locales is False:
return None
if self.cache_translations:
# Make locales be a hashable value
locales = tuple(locales) if locales else None
translations = self.translations_cache.get(locales)
if translations is None:
translations = self.translations_cache[locales] = i18n.get_translations(locales)
return translations
return i18n.get_translations(locales)
# -- General
def update_values(self, values):
"""
Given a dictionary of values, update values on this `Meta` instance.
"""
for key, value in values.items():
setattr(self, key, value)
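# Illustrative sketch, not part of the upstream module: behaviour is customised
# by subclassing DefaultMeta (typically via a Form's inner Meta class). The
# locale codes and CSS class below are example values only.
class _ExampleMeta(DefaultMeta):
    locales = ('en_US', 'en')
    def render_field(self, field, render_kw):
        # Merge in a default CSS class, then defer to the stock implementation.
        render_kw = dict({'class': 'form-control'}, **render_kw)
        return super(_ExampleMeta, self).render_field(field, render_kw)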
| mit |
abhishekgahlot/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
    p = clf.decision_function([[x1, x2]])  # pass a 2D array: one sample per row
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/django/core/serializers/json.py | 107 | 2053 | """
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def end_serialization(self):
self.options.pop('stream', None)
self.options.pop('fields', None)
self.options.pop('use_natural_keys', None)
simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)
def getvalue(self):
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(simplejson.load(stream), **options):
yield obj
class DjangoJSONEncoder(simplejson.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def default(self, o):
if isinstance(o, datetime.datetime):
d = datetime_safe.new_datetime(o)
return d.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
elif isinstance(o, datetime.date):
d = datetime_safe.new_date(o)
return d.strftime(self.DATE_FORMAT)
elif isinstance(o, datetime.time):
return o.strftime(self.TIME_FORMAT)
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
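# Illustrative sketch, not part of the upstream module: the encoder can also be
# used directly for ad-hoc structures mixing dates and decimals. The payload is
# invented; key order in the output may vary.
def _example_dump_payload():
    payload = {'when': datetime.datetime(2010, 9, 11, 12, 30),
               'price': decimal.Decimal('19.99')}
    # e.g. '{"when": "2010-09-11 12:30:00", "price": "19.99"}'
    return simplejson.dumps(payload, cls=DjangoJSONEncoder)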
| apache-2.0 |
moyogo/tachyfont | run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/ttLib/tables/E_B_L_C_.py | 11 | 25091 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
dependencies = ['EBDT']
# This method can be overridden in subclasses to support new formats
# without changing the other implementation. Also can be used as a
	# convenience method for converting a font file to an alternative format.
def getIndexFormatClass(self, indexFormat):
return eblc_sub_table_classes[indexFormat]
def decompile(self, data, ttFont):
# Save the original data because offsets are from the start of the table.
origData = data
dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self)
self.strikes = []
for curStrikeIndex in range(self.numSizes):
curStrike = Strike()
self.strikes.append(curStrike)
curTable = curStrike.bitmapSizeTable
dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable)
for metric in ('hori', 'vert'):
metricObj = SbitLineMetrics()
vars(curTable)[metric] = metricObj
dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj)
dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable)
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
for subtableIndex in range(curTable.numberOfIndexSubTables):
lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
upperBound = lowerBound + indexSubTableArraySize
data = origData[lowerBound:upperBound]
tup = struct.unpack(indexSubTableArrayFormat, data)
(firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
data = origData[offsetToIndexSubTable:]
tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize])
(indexFormat, imageFormat, imageDataOffset) = tup
indexFormatClass = self.getIndexFormatClass(indexFormat)
indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont)
indexSubTable.firstGlyphIndex = firstGlyphIndex
indexSubTable.lastGlyphIndex = lastGlyphIndex
indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
indexSubTable.indexFormat = indexFormat
indexSubTable.imageFormat = imageFormat
indexSubTable.imageDataOffset = imageDataOffset
curStrike.indexSubTables.append(indexSubTable)
def compile(self, ttFont):
dataList = []
self.numSizes = len(self.strikes)
dataList.append(sstruct.pack(eblcHeaderFormat, self))
# Data size of the header + bitmapSizeTable needs to be calculated
# in order to form offsets. This value will hold the size of the data
# in dataList after all the data is consolidated in dataList.
dataSize = len(dataList[0])
# The table will be structured in the following order:
# (0) header
# (1) Each bitmapSizeTable [1 ... self.numSizes]
# (2) Alternate between indexSubTableArray and indexSubTable
# for each bitmapSizeTable present.
#
# The issue is maintaining the proper offsets when table information
# gets moved around. All offsets and size information must be recalculated
# when building the table to allow editing within ttLib and also allow easy
# import/export to and from XML. All of this offset information is lost
# when exporting to XML so everything must be calculated fresh so importing
# from XML will work cleanly. Only byte offset and size information is
# calculated fresh. Count information like numberOfIndexSubTables is
# checked through assertions. If the information in this table was not
# touched or was changed properly then these types of values should match.
#
# The table will be rebuilt the following way:
# (0) Precompute the size of all the bitmapSizeTables. This is needed to
# compute the offsets properly.
# (1) For each bitmapSizeTable compute the indexSubTable and
# indexSubTableArray pair. The indexSubTable must be computed first
# so that the offset information in indexSubTableArray can be
# calculated. Update the data size after each pairing.
# (2) Build each bitmapSizeTable.
# (3) Consolidate all the data into the main dataList in the correct order.
for curStrike in self.strikes:
dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
indexSubTablePairDataList = []
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
curTable.indexSubTableArrayOffset = dataSize
# Precompute the size of the indexSubTableArray. This information
# is important for correctly calculating the new value for
# additionalOffsetToIndexSubtable.
sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
lowerBound = dataSize
dataSize += sizeOfSubTableArray
upperBound = dataSize
indexSubTableDataList = []
for indexSubTable in curStrike.indexSubTables:
indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
indexSubTable.firstGlyphIndex = min(glyphIds)
indexSubTable.lastGlyphIndex = max(glyphIds)
data = indexSubTable.compile(ttFont)
indexSubTableDataList.append(data)
dataSize += len(data)
curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
for i in curStrike.indexSubTables:
data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
indexSubTablePairDataList.append(data)
indexSubTablePairDataList.extend(indexSubTableDataList)
curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
dataList.append(data)
for metric in ('hori', 'vert'):
metricObj = vars(curTable)[metric]
data = sstruct.pack(sbitLineMetricsFormat, metricObj)
dataList.append(data)
data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
dataList.append(data)
dataList.extend(indexSubTablePairDataList)
return bytesjoin(dataList)
def toXML(self, writer, ttFont):
writer.simpletag('header', [('version', self.version)])
writer.newline()
for curIndex, curStrike in enumerate(self.strikes):
curStrike.toXML(curIndex, writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == 'header':
self.version = safeEval(attrs['version'])
elif name == 'strike':
if not hasattr(self, 'strikes'):
self.strikes = []
strikeIndex = safeEval(attrs['index'])
curStrike = Strike()
curStrike.fromXML(name, attrs, content, ttFont, self)
# Grow the strike array to the appropriate size. The XML format
# allows for the strike index value to be out of order.
if strikeIndex >= len(self.strikes):
self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
self.strikes[strikeIndex] = curStrike
class Strike(object):
def __init__(self):
self.bitmapSizeTable = BitmapSizeTable()
self.indexSubTables = []
def toXML(self, strikeIndex, writer, ttFont):
writer.begintag('strike', [('index', strikeIndex)])
writer.newline()
self.bitmapSizeTable.toXML(writer, ttFont)
writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
writer.newline()
for indexSubTable in self.indexSubTables:
indexSubTable.toXML(writer, ttFont)
writer.endtag('strike')
writer.newline()
def fromXML(self, name, attrs, content, ttFont, locator):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'bitmapSizeTable':
self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
elif name.startswith(_indexSubTableSubclassPrefix):
indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
indexFormatClass = locator.getIndexFormatClass(indexFormat)
indexSubTable = indexFormatClass(None, None)
indexSubTable.indexFormat = indexFormat
indexSubTable.fromXML(name, attrs, content, ttFont)
self.indexSubTables.append(indexSubTable)
class BitmapSizeTable(object):
# Returns all the simple metric names that bitmap size table
# cares about in terms of XML creation.
def _getXMLMetricNames(self):
dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
# Skip the first 3 data names because they are byte offsets and counts.
return dataNames[3:]
def toXML(self, writer, ttFont):
writer.begintag('bitmapSizeTable')
writer.newline()
for metric in ('hori', 'vert'):
getattr(self, metric).toXML(metric, writer, ttFont)
for metricName in self._getXMLMetricNames():
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag('bitmapSizeTable')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Create a lookup for all the simple names that make sense to
# bitmap size table. Only read the information from these names.
dataNames = set(self._getXMLMetricNames())
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'sbitLineMetrics':
direction = attrs['direction']
assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
metricObj = SbitLineMetrics()
metricObj.fromXML(name, attrs, content, ttFont)
vars(self)[direction] = metricObj
elif name in dataNames:
vars(self)[name] = safeEval(attrs['value'])
else:
print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name)
class SbitLineMetrics(object):
def toXML(self, name, writer, ttFont):
writer.begintag('sbitLineMetrics', [('direction', name)])
writer.newline()
for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag('sbitLineMetrics')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name in metricNames:
vars(self)[name] = safeEval(attrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
def __init__(self, data, ttFont):
self.data = data
self.ttFont = ttFont
# TODO Currently non-lazy decompiling doesn't work for this class...
#if not ttFont.lazy:
# self.decompile()
# del self.data, self.ttFont
def __getattr__(self, attr):
# Allow lazy decompile.
if attr[:2] == '__':
raise AttributeError(attr)
if not hasattr(self, "data"):
raise AttributeError(attr)
self.decompile()
del self.data, self.ttFont
return getattr(self, attr)
# This method just takes care of the indexSubHeader. Implementing subclasses
# should call it to compile the indexSubHeader and then continue compiling
# the remainder of their unique format.
def compile(self, ttFont):
return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
# Creates the XML for bitmap glyphs. Each index sub table basically makes
# the same XML except for specific metric information that is written
# out via a method call that a subclass implements optionally.
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
('imageFormat', self.imageFormat),
('firstGlyphIndex', self.firstGlyphIndex),
('lastGlyphIndex', self.lastGlyphIndex),
])
writer.newline()
self.writeMetrics(writer, ttFont)
		# Write out the names as that's all that's needed to rebuild etc.
# For font debugging of consecutive formats the ids are also written.
# The ids are not read when moving from the XML format.
glyphIds = map(ttFont.getGlyphID, self.names)
for glyphName, glyphId in zip(self.names, glyphIds):
writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Read all the attributes. Even though the glyph indices are
# recalculated, they are still read in case there needs to
# be an immediate export of the data.
self.imageFormat = safeEval(attrs['imageFormat'])
self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
self.readMetrics(name, attrs, content, ttFont)
self.names = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'glyphLoc':
self.names.append(attrs['name'])
# A helper method that writes the metrics for the index sub table. It also
# is responsible for writing the image size for fixed size data since fixed
# size is not recalculated on compile. Default behavior is to do nothing.
def writeMetrics(self, writer, ttFont):
pass
# A helper method that is the inverse of writeMetrics.
def readMetrics(self, name, attrs, content, ttFont):
pass
# This method is for fixed glyph data sizes. There are formats where
# the glyph data is fixed but are actually composite glyphs. To handle
# this the font spec in indexSubTable makes the data the size of the
# fixed size by padding the component arrays. This function abstracts
# out this padding process. Input is data unpadded. Output is data
# padded only in fixed formats. Default behavior is to return the data.
def padBitmapData(self, data):
return data
# Remove any of the glyph locations and names that are flagged as skipped.
# This only occurs in formats {1,3}.
def removeSkipGlyphs(self):
# Determines if a name, location pair is a valid data location.
# Skip glyphs are marked when the size is equal to zero.
def isValidLocation(args):
(name, (startByte, endByte)) = args
return startByte < endByte
# Remove all skip glyphs.
dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(dataList) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
def writeMetrics(self, writer, ttFont):
writer.simpletag('imageSize', value=self.imageSize)
writer.newline()
self.metrics.toXML(writer, ttFont)
def readMetrics(self, name, attrs, content, ttFont):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'imageSize':
self.imageSize = safeEval(attrs['value'])
elif name == BigGlyphMetrics.__name__:
self.metrics = BigGlyphMetrics()
self.metrics.fromXML(name, attrs, content, ttFont)
elif name == SmallGlyphMetrics.__name__:
print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat)
def padBitmapData(self, data):
# Make sure that the data isn't bigger than the fixed size.
assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
# Pad the data so that it matches the fixed size.
pad = (self.imageSize - len(data)) * b'\0'
return data + pad
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
(self.imageSize,) = struct.unpack(">L", self.data[:4])
self.metrics = BigGlyphMetrics()
sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
def compile(self, ttFont):
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure all the ids are consecutive. This is required by Format 2.
assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
		self.imageDataOffset = min(loc[0] for loc in self.locations)
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
return bytesjoin(dataList)
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
pass
class eblc_index_sub_table_4(EblcIndexSubTable):
def decompile(self):
(numGlyphs,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
glyphIds, offsets = list(map(list, zip(*glyphArray)))
# There are one too many glyph ids. Get rid of the last one.
glyphIds.pop()
offsets = [offset + self.imageDataOffset for offset in offsets]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
    def compile(self, ttFont):
        # First make sure that all the data lines up properly. Format 4
        # must have all its data lined up consecutively. If not this will fail.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Pad the glyph id list with a trailing 0 so that it pairs up with the
        # final (sentinel) offset.
        idsPlusPad = list(itertools.chain(glyphIds, [0]))
        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
        dataList += tmp
        data = bytesjoin(dataList)
        return data
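# For reference (added comment, not in the original source): the body of a
# format 4 subtable is numGlyphs (uint32) followed by numGlyphs + 1
# glyphIdOffsetPair records, e.g. for two glyphs:
#     numGlyphs=2, pairs=[(gid0, off0), (gid1, off1), (0, endOffset)]
# where the trailing pair only supplies the offset that closes the last glyph.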
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):

    def decompile(self):
        self.origDataLen = 0
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
        (numGlyphs,) = struct.unpack(">L", data[:4])
        data = data[4:]
        glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
        offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))

    def compile(self, ttFont):
        # zip() returns an iterator on Python 3, so take the minimum of the
        # start offsets directly instead of subscripting the zip object.
        self.imageDataOffset = min(loc[0] for loc in self.locations)
        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList += [struct.pack(">H", curId) for curId in glyphIds]
        # Pad with a dummy glyph id to keep the subtable 32-bit aligned.
        if len(glyphIds) % 2 == 1:
            dataList.append(struct.pack(">H", 0))
        return bytesjoin(dataList)
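# For reference (added comment, not in the original source): format 5 is the
# fixed-size variant with an explicit, possibly sparse, glyph id array, so the
# bitmap of the i-th entry in that array starts at
#     imageDataOffset + i * imageSize
# rather than being addressed through a consecutive glyph id range as in format 2.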
# Dictionary of indexFormat to the class representing that format.
eblc_sub_table_classes = {
    1: eblc_index_sub_table_1,
    2: eblc_index_sub_table_2,
    3: eblc_index_sub_table_3,
    4: eblc_index_sub_table_4,
    5: eblc_index_sub_table_5,
}
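# Illustrative note (added comment, not in the original source): code earlier
# in this module would typically pick the concrete subtable class from this
# mapping when decompiling the table, roughly:
#     subTableClass = eblc_sub_table_classes.get(indexFormat, EblcIndexSubTable)
# with an unrecognized indexFormat falling back to the generic base class.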
| apache-2.0 |
tjanez/ansible | lib/ansible/plugins/terminal/iosxr.py | 19 | 1850 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):

    terminal_prompts_re = [
        re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
        re.compile(r']]>]]>[\r\n]?')
    ]

    terminal_errors_re = [
        re.compile(r"% ?Error"),
        re.compile(r"% ?Bad secret"),
        re.compile(r"invalid input", re.I),
        re.compile(r"(?:incomplete|ambiguous) command", re.I),
        re.compile(r"connection timed out", re.I),
        re.compile(r"[^\r\n]+ not found", re.I),
        re.compile(r"'[^']' +returned error code: ?\d+"),
    ]

    supports_multiplexing = False

    def on_open_shell(self):
        try:
            for cmd in ['terminal length 0', 'terminal exec prompt no-timestamp']:
                self._connection.exec_command(cmd)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
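# For reference (added comment, not in the original source): terminal_prompts_re
# is matched against device output to detect an IOS-XR CLI prompt such as
# "RP/0/RSP0/CPU0:router#", while terminal_errors_re flags output from failed
# commands; on_open_shell() then disables paging and exec-prompt timestamps so
# that command output can be collected and parsed reliably.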
| gpl-3.0 |