repo_name | path | copies | size | content | license
---|---|---|---|---|---|
maxweisspoker/simplebitcoinfuncs | simplebitcoinfuncs/miscbitcoinfuncs.py | 1 | 7265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Misc functions related to Bitcoin, but which didn't feel right being
in the main bitcoin funcs
See _doctester.py for examples of most functions below.
'''
import os
import datetime
from binascii import hexlify, unhexlify
try:
ModuleNotFoundError
except:
ModuleNotFoundError = ImportError
try:
from .hexhashes import hash256
from .ecmath import N
from .base58 import b58e
from .miscfuncs import *
except Exception as e:
if type(e) != ImportError and \
type(e) != ModuleNotFoundError and \
type(e) != ValueError and \
type(e) != SystemError:
raise Exception("Unknown problem with imports.")
from hexhashes import hash256
from ecmath import N
from base58 import b58e
from miscfuncs import *
def genkeyhex():
'''
Generate new random Bitcoin private key, using os.urandom and
double-sha256. Hex format.
'''
while True:
key = hash256(
hexlify(os.urandom(40) + str(datetime.datetime.now())
.encode("utf-8")))
# 40 bytes used instead of 32, as a buffer for any slight
# lack of entropy in urandom
# Double-sha256 used instead of single hash, for entropy
# reasons as well.
# I know, it's nit-picking, but better safe than sorry.
if int(key,16) > 1 and int(key,16) < N:
break
return key
def genkey(outcompressed=True,prefix='80'):
'''
Generate new random Bitcoin private key, using os.urandom and
double-sha256.
'''
key = prefix + genkeyhex()
if outcompressed:
key = key + '01'
return b58e(key)
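# Illustrative usage (output is random; shape shown only), assuming b58e
# performs Base58Check encoding of the hex string:
#   genkey()      -> compressed mainnet WIF key ('80' + key + '01'),
#                    typically starting with 'K' or 'L'
#   genkey(False) -> uncompressed WIF key, typically starting with '5'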
def oppushdatalen(num):
assert isitint(num)
assert num < 4294967296
assert num > 0
if num < 76:
return dechex(num,1)
elif num < 256:
return "4c" + dechex(num,1)
elif num < 65536:
return "4d" + hexreverse(dechex(num,2))
elif num < 4294967296:
return "4e" + hexreverse(dechex(num,4))
def intfromoppushdatalen(oppushdatalenhex):
oppushdatalenhex = strlify(oppushdatalenhex)
if oppushdatalenhex[:2] == "4c":
assert len(oppushdatalenhex) == 4
return int(oppushdatalenhex[2:4],16)
elif oppushdatalenhex[:2] == "4d":
assert len(oppushdatalenhex) == 6
return int(oppushdatalenhex[4:6] +
oppushdatalenhex[2:4],16)
elif oppushdatalenhex[:2] == "4e":
assert len(oppushdatalenhex) == 10
return int(oppushdatalenhex[8:10] +
oppushdatalenhex[6:8] +
oppushdatalenhex[4:6] +
oppushdatalenhex[2:4],16)
else:
assert len(oppushdatalenhex) == 2
return int(oppushdatalenhex,16)
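# Worked round-trips (illustrative), assuming dechex pads to at least the
# given byte count and hexreverse swaps byte order:
#   oppushdatalen(10)  -> '0a'      (direct push, below 0x4c)
#   oppushdatalen(200) -> '4cc8'    (OP_PUSHDATA1)
#   oppushdatalen(600) -> '4d5802'  (OP_PUSHDATA2, little-endian length)
#   intfromoppushdatalen('4d5802') -> 600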
def tovarint(num):
assert isitint(num) and num < 18446744073709551616
if num == 0:
return '00'
elif num < 253:
o = dechex(num,1)
elif num < 65536:
o = hexstrlify(b'\xfd' + unhexlify(dechex(num,2))[::-1])
elif num < 4294967296:
o = hexstrlify(b'\xfe' + unhexlify(dechex(num,4))[::-1])
elif num < 18446744073709551616:
o = hexstrlify(b'\xff' + unhexlify(dechex(num,8))[::-1])
return o
def numvarintbytes(varint):
varint = strlify(varint)
assert len(varint) == 2
if varint == 'ff':
return 9
elif varint == 'fe':
return 5
elif varint == 'fd':
return 3
else:
return 1
def fromvarint(varint):
varint = strlify(varint)
if varint[:2] == 'ff':
assert len(varint) == 18
elif varint[:2] == 'fe':
assert len(varint) == 10
elif varint[:2] == 'fd':
assert len(varint) == 6
else:
assert len(varint) == 2
return int(varint,16)
return int(hexreverse(varint[2:]),16)
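# Illustrative round-trips, under the same dechex/hexreverse assumptions
# as above:
#   tovarint(100)  -> '64'
#   tovarint(1000) -> 'fde803'   (0xfd marker + little-endian uint16)
#   fromvarint('fde803') -> 1000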
def getandstrip_varintdata(data):
'''
Takes a hex string that begins with varint data, and has extra at
the end, and gets the varint integer, strips the varint bytes, and
returns the integer and the remaining data. So rather than having
to manually read the varint prefix, count, and strip, you can do
it in one function. This function will return a tuple of the data
and the leftover.
For example, let's say you are parsing a transaction from
beginning to end, and you know the next byte is a varint byte.
Here's an example:
fd5d010048304502200187af928e9d155c4b1ac9c1c9118153239aba76774f77
5d7c1f9c3e106ff33c0221008822b0f658edec22274d0b6ae9de10ebf2da06b1
bbdaaba4e50eb078f39e3d78014730440220795f0f4f5941a77ae032ecb9e337
53788d7eb5cb0c78d805575d6b00a1d9bfed02203e1f4ad9332d1416ae01e270
38e945bc9db59c732728a383a6f1ed2fb99da7a4014cc952410491bba2510912
a5bd37da1fb5b1673010e43d2c6d812c514e91bfa9f2eb129e1c183329db55bd
868e209aac2fbc02cb33d98fe74bf23f0c235d6126b1d8334f864104865c4029
3a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac09ef122b1
a986818a7cb624532f062c1d1f8722084861c5c3291ccffef4ec687441048d24
55d2403e08708fc1f556002f1b6cd83f992d085097f9974ab08a28838f07896f
bab08f39495e15fa6fad6edbfb1e754e35fa1c7844c41f322a1863d4621353ae
ffffffff0140420f00000000001976a914ae56b4db13554d321c402db3961187
aed1bbed5b88ac00000000
If the above tx fragment is input as a single long string with no
white-space, this function will return the tuple:
('004830...53ae', 'ffffffff...00000000')
See _doctester.py for that example in action.
'''
data = strlify(data)
numbytes = numvarintbytes(data[:2])
varint = data[:2*numbytes]
data = data[2*numbytes:]
tostrip = fromvarint(varint) * 2
return data[:tostrip], data[tostrip:]
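# Minimal usage sketch (hypothetical hex values, not from the original doctests):
#   tovarint(3) == '03', so for the input '03aabbccffffffff':
#   payload, leftover = getandstrip_varintdata('03aabbccffffffff')
#   payload  -> 'aabbcc'    (the 3 varint-counted bytes)
#   leftover -> 'ffffffff'  (whatever follows them)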
def inttoDER(a):
'''
Format an int/long to DER hex format
'''
o = dechex(a,1)
if int(o[:2],16) > 127:
o = '00' + o
olen = dechex(len(o)//2,1)
return '02' + olen + o
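# Worked examples (illustrative), assuming dechex returns minimal big-endian
# hex padded to at least the given byte count:
#   inttoDER(1)   -> '020101'    (INTEGER tag 02, length 01, value 01)
#   inttoDER(200) -> '020200c8'  (leading 00 keeps the DER integer positive)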
def inttoLEB128(intinput):
'''
Convert int/long to unsigned LEB128 format hex
'''
binstr = str(bin(intinput)) \
.lstrip("0b").replace("b","").replace("L","") \
.replace("'","").replace('"',"")
if len(binstr) % 7:
binstr = binstr.zfill(len(binstr) + 7 - (len(binstr) % 7))
bytelist = ""
for i in range(len(binstr) // 7):
if i < ((len(binstr) // 7) - 1):
pad = "1"
else:
pad = "0"
currbyte = binstr[(len(binstr) - (7*i + 7)):(len(binstr) - (7*i))]
currbyte = pad + currbyte
currbyte = dechex(int(currbyte,2))
# assert len(currbyte) == 2
bytelist = bytelist + currbyte
return bytelist
def LEB128toint(LEBinput):
'''
Convert unsigned LEB128 hex to integer
'''
reversedbytes = hexreverse(LEBinput)
binstr = ""
for i in range(len(LEBinput) // 2):
if i == 0:
assert int(reversedbytes[2*i:(2*i + 2)],16) < 128
else:
assert int(reversedbytes[2*i:(2*i + 2)],16) >= 128
tempbin = str(bin(int(reversedbytes[2*i:(2*i + 2)],16))) \
.lstrip("0b").replace("b","").replace("L","") \
.replace("'","").replace('"',"") \
.zfill(8)
binstr += tempbin[1:]
return int(binstr,2)
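# Worked round-trip (illustrative):
#   300 = 0b100101100; split into 7-bit groups low-to-high: 0101100, 0000010;
#   every byte except the last gets a continuation (high) bit, giving
#   0xac 0x02, so inttoLEB128(300) == 'ac02' and LEB128toint('ac02') == 300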
| mit |
trenton3983/Data_Science_from_Scratch | code-python3/logistic_regression.py | 12 | 6055 | from collections import Counter
from functools import partial, reduce
from linear_algebra import dot, vector_add
from gradient_descent import maximize_stochastic, maximize_batch
from working_with_data import rescale
from machine_learning import train_test_split
from multiple_regression import estimate_beta, predict
import math, random
def logistic(x):
return 1.0 / (1 + math.exp(-x))
def logistic_prime(x):
return logistic(x) * (1 - logistic(x))
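# logistic is the standard sigmoid, mapping any real x into (0, 1);
# logistic_prime uses the identity sigma'(x) = sigma(x) * (1 - sigma(x)).
# For example, logistic(0) == 0.5 and logistic_prime(0) == 0.25.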
def logistic_log_likelihood_i(x_i, y_i, beta):
if y_i == 1:
return math.log(logistic(dot(x_i, beta)))
else:
return math.log(1 - logistic(dot(x_i, beta)))
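# Per-point log likelihood computed above (y_i is always 0 or 1 here):
#   y_i * log(sigma(x_i . beta)) + (1 - y_i) * log(1 - sigma(x_i . beta))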
def logistic_log_likelihood(x, y, beta):
return sum(logistic_log_likelihood_i(x_i, y_i, beta)
for x_i, y_i in zip(x, y))
def logistic_log_partial_ij(x_i, y_i, beta, j):
"""here i is the index of the data point,
j the index of the derivative"""
return (y_i - logistic(dot(x_i, beta))) * x_i[j]
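# This is the partial derivative of the per-point log likelihood with
# respect to beta_j: (y_i - sigma(x_i . beta)) * x_i[j]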
def logistic_log_gradient_i(x_i, y_i, beta):
"""the gradient of the log likelihood
corresponding to the i-th data point"""
return [logistic_log_partial_ij(x_i, y_i, beta, j)
for j, _ in enumerate(beta)]
def logistic_log_gradient(x, y, beta):
return reduce(vector_add,
[logistic_log_gradient_i(x_i, y_i, beta)
for x_i, y_i in zip(x,y)])
if __name__ == "__main__":
data = [(0.7,48000,1),(1.9,48000,0),(2.5,60000,1),(4.2,63000,0),(6,76000,0),(6.5,69000,0),(7.5,76000,0),(8.1,88000,0),(8.7,83000,1),(10,83000,1),(0.8,43000,0),(1.8,60000,0),(10,79000,1),(6.1,76000,0),(1.4,50000,0),(9.1,92000,0),(5.8,75000,0),(5.2,69000,0),(1,56000,0),(6,67000,0),(4.9,74000,0),(6.4,63000,1),(6.2,82000,0),(3.3,58000,0),(9.3,90000,1),(5.5,57000,1),(9.1,102000,0),(2.4,54000,0),(8.2,65000,1),(5.3,82000,0),(9.8,107000,0),(1.8,64000,0),(0.6,46000,1),(0.8,48000,0),(8.6,84000,1),(0.6,45000,0),(0.5,30000,1),(7.3,89000,0),(2.5,48000,1),(5.6,76000,0),(7.4,77000,0),(2.7,56000,0),(0.7,48000,0),(1.2,42000,0),(0.2,32000,1),(4.7,56000,1),(2.8,44000,1),(7.6,78000,0),(1.1,63000,0),(8,79000,1),(2.7,56000,0),(6,52000,1),(4.6,56000,0),(2.5,51000,0),(5.7,71000,0),(2.9,65000,0),(1.1,33000,1),(3,62000,0),(4,71000,0),(2.4,61000,0),(7.5,75000,0),(9.7,81000,1),(3.2,62000,0),(7.9,88000,0),(4.7,44000,1),(2.5,55000,0),(1.6,41000,0),(6.7,64000,1),(6.9,66000,1),(7.9,78000,1),(8.1,102000,0),(5.3,48000,1),(8.5,66000,1),(0.2,56000,0),(6,69000,0),(7.5,77000,0),(8,86000,0),(4.4,68000,0),(4.9,75000,0),(1.5,60000,0),(2.2,50000,0),(3.4,49000,1),(4.2,70000,0),(7.7,98000,0),(8.2,85000,0),(5.4,88000,0),(0.1,46000,0),(1.5,37000,0),(6.3,86000,0),(3.7,57000,0),(8.4,85000,0),(2,42000,0),(5.8,69000,1),(2.7,64000,0),(3.1,63000,0),(1.9,48000,0),(10,72000,1),(0.2,45000,0),(8.6,95000,0),(1.5,64000,0),(9.8,95000,0),(5.3,65000,0),(7.5,80000,0),(9.9,91000,0),(9.7,50000,1),(2.8,68000,0),(3.6,58000,0),(3.9,74000,0),(4.4,76000,0),(2.5,49000,0),(7.2,81000,0),(5.2,60000,1),(2.4,62000,0),(8.9,94000,0),(2.4,63000,0),(6.8,69000,1),(6.5,77000,0),(7,86000,0),(9.4,94000,0),(7.8,72000,1),(0.2,53000,0),(10,97000,0),(5.5,65000,0),(7.7,71000,1),(8.1,66000,1),(9.8,91000,0),(8,84000,0),(2.7,55000,0),(2.8,62000,0),(9.4,79000,0),(2.5,57000,0),(7.4,70000,1),(2.1,47000,0),(5.3,62000,1),(6.3,79000,0),(6.8,58000,1),(5.7,80000,0),(2.2,61000,0),(4.8,62000,0),(3.7,64000,0),(4.1,85000,0),(2.3,51000,0),(3.5,58000,0),(0.9,43000,0),(0.9,54000,0),(4.5,74000,0),(6.5,55000,1),(4.1,41000,1),(7.1,73000,0),(1.1,66000,0),(9.1,81000,1),(8,69000,1),(7.3,72000,1),(3.3,50000,0),(3.9,58000,0),(2.6,49000,0),(1.6,78000,0),(0.7,56000,0),(2.1,36000,1),(7.5,90000,0),(4.8,59000,1),(8.9,95000,0),(6.2,72000,0),(6.3,63000,0),(9.1,100000,0),(7.3,61000,1),(5.6,74000,0),(0.5,66000,0),(1.1,59000,0),(5.1,61000,0),(6.2,70000,0),(6.6,56000,1),(6.3,76000,0),(6.5,78000,0),(5.1,59000,0),(9.5,74000,1),(4.5,64000,0),(2,54000,0),(1,52000,0),(4,69000,0),(6.5,76000,0),(3,60000,0),(4.5,63000,0),(7.8,70000,0),(3.9,60000,1),(0.8,51000,0),(4.2,78000,0),(1.1,54000,0),(6.2,60000,0),(2.9,59000,0),(2.1,52000,0),(8.2,87000,0),(4.8,73000,0),(2.2,42000,1),(9.1,98000,0),(6.5,84000,0),(6.9,73000,0),(5.1,72000,0),(9.1,69000,1),(9.8,79000,1),]
data = list(map(list, data)) # change tuples to lists
x = [[1] + row[:2] for row in data] # each element is [1, experience, salary]
y = [row[2] for row in data] # each element is paid_account
print("linear regression:")
rescaled_x = rescale(x)
beta = estimate_beta(rescaled_x, y)
print(beta)
print("logistic regression:")
random.seed(0)
x_train, x_test, y_train, y_test = train_test_split(rescaled_x, y, 0.33)
# want to maximize log likelihood on the training data
fn = partial(logistic_log_likelihood, x_train, y_train)
gradient_fn = partial(logistic_log_gradient, x_train, y_train)
# pick a random starting point
beta_0 = [1, 1, 1]
# and maximize using gradient descent
beta_hat = maximize_batch(fn, gradient_fn, beta_0)
print("beta_batch", beta_hat)
beta_0 = [1, 1, 1]
beta_hat = maximize_stochastic(logistic_log_likelihood_i,
logistic_log_gradient_i,
x_train, y_train, beta_0)
print("beta stochastic", beta_hat)
true_positives = false_positives = true_negatives = false_negatives = 0
for x_i, y_i in zip(x_test, y_test):
predict = logistic(dot(beta_hat, x_i))
if y_i == 1 and predict >= 0.5: # TP: paid and we predict paid
true_positives += 1
elif y_i == 1: # FN: paid and we predict unpaid
false_negatives += 1
elif predict >= 0.5: # FP: unpaid and we predict paid
false_positives += 1
else: # TN: unpaid and we predict unpaid
true_negatives += 1
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
print("precision", precision)
print("recall", recall)
| unlicense |
andersk/zulip | zerver/migrations/0217_migrate_create_stream_policy.py | 6 | 1270 | # Generated by Django 1.11.20 on 2019-05-06 13:15
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def upgrade_create_stream_policy(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Realm.CREATE_STREAM_POLICY_MEMBERS = 1
Realm.CREATE_STREAM_POLICY_ADMINS = 2
Realm.CREATE_STREAM_POLICY_WAITING_PERIOD = 3
Realm.objects.filter(waiting_period_threshold__exact=0).filter(
create_stream_by_admins_only=False
).update(create_stream_policy=Realm.CREATE_STREAM_POLICY_MEMBERS)
Realm.objects.filter(create_stream_by_admins_only=True).update(
create_stream_policy=Realm.CREATE_STREAM_POLICY_ADMINS
)
Realm.objects.filter(waiting_period_threshold__gt=0).filter(
create_stream_by_admins_only=False
).update(create_stream_policy=Realm.CREATE_STREAM_POLICY_WAITING_PERIOD)
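# Mapping summary (mirrors the three updates above):
#   create_stream_by_admins_only=True                    -> CREATE_STREAM_POLICY_ADMINS
#   admins_only=False, waiting_period_threshold == 0     -> CREATE_STREAM_POLICY_MEMBERS
#   admins_only=False, waiting_period_threshold > 0      -> CREATE_STREAM_POLICY_WAITING_PERIOD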
class Migration(migrations.Migration):
dependencies = [
("zerver", "0216_add_create_stream_policy"),
]
operations = [
migrations.RunPython(
upgrade_create_stream_policy, reverse_code=migrations.RunPython.noop, elidable=True
),
]
| apache-2.0 |
vauxoo-dev/vxtools-server | setup.py | 1 | 1465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='vxtools-server',
version='0.1.0',
description='Server tools, part of VauxooTools',
long_description=readme + '\n\n' + history,
author='Tulio Ruiz',
author_email='[email protected]',
url='https://github.com/ruiztulio/vxtools-server',
packages=[
'vxtools-server',
],
package_dir={'vxtools-server':
'vxtools-server'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='vxtools-server',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
) | bsd-3-clause |
webnotes/wnframework | core/doctype/file_data/file_data.py | 32 | 1734 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
record of files
naming for same name files: file.gif, file-1.gif, file-2.gif etc
"""
import webnotes, webnotes.utils, os
from webnotes import conf
class DocType():
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def before_insert(self):
webnotes.local.rollback_observers.append(self)
def on_update(self):
# check for duplicate attachment of the same file
n_records = webnotes.conn.sql("""select count(*) from `tabFile Data`
where file_name=%s
and attached_to_doctype=%s
and attached_to_name=%s""", (self.doc.file_name, self.doc.attached_to_doctype,
self.doc.attached_to_name))[0][0]
if n_records > 1:
webnotes.msgprint(webnotes._("Same file has already been attached to the record"))
raise webnotes.DuplicateEntryError
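# on_trash (below): verify write permission on the record the file is attached
# to, then remove the file from disk only if no other File Data row still
# references the same file_name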
def on_trash(self):
if self.doc.attached_to_name:
# check permission
try:
if not webnotes.has_permission(self.doc.attached_to_doctype,
"write", self.doc.attached_to_name):
webnotes.msgprint(webnotes._("No permission to write / remove."),
raise_exception=True)
except webnotes.DoesNotExistError:
pass
# if file not attached to any other record, delete it
if self.doc.file_name and not webnotes.conn.count("File Data",
{"file_name": self.doc.file_name, "name": ["!=", self.doc.name]}):
if self.doc.file_name.startswith("files/"):
path = webnotes.utils.get_site_path("public", self.doc.file_name)
else:
path = webnotes.utils.get_site_path(conf.files_path, self.doc.file_name)
if os.path.exists(path):
os.remove(path)
def on_rollback(self):
self.on_trash() | mit |
FrankBian/kuma | vendor/packages/pyparsing/examples/simpleSQL.py | 16 | 4698 | # simpleSQL.py
#
# simple demo of using the parsing library to do simple-minded SQL parsing
# could be extended to include where clauses etc.
#
# Copyright (c) 2003, Paul McGuire
#
from pyparsing import Literal, CaselessLiteral, Word, Upcase, delimitedList, Optional, \
Combine, Group, alphas, nums, alphanums, ParseException, Forward, oneOf, quotedString, \
ZeroOrMore, restOfLine, Keyword
def test( str ):
print str,"->"
try:
tokens = simpleSQL.parseString( str )
print "tokens = ", tokens
print "tokens.columns =", tokens.columns
print "tokens.tables =", tokens.tables
print "tokens.where =", tokens.where
except ParseException, err:
print " "*err.loc + "^\n" + err.msg
print err
print
# define SQL tokens
selectStmt = Forward()
selectToken = Keyword("select", caseless=True)
fromToken = Keyword("from", caseless=True)
ident = Word( alphas, alphanums + "_$" ).setName("identifier")
columnName = Upcase( delimitedList( ident, ".", combine=True ) )
columnNameList = Group( delimitedList( columnName ) )
tableName = Upcase( delimitedList( ident, ".", combine=True ) )
tableNameList = Group( delimitedList( tableName ) )
whereExpression = Forward()
and_ = Keyword("and", caseless=True)
or_ = Keyword("or", caseless=True)
in_ = Keyword("in", caseless=True)
E = CaselessLiteral("E")
binop = oneOf("= != < > >= <= eq ne lt le gt ge", caseless=True)
arithSign = Word("+-",exact=1)
realNum = Combine( Optional(arithSign) + ( Word( nums ) + "." + Optional( Word(nums) ) |
( "." + Word(nums) ) ) +
Optional( E + Optional(arithSign) + Word(nums) ) )
intNum = Combine( Optional(arithSign) + Word( nums ) +
Optional( E + Optional("+") + Word(nums) ) )
columnRval = realNum | intNum | quotedString | columnName # need to add support for alg expressions
whereCondition = Group(
( columnName + binop + columnRval ) |
( columnName + in_ + "(" + delimitedList( columnRval ) + ")" ) |
( columnName + in_ + "(" + selectStmt + ")" ) |
( "(" + whereExpression + ")" )
)
whereExpression << whereCondition + ZeroOrMore( ( and_ | or_ ) + whereExpression )
# define the grammar
selectStmt << ( selectToken +
( '*' | columnNameList ).setResultsName( "columns" ) +
fromToken +
tableNameList.setResultsName( "tables" ) +
Optional( Group( CaselessLiteral("where") + whereExpression ), "" ).setResultsName("where") )
simpleSQL = selectStmt
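# Grammar sketch of the definitions above (informal EBNF):
#   selectStmt      ::= SELECT ( '*' | columnNameList ) FROM tableNameList [ whereClause ]
#   whereExpression ::= whereCondition { ( AND | OR ) whereExpression }
# whereExpression is declared with Forward(), so parenthesized conditions and
# sub-selects inside IN (...) can nest recursively.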
# define Oracle comment format, and ignore them
oracleSqlComment = "--" + restOfLine
simpleSQL.ignore( oracleSqlComment )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select A,B,C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select &&& frox Sys.dual" )
test( "Select A from Sys.dual where a in ('RED','GREEN','BLUE')" )
test( "Select A from Sys.dual where a in ('RED','GREEN','BLUE') and b in (10,20,30)" )
test( "Select A,b from table1,table2 where table1.id eq table2.id -- test out comparison operators" )
"""
Test output:
>pythonw -u simpleSQL.py
SELECT * from XYZZY, ABC ->
tokens = ['select', '*', 'from', ['XYZZY', 'ABC']]
tokens.columns = *
tokens.tables = ['XYZZY', 'ABC']
select * from SYS.XYZZY ->
tokens = ['select', '*', 'from', ['SYS.XYZZY']]
tokens.columns = *
tokens.tables = ['SYS.XYZZY']
Select A from Sys.dual ->
tokens = ['select', ['A'], 'from', ['SYS.DUAL']]
tokens.columns = ['A']
tokens.tables = ['SYS.DUAL']
Select A,B,C from Sys.dual ->
tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
tokens.columns = ['A', 'B', 'C']
tokens.tables = ['SYS.DUAL']
Select A, B, C from Sys.dual ->
tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
tokens.columns = ['A', 'B', 'C']
tokens.tables = ['SYS.DUAL']
Select A, B, C from Sys.dual, Table2 ->
tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL', 'TABLE2']]
tokens.columns = ['A', 'B', 'C']
tokens.tables = ['SYS.DUAL', 'TABLE2']
Xelect A, B, C from Sys.dual ->
^
Expected 'select'
Expected 'select' (0), (1,1)
Select A, B, C frox Sys.dual ->
^
Expected 'from'
Expected 'from' (15), (1,16)
Select ->
^
Expected '*'
Expected '*' (6), (1,7)
Select &&& frox Sys.dual ->
^
Expected '*'
Expected '*' (7), (1,8)
>Exit code: 0
""" | mpl-2.0 |
bootandy/sqlalchemy | test/engine/test_transaction.py | 23 | 54427 | from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, ne_, expect_warnings
import sys
from sqlalchemy import event
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy import create_engine, MetaData, INT, VARCHAR, Sequence, \
select, Integer, String, func, text, exc
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
users, metadata = None, None
class TransactionTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global users, metadata
metadata = MetaData()
users = Table('query_users', metadata,
Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True,
)
users.create(testing.db)
def teardown(self):
testing.db.execute(users.delete()).close()
@classmethod
def teardown_class(cls):
users.drop(testing.db)
def test_commits(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
transaction = connection.begin()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 3
transaction.commit()
connection.close()
def test_rollback(self):
"""test a basic rollback"""
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.rollback()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 0
connection.close()
def test_raise(self):
connection = testing.db.connect()
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=1, user_name='user3')
transaction.commit()
assert False
except Exception as e:
print("Exception: ", e)
transaction.rollback()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 0
connection.close()
def test_transaction_container(self):
def go(conn, table, data):
for d in data:
conn.execute(table.insert(), d)
testing.db.transaction(go, users, [dict(user_id=1,
user_name='user1')])
eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
)])
assert_raises(exc.DBAPIError, testing.db.transaction, go,
users, [{'user_id': 2, 'user_name': 'user2'},
{'user_id': 1, 'user_name': 'user3'}])
eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
)])
def test_nested_rollback(self):
connection = testing.db.connect()
try:
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1,
user_name='user1')
connection.execute(users.insert(), user_id=2,
user_name='user2')
connection.execute(users.insert(), user_id=3,
user_name='user3')
trans2 = connection.begin()
try:
connection.execute(users.insert(), user_id=4,
user_name='user4')
connection.execute(users.insert(), user_id=5,
user_name='user5')
raise Exception('uh oh')
trans2.commit()
except:
trans2.rollback()
raise
transaction.rollback()
except Exception as e:
transaction.rollback()
raise
except Exception as e:
try:
assert str(e) == 'uh oh' # and not "This transaction is
# inactive"
finally:
connection.close()
def test_branch_nested_rollback(self):
connection = testing.db.connect()
try:
connection.begin()
branched = connection.connect()
assert branched.in_transaction()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
assert not connection.in_transaction()
eq_(connection.scalar("select count(*) from query_users"), 0)
finally:
connection.close()
def test_branch_autorollback(self):
connection = testing.db.connect()
try:
branched = connection.connect()
branched.execute(users.insert(), user_id=1, user_name='user1')
try:
branched.execute(users.insert(), user_id=1, user_name='user1')
except exc.DBAPIError:
pass
finally:
connection.close()
def test_branch_orig_rollback(self):
connection = testing.db.connect()
try:
branched = connection.connect()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin()
assert branched.in_transaction()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
eq_(connection.scalar("select count(*) from query_users"), 1)
finally:
connection.close()
def test_branch_autocommit(self):
connection = testing.db.connect()
try:
branched = connection.connect()
branched.execute(users.insert(), user_id=1, user_name='user1')
finally:
connection.close()
eq_(testing.db.scalar("select count(*) from query_users"), 1)
@testing.requires.savepoints
def test_branch_savepoint_rollback(self):
connection = testing.db.connect()
try:
trans = connection.begin()
branched = connection.connect()
assert branched.in_transaction()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin_nested()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
assert connection.in_transaction()
trans.commit()
eq_(connection.scalar("select count(*) from query_users"), 1)
finally:
connection.close()
@testing.requires.two_phase_transactions
def test_branch_twophase_rollback(self):
connection = testing.db.connect()
try:
branched = connection.connect()
assert not branched.in_transaction()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin_twophase()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
assert not connection.in_transaction()
eq_(connection.scalar("select count(*) from query_users"), 1)
finally:
connection.close()
def test_retains_through_options(self):
connection = testing.db.connect()
try:
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
conn2 = connection.execution_options(dummy=True)
conn2.execute(users.insert(), user_id=2, user_name='user2')
transaction.rollback()
eq_(connection.scalar("select count(*) from query_users"), 0)
finally:
connection.close()
def test_nesting(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
trans2.commit()
transaction.rollback()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 0
connection.close()
def test_with_interface(self):
connection = testing.db.connect()
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
try:
connection.execute(users.insert(), user_id=2, user_name='user2.5')
except Exception as e:
trans.__exit__(*sys.exc_info())
assert not trans.is_active
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans.__exit__(None, None, None)
assert not trans.is_active
self.assert_(connection.scalar('select count(*) from '
'query_users') == 1)
connection.close()
def test_close(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
assert connection.in_transaction()
trans2.close()
assert connection.in_transaction()
transaction.commit()
assert not connection.in_transaction()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 5)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 5
connection.close()
def test_close2(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
assert connection.in_transaction()
trans2.close()
assert connection.in_transaction()
transaction.close()
assert not connection.in_transaction()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 0
connection.close()
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans2.rollback()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (3, )])
connection.close()
@testing.requires.savepoints
@testing.crashes('oracle+zxjdbc',
'Errors out and causes subsequent tests to '
'deadlock')
def test_nested_subtransaction_commit(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans2.commit()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (3, )])
connection.close()
@testing.requires.savepoints
def test_rollback_to_subtransaction(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans3 = connection.begin()
connection.execute(users.insert(), user_id=3, user_name='user3')
trans3.rollback()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (4, )])
connection.close()
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self):
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.prepare()
transaction.commit()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=2, user_name='user2')
transaction.commit()
transaction.close()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.rollback()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction.prepare()
transaction.rollback()
transaction.close()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, )])
connection.close()
# PG emergency shutdown:
# select * from pg_prepared_xacts
# ROLLBACK PREPARED '<xid>'
@testing.crashes('mysql', 'Crashing on 5.5, not worth it')
@testing.requires.skip_mysql_on_windows
@testing.requires.two_phase_transactions
@testing.requires.savepoints
def test_mixed_two_phase_transaction(self):
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction2 = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
transaction3 = connection.begin_nested()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction4 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction4.commit()
transaction3.rollback()
connection.execute(users.insert(), user_id=5, user_name='user5')
transaction2.commit()
transaction.prepare()
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (5, )])
connection.close()
@testing.requires.two_phase_transactions
@testing.crashes('mysql+oursql',
'Times out in full test runs only, causing '
'subsequent tests to fail')
@testing.crashes('mysql+zxjdbc',
'Deadlocks, causing subsequent tests to fail')
@testing.fails_on('mysql', 'FIXME: unknown')
def test_two_phase_recover(self):
# MySQL recovery doesn't currently seem to work correctly
# Prepared transactions disappear when connections are closed
# and even when they aren't it doesn't seem possible to use the
# recovery id.
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.prepare()
connection.invalidate()
connection2 = testing.db.connect()
eq_(
connection2.execution_options(autocommit=True).
execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(), [])
recoverables = connection2.recover_twophase()
assert transaction.xid in recoverables
connection2.commit_prepared(transaction.xid, recover=True)
eq_(connection2.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, )])
connection2.close()
@testing.requires.two_phase_transactions
def test_multiple_two_phase(self):
conn = testing.db.connect()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name='user1')
xa.prepare()
xa.commit()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=2, user_name='user2')
xa.prepare()
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=3, user_name='user3')
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=4, user_name='user4')
xa.prepare()
xa.commit()
result = \
conn.execute(select([users.c.user_name]).
order_by(users.c.user_id))
eq_(result.fetchall(), [('user1', ), ('user4', )])
conn.close()
@testing.requires.two_phase_transactions
def test_reset_rollback_two_phase_no_rollback(self):
# test [ticket:2907], essentially that the
# TwoPhaseTransaction is given the job of "reset on return"
# so that picky backends like MySQL correctly clear out
# their state when a connection is closed without handling
# the transaction explicitly.
eng = testing_engine()
# MySQL raises if you call straight rollback() on
# a connection with an XID present
@event.listens_for(eng, "invalidate")
def conn_invalidated(dbapi_con, con_record, exception):
dbapi_con.close()
raise exception
with eng.connect() as conn:
rec = conn.connection._connection_record
raw_dbapi_con = rec.connection
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name='user1')
assert rec.connection is raw_dbapi_con
with eng.connect() as conn:
result = \
conn.execute(select([users.c.user_name]).
order_by(users.c.user_id))
eq_(result.fetchall(), [])
class ResetAgentTest(fixtures.TestBase):
__backend__ = True
def test_begin_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
assert not trans.is_active
def test_begin_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.savepoints
def test_begin_nested_close(self):
with testing.db.connect() as connection:
trans = connection.begin_nested()
assert connection.connection._reset_agent is trans
assert not trans.is_active
@testing.requires.savepoints
def test_begin_begin_nested_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
assert trans2.is_active # was never closed
assert not trans.is_active
@testing.requires.savepoints
def test_begin_begin_nested_rollback_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.savepoints
def test_begin_begin_nested_rollback_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_begin_rollback_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is None
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_begin_commit_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin()
assert connection.connection._reset_agent is trans
trans2.commit()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_commit(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
class AutoRollbackTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global metadata
metadata = MetaData()
@classmethod
def teardown_class(cls):
metadata.drop_all(testing.db)
def test_rollback_deadlock(self):
"""test that returning connections to the pool clears any object
locks."""
conn1 = testing.db.connect()
conn2 = testing.db.connect()
users = Table('deadlock_users', metadata, Column('user_id',
INT, primary_key=True), Column('user_name',
VARCHAR(20)), test_needs_acid=True)
users.create(conn1)
conn1.execute('select * from deadlock_users')
conn1.close()
# without auto-rollback in the connection pool's return() logic,
# this deadlocks in PostgreSQL, because conn1 is returned to the
# pool but still has a lock on "deadlock_users". comment out the
# rollback in pool/ConnectionFairy._close() to see !
users.drop(conn2)
conn2.close()
class ExplicitAutoCommitTest(fixtures.TestBase):
"""test the 'autocommit' flag on select() and text() objects.
Requires PostgreSQL so that we may define a custom function which
modifies the database. """
__only_on__ = 'postgresql'
@classmethod
def setup_class(cls):
global metadata, foo
metadata = MetaData(testing.db)
foo = Table('foo', metadata, Column('id', Integer,
primary_key=True), Column('data', String(100)))
metadata.create_all()
testing.db.execute("create function insert_foo(varchar) "
"returns integer as 'insert into foo(data) "
"values ($1);select 1;' language sql")
def teardown(self):
foo.delete().execute().close()
@classmethod
def teardown_class(cls):
testing.db.execute('drop function insert_foo(varchar)')
metadata.drop_all()
def test_control(self):
# test that not using autocommit does not commit
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1')]))
assert conn2.execute(select([foo.c.data])).fetchall() == []
conn1.execute(text("select insert_foo('moredata')"))
assert conn2.execute(select([foo.c.data])).fetchall() == []
trans = conn1.begin()
trans.commit()
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', ), ('moredata', )]
conn1.close()
conn2.close()
def test_explicit_compiled(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1'
)]).execution_options(autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', )]
conn1.close()
conn2.close()
def test_explicit_connection(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execution_options(autocommit=True).\
execute(select([func.insert_foo('data1'
)]))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
)])
# connection supersedes statement
conn1.execution_options(autocommit=False).\
execute(select([func.insert_foo('data2'
)]).execution_options(autocommit=True))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
)])
# ditto
conn1.execution_options(autocommit=True).\
execute(select([func.insert_foo('data3'
)]).execution_options(autocommit=False))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
), ('data2', ), ('data3', )])
conn1.close()
conn2.close()
def test_explicit_text(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("select insert_foo('moredata')"
).execution_options(autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('moredata', )]
conn1.close()
conn2.close()
@testing.uses_deprecated(r'autocommit on select\(\) is deprecated',
r'``autocommit\(\)`` is deprecated')
def test_explicit_compiled_deprecated(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1')],
autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', )]
conn1.execute(select([func.insert_foo('data2')]).autocommit())
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', ), ('data2', )]
conn1.close()
conn2.close()
@testing.uses_deprecated(r'autocommit on text\(\) is deprecated')
def test_explicit_text_deprecated(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("select insert_foo('moredata')",
autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('moredata', )]
conn1.close()
conn2.close()
def test_implicit_text(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("insert into foo (data) values "
"('implicitdata')"))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('implicitdata', )]
conn1.close()
conn2.close()
tlengine = None
class TLTransactionTest(fixtures.TestBase):
__requires__ = ('ad_hoc_engines', )
__backend__ = True
@classmethod
def setup_class(cls):
global users, metadata, tlengine
tlengine = testing_engine(options=dict(strategy='threadlocal'))
metadata = MetaData()
users = Table('query_users', metadata, Column('user_id', INT,
Sequence('query_users_id_seq', optional=True),
primary_key=True), Column('user_name',
VARCHAR(20)), test_needs_acid=True)
metadata.create_all(tlengine)
def teardown(self):
tlengine.execute(users.delete()).close()
@classmethod
def teardown_class(cls):
tlengine.close()
metadata.drop_all(tlengine)
tlengine.dispose()
def setup(self):
# ensure tests start with engine closed
tlengine.close()
@testing.crashes('oracle', 'TNS error of unknown origin occurs on the buildbot.')
def test_rollback_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.rollback()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.rollback()
def test_commit_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.commit()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.commit()
def test_prepare_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.prepare()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.prepare()
def test_connection_close(self):
"""test that when connections are closed for real, transactions
are rolled back and disposed."""
c = tlengine.contextual_connect()
c.begin()
assert c.in_transaction()
c.close()
assert not c.in_transaction()
def test_transaction_close(self):
c = tlengine.contextual_connect()
t = c.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
t2 = c.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.execute(users.insert(), user_id=4, user_name='user4')
t2.close()
result = c.execute('select * from query_users')
assert len(result.fetchall()) == 4
t.close()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
c.close()
external_connection.close()
def test_rollback(self):
"""test a basic rollback"""
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
external_connection.close()
def test_commit(self):
"""test a basic commit"""
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 3
finally:
external_connection.close()
def test_with_interface(self):
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
trans.commit()
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
trans.__exit__(Exception, "fake", None)
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
trans.__exit__(None, None, None)
eq_(
tlengine.execute(users.select().order_by(users.c.user_id)).fetchall(),
[
(1, 'user1'),
(2, 'user2'),
(4, 'user4'),
]
)
def test_commits(self):
connection = tlengine.connect()
assert connection.execute('select count(*) from query_users'
).scalar() == 0
connection.close()
connection = tlengine.contextual_connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
transaction = connection.begin()
result = connection.execute('select * from query_users')
l = result.fetchall()
assert len(l) == 3, 'expected 3 got %d' % len(l)
transaction.commit()
connection.close()
def test_rollback_off_conn(self):
# test that a TLTransaction opened off a TLConnection allows
# that TLConnection to be aware of the transactional context
conn = tlengine.contextual_connect()
trans = conn.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
conn.close()
external_connection.close()
def test_morerollback_off_conn(self):
# test that an existing TLConnection automatically takes place
# in a TLTransaction opened on a second TLConnection
conn = tlengine.contextual_connect()
conn2 = tlengine.contextual_connect()
trans = conn2.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
conn.close()
conn2.close()
external_connection.close()
def test_commit_off_connection(self):
conn = tlengine.contextual_connect()
trans = conn.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.commit()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 3
finally:
conn.close()
external_connection.close()
def test_nesting_rollback(self):
"""tests nesting of transactions, rollback at the end"""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.commit()
tlengine.rollback()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
def test_nesting_commit(self):
"""tests nesting of transactions, commit at the end."""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.commit()
tlengine.commit()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 5)
finally:
external_connection.close()
def test_mixed_nesting(self):
"""tests nesting of transactions off the TLEngine directly
inside of transactions off the connection from the TLEngine"""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
conn = tlengine.contextual_connect()
trans = conn.begin()
trans2 = conn.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.begin()
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.execute(users.insert(), user_id=6, user_name='user6')
tlengine.execute(users.insert(), user_id=7, user_name='user7')
tlengine.commit()
tlengine.execute(users.insert(), user_id=8, user_name='user8')
tlengine.commit()
trans2.commit()
trans.rollback()
conn.close()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
def test_more_mixed_nesting(self):
"""tests nesting of transactions off the connection from the
TLEngine inside of transactions off the TLEngine directly."""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
connection = tlengine.contextual_connect()
connection.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
trans.commit()
tlengine.commit()
tlengine.rollback()
connection.close()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.rollback()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (3, )])
tlengine.close()
@testing.requires.savepoints
@testing.crashes('oracle+zxjdbc',
'Errors out and causes subsequent tests to '
'deadlock')
def test_nested_subtransaction_commit(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.commit()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (3, )])
tlengine.close()
@testing.requires.savepoints
def test_rollback_to_subtransaction(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
tlengine.rollback()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (4, )])
tlengine.close()
def test_connections(self):
"""tests that contextual_connect is threadlocal"""
c1 = tlengine.contextual_connect()
c2 = tlengine.contextual_connect()
assert c1.connection is c2.connection
c2.close()
assert not c1.closed
assert not tlengine.closed
@testing.requires.independent_cursors
def test_result_closing(self):
"""tests that contextual_connect is threadlocal"""
r1 = tlengine.execute(select([1]))
r2 = tlengine.execute(select([1]))
row1 = r1.fetchone()
row2 = r2.fetchone()
r1.close()
assert r2.connection is r1.connection
assert not r2.connection.closed
assert not tlengine.closed
# close again, nothing happens since resultproxy calls close()
# only once
r1.close()
assert r2.connection is r1.connection
assert not r2.connection.closed
assert not tlengine.closed
r2.close()
assert r2.connection.closed
assert tlengine.closed
@testing.crashes('oracle+cx_oracle', 'intermittent failures on the buildbot')
def test_dispose(self):
eng = testing_engine(options=dict(strategy='threadlocal'))
result = eng.execute(select([1]))
eng.dispose()
eng.execute(select([1]))
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self):
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.prepare()
tlengine.commit()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.commit()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.prepare()
tlengine.rollback()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, )])
class IsolationLevelTest(fixtures.TestBase):
__requires__ = ('isolation_level', 'ad_hoc_engines')
__backend__ = True
def _default_isolation_level(self):
if testing.against('sqlite'):
return 'SERIALIZABLE'
elif testing.against('postgresql'):
return 'READ COMMITTED'
elif testing.against('mysql'):
return "REPEATABLE READ"
else:
assert False, "default isolation level not known"
def _non_default_isolation_level(self):
if testing.against('sqlite'):
return 'READ UNCOMMITTED'
elif testing.against('postgresql'):
return 'SERIALIZABLE'
elif testing.against('mysql'):
return "SERIALIZABLE"
else:
assert False, "non default isolation level not known"
def test_engine_param_stays(self):
eng = testing_engine()
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection)
level = self._non_default_isolation_level()
ne_(isolation_level, level)
eng = testing_engine(options=dict(isolation_level=level))
eq_(
eng.dialect.get_isolation_level(
eng.connect().connection),
level
)
# check that it stays
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
level
)
conn.close()
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
level
)
conn.close()
def test_default_level(self):
eng = testing_engine(options=dict())
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection)
eq_(isolation_level, self._default_isolation_level())
def test_reset_level(self):
eng = testing_engine(options=dict())
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level()
)
eng.dialect.set_isolation_level(
conn.connection, self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level()
)
eng.dialect.reset_isolation_level(conn.connection)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level()
)
conn.close()
def test_reset_level_with_setting(self):
eng = testing_engine(
options=dict(
isolation_level=self._non_default_isolation_level()))
conn = eng.connect()
eq_(eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level())
eng.dialect.set_isolation_level(
conn.connection,
self._default_isolation_level())
eq_(eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level())
eng.dialect.reset_isolation_level(conn.connection)
eq_(eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level())
conn.close()
def test_invalid_level(self):
eng = testing_engine(options=dict(isolation_level='FOO'))
assert_raises_message(
exc.ArgumentError,
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
("FOO",
eng.dialect.name, ", ".join(eng.dialect._isolation_lookup)),
eng.connect
)
def test_connection_invalidated(self):
eng = testing_engine()
conn = eng.connect()
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level())
c2.invalidate()
c2.connection
# TODO: do we want to rebuild the previous isolation?
# for now, this is current behavior so we will leave it.
eq_(c2.get_isolation_level(), self._default_isolation_level())
def test_per_connection(self):
from sqlalchemy.pool import QueuePool
eng = testing_engine(
options=dict(
poolclass=QueuePool,
pool_size=2, max_overflow=0))
c1 = eng.connect()
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(c2.connection),
self._default_isolation_level()
)
c1.close()
c2.close()
c3 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c3.connection),
self._default_isolation_level()
)
c4 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c4.connection),
self._default_isolation_level()
)
c3.close()
c4.close()
def test_warning_in_transaction(self):
eng = testing_engine()
c1 = eng.connect()
with expect_warnings(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until next "
"transaction"
):
with c1.begin():
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
# stays outside of transaction
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
def test_per_statement_bzzt(self):
assert_raises_message(
exc.ArgumentError,
r"'isolation_level' execution option may only be specified "
r"on Connection.execution_options\(\), or "
r"per-engine using the isolation_level "
r"argument to create_engine\(\).",
select([1]).execution_options,
isolation_level=self._non_default_isolation_level()
)
def test_per_engine(self):
# new in 0.9
eng = create_engine(
testing.db.url,
execution_options={
'isolation_level':
self._non_default_isolation_level()}
)
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level()
)
def test_isolation_level_accessors_connection_default(self):
eng = create_engine(
testing.db.url
)
with eng.connect() as conn:
eq_(conn.default_isolation_level, self._default_isolation_level())
with eng.connect() as conn:
eq_(conn.get_isolation_level(), self._default_isolation_level())
def test_isolation_level_accessors_connection_option_modified(self):
eng = create_engine(
testing.db.url
)
with eng.connect() as conn:
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level())
eq_(conn.default_isolation_level, self._default_isolation_level())
eq_(conn.get_isolation_level(),
self._non_default_isolation_level())
eq_(c2.get_isolation_level(), self._non_default_isolation_level())
| mit |
JeanKossaifi/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
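    # The identity checked above: expit(x) = 1 / (1 + exp(-x)) = exp(x) / (1 + exp(x)).
    # Evaluating the second form for large negative x avoids overflow, while the
    # first is safe for large positive x; the two asserts probe both regimes at x = +/-1000.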
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
maestrano/openerp | openerp/addons/l10n_be_hr_payroll/__openerp__.py | 118 | 1817 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['hr_payroll'],
'version': '1.0',
'description': """
Belgian Payroll Rules.
======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
* Salary Maj, ONSS, Withholding Tax, Child Allowance, ...
""",
'auto_install': False,
'demo': ['l10n_be_hr_payroll_demo.xml'],
'data':[
'l10n_be_hr_payroll_view.xml',
'l10n_be_hr_payroll_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openstack/manila | manila/tests/share/drivers/glusterfs/test_layout.py | 1 | 12982 | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from unittest import mock
import ddt
from oslo_config import cfg
from oslo_utils import importutils
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.glusterfs import layout
from manila import test
from manila.tests import fake_share
from manila.tests import fake_utils
CONF = cfg.CONF
fake_local_share_path = '/mnt/nfs/testvol/fakename'
fake_path_to_private_key = '/fakepath/to/privatekey'
fake_remote_server_password = 'fakepassword'
def fake_access(kwargs):
fake_access_rule = fake_share.fake_access(**kwargs)
fake_access_rule.to_dict = lambda: fake_access_rule.values
return fake_access_rule
class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase):
supported_layouts = ('layout_fake.FakeLayout',
'layout_something.SomeLayout')
    supported_protocols = ('NFS',)
_supported_access_types = ('ip',)
_supported_access_levels = ('rw',)
@ddt.ddt
class GlusterfsShareDriverBaseTestCase(test.TestCase):
"""Tests GlusterfsShareDriverBase."""
def setUp(self):
super(GlusterfsShareDriverBaseTestCase, self).setUp()
CONF.set_default('driver_handles_share_servers', False)
fake_conf, __ = self._setup()
self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf)
self.fake_share = mock.Mock(name='fake_share')
self.fake_context = mock.Mock(name='fake_context')
self.fake_access = mock.Mock(name='fake_access')
def _setup(self):
fake_conf = config.Configuration(None)
fake_layout = mock.Mock()
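        # Patch importutils.import_object so that constructing a driver in these
        # tests hands back this fake layout object instead of importing a real
        # layout class from manila.share.drivers.glusterfs.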
self.mock_object(importutils, "import_object",
mock.Mock(return_value=fake_layout))
return fake_conf, fake_layout
def test_init(self):
self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False,
configuration=config.Configuration(None))
@ddt.data({'has_snap': None, 'layout_name': None},
{'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'},
{'has_snap': True, 'layout_name': 'layout_something.SomeLayout'})
@ddt.unpack
def test_init_subclass(self, has_snap, layout_name):
conf, _layout = self._setup()
if layout_name is not None:
conf.glusterfs_share_layout = layout_name
if has_snap is None:
del(_layout._snapshots_are_supported)
else:
_layout._snapshots_are_supported = has_snap
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
snap_result = {None: False}.get(has_snap, has_snap)
layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name,
layout_name)
importutils.import_object.assert_called_once_with(
'manila.share.drivers.glusterfs.%s' % layout_result,
_driver, configuration=conf)
self.assertEqual(_layout, _driver.layout)
self.assertEqual(snap_result, _driver.snapshots_are_supported)
def test_init_nosupp_layout(self):
conf = config.Configuration(None)
conf.glusterfs_share_layout = 'nonsense_layout'
self.assertRaises(exception.GlusterfsException,
GlusterfsFakeShareDriver, False, configuration=conf)
def test_setup_via_manager(self):
self.assertIsNone(self._driver._setup_via_manager(mock.Mock()))
def test_supported_access_types(self):
self.assertEqual(('ip',), self._driver.supported_access_types)
def test_supported_access_levels(self):
self.assertEqual(('rw',), self._driver.supported_access_levels)
def test_access_rule_validator(self):
rule = mock.Mock()
abort = mock.Mock()
valid = mock.Mock()
self.mock_object(layout.ganesha_utils, 'validate_access_rule',
mock.Mock(return_value=valid))
ret = self._driver._access_rule_validator(abort)(rule)
self.assertEqual(valid, ret)
layout.ganesha_utils.validate_access_rule.assert_called_once_with(
('ip',), ('rw',), rule, abort)
@ddt.data({'inset': ([], ['ADD'], []), 'outset': (['ADD'], []),
'recovery': False},
{'inset': ([], [], ['DELETE']), 'outset': ([], ['DELETE']),
'recovery': False},
{'inset': (['EXISTING'], ['ADD'], ['DELETE']),
'outset': (['ADD'], ['DELETE']), 'recovery': False},
{'inset': (['EXISTING'], [], []), 'outset': (['EXISTING'], []),
'recovery': True})
@ddt.unpack
def test_update_access(self, inset, outset, recovery):
conf, _layout = self._setup()
gluster_mgr = mock.Mock(name='gluster_mgr')
self.mock_object(_layout, '_share_manager',
mock.Mock(return_value=gluster_mgr))
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
self.mock_object(_driver, '_update_access_via_manager', mock.Mock())
rulemap = {t: fake_access({'access_type': "ip",
'access_level': "rw",
'access_to': t}) for t in (
'EXISTING', 'ADD', 'DELETE')}
in_rules, out_rules = (
[
[
rulemap[t] for t in r
] for r in rs
] for rs in (inset, outset))
_driver.update_access(self.fake_context, self.fake_share, *in_rules)
_layout._share_manager.assert_called_once_with(self.fake_share)
_driver._update_access_via_manager.assert_called_once_with(
gluster_mgr, self.fake_context, self.fake_share,
*out_rules, recovery=recovery)
def test_update_access_via_manager(self):
self.assertRaises(NotImplementedError,
self._driver._update_access_via_manager,
mock.Mock(), self.fake_context, self.fake_share,
[self.fake_access], [self.fake_access])
@ddt.data('NFS', 'PROTATO')
def test_check_proto_baseclass(self, proto):
self.assertRaises(exception.ShareBackendException,
layout.GlusterfsShareDriverBase._check_proto,
{'share_proto': proto})
def test_check_proto(self):
GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'})
def test_check_proto_notsupported(self):
self.assertRaises(exception.ShareBackendException,
GlusterfsFakeShareDriver._check_proto,
{'share_proto': 'PROTATO'})
@ddt.data('', '_from_snapshot')
def test_create_share(self, variant):
conf, _layout = self._setup()
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
self.mock_object(_driver, '_check_proto', mock.Mock())
getattr(_driver, 'create_share%s' % variant)(self.fake_context,
self.fake_share)
_driver._check_proto.assert_called_once_with(self.fake_share)
getattr(_layout,
'create_share%s' % variant).assert_called_once_with(
self.fake_context, self.fake_share)
@ddt.data(True, False)
def test_update_share_stats(self, internal_exception):
data = mock.Mock()
conf, _layout = self._setup()
def raise_exception(*args, **kwargs):
raise NotImplementedError
layoutstats = mock.Mock()
mock_kw = ({'side_effect': raise_exception} if internal_exception
else {'return_value': layoutstats})
self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw))
self.mock_object(driver.ShareDriver, '_update_share_stats',
mock.Mock())
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
_driver._update_share_stats(data)
if internal_exception:
self.assertFalse(data.update.called)
else:
data.update.assert_called_once_with(layoutstats)
driver.ShareDriver._update_share_stats.assert_called_once_with(
data)
@ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot',
'ensure_share', 'manage_existing', 'unmanage', 'extend_share',
'shrink_share')
def test_delegated_methods(self, method):
conf, _layout = self._setup()
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
fake_args = (mock.Mock(), mock.Mock(), mock.Mock())
getattr(_driver, method)(*fake_args)
getattr(_layout, method).assert_called_once_with(*fake_args)
@ddt.ddt
class GlusterfsShareLayoutBaseTestCase(test.TestCase):
"""Tests GlusterfsShareLayoutBaseTestCase."""
def setUp(self):
super(GlusterfsShareLayoutBaseTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self._execute = fake_utils.fake_execute
self.addCleanup(fake_utils.fake_execute_set_repliers, [])
self.addCleanup(fake_utils.fake_execute_clear_log)
self.fake_driver = mock.Mock()
self.mock_object(self.fake_driver, '_execute',
self._execute)
class FakeLayout(layout.GlusterfsShareLayoutBase):
def _share_manager(self, share):
"""Return GlusterManager object representing share's backend."""
def do_setup(self, context):
"""Any initialization the share driver does while starting."""
def create_share(self, context, share, share_server=None):
"""Is called to create share."""
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
def create_snapshot(self, context, snapshot, share_server=None):
"""Is called to create snapshot."""
def delete_share(self, context, share, share_server=None):
"""Is called to remove share."""
def delete_snapshot(self, context, snapshot, share_server=None):
"""Is called to remove snapshot."""
def ensure_share(self, context, share, share_server=None):
"""Invoked to ensure that share is exported."""
def manage_existing(self, share, driver_options):
"""Brings an existing share under Manila management."""
def unmanage(self, share):
"""Removes the specified share from Manila management."""
def extend_share(self, share, new_size, share_server=None):
"""Extends size of existing share."""
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
def test_init_invalid(self):
self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase,
mock.Mock())
def test_subclass(self):
fake_conf = mock.Mock()
_layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
self.assertEqual(fake_conf, _layout.configuration)
self.assertRaises(NotImplementedError, _layout._update_share_stats)
def test_check_mount_glusterfs(self):
fake_conf = mock.Mock()
_driver = mock.Mock()
_driver._execute = mock.Mock()
_layout = self.FakeLayout(_driver, configuration=fake_conf)
_layout._check_mount_glusterfs()
_driver._execute.assert_called_once_with(
'mount.glusterfs',
check_exit_code=False)
@ddt.data({'_errno': errno.ENOENT,
'_exception': exception.GlusterfsException},
{'_errno': errno.EACCES, '_exception': OSError})
@ddt.unpack
def test_check_mount_glusterfs_not_installed(self, _errno, _exception):
fake_conf = mock.Mock()
_layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
def exec_runner(*ignore_args, **ignore_kwargs):
raise OSError(_errno, os.strerror(_errno))
expected_exec = ['mount.glusterfs']
fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
self.assertRaises(_exception, _layout._check_mount_glusterfs)
| apache-2.0 |
kartikgupta0909/gittest | configs/builds/releng_base_windows_32_builds.py | 2 | 4098 | import os
import sys
STAGE_USERNAME = 'ffxbld'
STAGE_SSH_KEY = 'ffxbld_dsa'
config = {
#########################################################################
    ######## WINDOWS GENERIC CONFIG KEYS/VALUES
# if you are updating this with custom 32 bit keys/values please add them
# below under the '32 bit specific' code block otherwise, update in this
# code block and also make sure this is synced with
# releng_base_windows_64_builds.py
'default_actions': [
'clobber',
'clone-tools',
# 'setup-mock', windows do not use mock
'build',
'sendchanges',
'generate-build-stats',
'update', # decided by query_is_nightly()
],
"buildbot_json_path": "buildprops.json",
'exes': {
'python2.7': sys.executable,
'hgtool.py': [
sys.executable,
os.path.join(
os.getcwd(), 'build', 'tools', 'buildfarm', 'utils', 'hgtool.py'
)
],
"buildbot": [
sys.executable,
'c:\\mozilla-build\\buildbotve\\scripts\\buildbot'
],
"make": [
sys.executable,
os.path.join(
os.getcwd(), 'build', 'src', 'build', 'pymake', 'make.py'
)
]
},
'app_ini_path': '%(obj_dir)s/dist/bin/application.ini',
# decides whether we want to use moz_sign_cmd in env
'enable_signing': True,
'purge_skip': ['info', 'rel-*:45d', 'tb-rel-*:45d'],
'purge_basedirs': [],
'enable_ccache': False,
'vcs_share_base': 'C:/builds/hg-shared',
'objdir': 'obj-firefox',
'tooltool_script': [sys.executable,
'C:/mozilla-build/tooltool.py'],
'tooltool_bootstrap': "setup.sh",
'enable_count_ctors': False,
'enable_talos_sendchange': True,
'enable_unittest_sendchange': True,
'platform_supports_partials': True,
#########################################################################
#########################################################################
###### 32 bit specific ######
'base_name': 'WINNT_5.2_%(branch)s',
'platform': 'win32',
'stage_platform': 'win32',
'enable_max_vsize': True,
'env': {
'MOZBUILD_STATE_PATH': os.path.join(os.getcwd(), '.mozbuild'),
'MOZ_AUTOMATION': '1',
'BINSCOPE': 'C:/Program Files (x86)/Microsoft/SDL BinScope/BinScope.exe',
'HG_SHARE_BASE_DIR': 'C:/builds/hg-shared',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'MOZ_OBJDIR': 'obj-firefox',
'PATH': 'C:/mozilla-build/nsis-3.0a2;C:/mozilla-build/nsis-2.46u;C:/mozilla-build/python27;'
'C:/mozilla-build/buildbotve/scripts;'
'%s' % (os.environ.get('path')),
'PDBSTR_PATH': '/c/Program Files (x86)/Windows Kits/8.0/Debuggers/x64/srcsrv/pdbstr.exe',
'PROPERTIES_FILE': os.path.join(os.getcwd(), 'buildprops.json'),
# SYMBOL_SERVER_HOST is dictated from build_pool_specifics.py
'SYMBOL_SERVER_HOST': '%(symbol_server_host)s',
'SYMBOL_SERVER_SSH_KEY': '/c/Users/cltbld/.ssh/ffxbld_dsa',
'SYMBOL_SERVER_USER': 'ffxbld',
'SYMBOL_SERVER_PATH': '/mnt/netapp/breakpad/symbols_ffx/',
'POST_SYMBOL_UPLOAD_CMD': '/usr/local/bin/post-symbol-upload.py',
'TINDERBOX_OUTPUT': '1',
},
'upload_env': {
# UPLOAD_HOST is set to stage_server
# stage_server is dictated from build_pool_specifics.py
'UPLOAD_USER': STAGE_USERNAME,
'UPLOAD_TO_TEMP': '1',
'UPLOAD_SSH_KEY': '~/.ssh/%s' % (STAGE_SSH_KEY,),
},
"check_test_env": {
'MINIDUMP_STACKWALK': '%(abs_tools_dir)s/breakpad/win32/minidump_stackwalk.exe',
'MINIDUMP_SAVE_PATH': '%(base_work_dir)s/minidumps',
},
'purge_minsize': 12,
'src_mozconfig': 'browser/config/mozconfigs/win32/nightly',
'tooltool_manifest_src': "browser/config/tooltool-manifests/win32/releng.manifest",
'platform_ftp_name': 'win32.complete.mar',
#########################################################################
}
| mpl-2.0 |
Neural-Network/TicTacToe | pybrain/datasets/supervised.py | 21 | 4400 | from __future__ import print_function
__author__ = 'Thomas Rueckstiess, [email protected]'
from numpy import random
from random import sample
from scipy import isscalar
from pybrain.datasets.dataset import DataSet
from pybrain.utilities import fListToString
class SupervisedDataSet(DataSet):
"""SupervisedDataSets have two fields, one for input and one for the target.
"""
def __init__(self, inp, target):
"""Initialize an empty supervised dataset.
Pass `inp` and `target` to specify the dimensions of the input and
target vectors."""
DataSet.__init__(self)
if isscalar(inp):
# add input and target fields and link them
self.addField('input', inp)
self.addField('target', target)
else:
self.setField('input', inp)
self.setField('target', target)
self.linkFields(['input', 'target'])
# reset the index marker
self.index = 0
# the input and target dimensions
self.indim = self.getDimension('input')
self.outdim = self.getDimension('target')
def __reduce__(self):
_, _, state, _, _ = super(SupervisedDataSet, self).__reduce__()
creator = self.__class__
args = self.indim, self.outdim
return creator, args, state, iter([]), iter({})
def addSample(self, inp, target):
"""Add a new sample consisting of `input` and `target`."""
self.appendLinked(inp, target)
def getSample(self, index=None):
"""Return a sample at `index` or the current sample."""
return self.getLinked(index)
def setField(self, label, arr, **kwargs):
"""Set the given array `arr` as the new array of the field specfied by
`label`."""
DataSet.setField(self, label, arr, **kwargs)
# refresh dimensions, in case any of these fields were modified
if label == 'input':
self.indim = self.getDimension('input')
elif label == 'target':
self.outdim = self.getDimension('target')
def _provideSequences(self):
"""Return an iterator over sequence lists, although the dataset contains
only single samples."""
return iter([[x] for x in iter(self)])
def evaluateMSE(self, f, **args):
"""Evaluate the predictions of a function on the dataset and return the
Mean Squared Error, incorporating importance."""
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
e, p = self._evaluateSequence(f, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
return totalError/ponderation
def _evaluateSequence(self, f, seq, verbose = False):
"""Return the ponderated MSE over one sequence."""
totalError = 0.
ponderation = 0.
for input, target in seq:
res = f(input)
e = 0.5 * sum((target-res).flatten()**2)
totalError += e
ponderation += len(target)
if verbose:
print(( 'out: ', fListToString( list( res ) )))
print(( 'correct:', fListToString( target )))
print(( 'error: % .8f' % e))
return totalError, ponderation
def evaluateModuleMSE(self, module, averageOver = 1, **args):
"""Evaluate the predictions of a module on a dataset and return the MSE
(potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
module.reset()
res += self.evaluateMSE(module.activate, **args)
return res/averageOver
def splitWithProportion(self, proportion = 0.5):
"""Produce two new datasets, the first one containing the fraction given
by `proportion` of the samples."""
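        # e.g. trainDs, testDs = ds.splitWithProportion(0.75) puts a random ~75%
        # of the samples into trainDs and the remainder into testDs.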
        indices = random.permutation(len(self))
        separator = int(len(self) * proportion)
        leftIndices = indices[:separator]
        rightIndices = indices[separator:]
        leftDs = SupervisedDataSet(inp=self['input'][leftIndices].copy(),
                                   target=self['target'][leftIndices].copy())
        rightDs = SupervisedDataSet(inp=self['input'][rightIndices].copy(),
                                    target=self['target'][rightIndices].copy())
return leftDs, rightDs
| bsd-3-clause |
sakuraio/python-sakuraio | sakuraio/hardware/commands/operation.py | 1 | 3230 | import struct
import datetime
# Operation
CMD_GET_PRODUCT_ID = 0xA0
CMD_GET_UNIQUE_ID = 0xA1
CMD_GET_FIRMWARE_VERSION = 0xA2
CMD_UNLOCK = 0xA8
CMD_UPDATE_FIRMWARE = 0xA9
CMD_GET_FIRMWARE_UPDATE_STATUS = 0xAA
CMD_SOFTWARE_RESET = 0xAF
CMD_SET_POWER_SAVE_MODE = 0xB0
CMD_GET_POWER_SAVE_MODE = 0xB1
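# The unlock magic bytes 0x53 0x6B 0x72 0x61 are ASCII for "Skra".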
UNLOCK_MAGIC_NUMBERS = [0x53, 0x6B, 0x72, 0x61]
PRODUCT_ID_SCM_LTE_BETA = 0x01
PRODUCT_ID_SCM_LTE_01 = 0x02
PRODUCT_ID_MAP = {
PRODUCT_ID_SCM_LTE_BETA: "SCM-LTE-BETA",
PRODUCT_ID_SCM_LTE_01: "SCM-LTE-01",
}
POWER_SAVE_MODE_DISABLE = 0
POWER_SAVE_MODE_AUTO_SLEEP = 1
POWER_SAVE_MODE_RF_OFF = 2
class OperationMixins(object):
def get_product_id(self):
"""Get product id
:return: Product ID. Possible values:
:const:`PRODUCT_ID_SCM_LTE_BETA`, :const:`PRODUCT_ID_SCM_LTE_01`
:rtype: int
"""
response = self.execute_command(CMD_GET_PRODUCT_ID, as_bytes=True)
product_id = struct.unpack("<H", response)[0]
return product_id
def get_product_name(self):
"""Get product name
:return: Product name. Possible values: ``"SCM-LTE-BETA"``, ``"SCM-LTE-01"``.
:rtype: str
"""
product_id = self.get_product_id()
return PRODUCT_ID_MAP.get(product_id, "{0:04X}".format(product_id))
def get_unique_id(self):
"""Get unique id
        :return: Unique ID. For example ``"16X0000001"``.
:rtype: str
"""
return self.execute_command(CMD_GET_UNIQUE_ID, as_bytes=True).decode("ascii")
def get_firmware_version(self):
"""Get firmware version
        :return: Firmware version. For example ``"v1.1.2-170223-7e6ce64"``.
:rtype: str
"""
return self.execute_command(CMD_GET_FIRMWARE_VERSION, as_bytes=True).decode("ascii")
def unlock(self):
"""Unlock critical command"""
self.execute_command(CMD_UNLOCK, UNLOCK_MAGIC_NUMBERS)
def update_firmware(self):
"""Request to update firmware"""
self.execute_command(CMD_UPDATE_FIRMWARE)
def get_firmware_update_status(self):
"""Get firmware update status
:return: Status.
:rtype: dict
"""
response = self.execute_command(CMD_GET_FIRMWARE_UPDATE_STATUS)[0]
inprogress = (response & 0x80) == 0x80
return {
"inprogress": inprogress,
"error": response & 0x7f,
}
def reset(self):
"""Request software reset"""
self.execute_command(CMD_SOFTWARE_RESET)
def set_power_save_mode(self, mode):
"""Set PowerSaveMode
:param integer mode:
Power Save Mode number.
0: Disable mode.
1: Auto sleep mode.
2: Rf off mode.
"""
self.execute_command(CMD_SET_POWER_SAVE_MODE, [mode])
def get_power_save_mode(self):
"""Get PowerSaveMode
:rtype: int
Power Save Mode number.
0: Disable mode.
1: Auto sleep mode.
2: Rf off mode.
"""
ret = self.execute_command(CMD_GET_POWER_SAVE_MODE)
if isinstance(ret, list):
if len(ret) == 1:
return ret[0]
return None
| mit |
aviweit/libcloud | libcloud/common/brightbox.py | 55 | 3413 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
try:
import simplejson as json
except ImportError:
import json
class BrightboxResponse(JsonResponse):
def success(self):
return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST
def parse_body(self):
if self.headers['content-type'].split(';')[0] == 'application/json':
return super(BrightboxResponse, self).parse_body()
else:
return self.body
def parse_error(self):
response = super(BrightboxResponse, self).parse_body()
if 'error' in response:
if response['error'] in ['invalid_client', 'unauthorized_client']:
raise InvalidCredsError(response['error'])
return response['error']
elif 'error_name' in response:
return '%s: %s' % (response['error_name'], response['errors'][0])
return self.body
class BrightboxConnection(ConnectionUserAndKey):
"""
Connection class for the Brightbox driver
"""
host = 'api.gb1.brightbox.com'
responseCls = BrightboxResponse
def _fetch_oauth_token(self):
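        # Exchange the client id/secret for an OAuth token: POST to /token with
        # HTTP Basic auth built from user_id:key; the access_token in the JSON
        # response is later sent as "Authorization: OAuth <token>" by
        # add_default_headers below.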
body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})
authorization = 'Basic ' + str(base64.encodestring(b('%s:%s' %
(self.user_id, self.key)))).rstrip()
self.connect()
headers = {
'Host': self.host,
'User-Agent': self._user_agent(),
'Authorization': authorization,
'Content-Type': 'application/json',
'Content-Length': str(len(body))
}
response = self.connection.request(method='POST', url='/token',
body=body, headers=headers)
response = self.connection.getresponse()
if response.status == httplib.OK:
return json.loads(response.read())['access_token']
else:
responseCls = BrightboxResponse(response=response, connection=self)
message = responseCls.parse_error()
raise InvalidCredsError(message)
def add_default_headers(self, headers):
try:
headers['Authorization'] = 'OAuth ' + self.token
except AttributeError:
self.token = self._fetch_oauth_token()
headers['Authorization'] = 'OAuth ' + self.token
return headers
def encode_data(self, data):
return json.dumps(data)
| apache-2.0 |
growingio/phoenix | bin/end2endTest.py | 31 | 1881 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
# !!! PLEASE READ !!!
# !!! Do NOT run the script against a production cluster because it wipes out
# !!! existing data of the cluster
import os
import subprocess
import sys
import phoenix_utils
phoenix_utils.setPath()
phoenix_jar_path = os.getenv(phoenix_utils.phoenix_class_path, phoenix_utils.phoenix_test_jar_path)
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_library_path = os.getenv('HBASE_LIBRARY_DIR', '')
print "Current ClassPath=%s:%s:%s" % (phoenix_utils.hbase_conf_dir, phoenix_jar_path,
hbase_library_path)
java_cmd = "java -cp " + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_jar_path + os.pathsep + \
hbase_library_path + " org.apache.phoenix.end2end.End2EndTestDriver " + \
' '.join(sys.argv[1:])
exitcode = subprocess.call(java_cmd, shell=True)
sys.exit(exitcode)
| apache-2.0 |
torufuru/oolhackathon | ryu/lib/xflow/netflow.py | 60 | 4009 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
NETFLOW_V1 = 0x01
NETFLOW_V5 = 0x05
NETFLOW_V6 = 0x06
NETFLOW_V7 = 0x07
NETFLOW_V8 = 0x08
NETFLOW_V9 = 0x09
class NetFlow(object):
_PACK_STR = '!H'
_NETFLOW_VERSIONS = {}
@staticmethod
def register_netflow_version(version):
def _register_netflow_version(cls):
NetFlow._NETFLOW_VERSIONS[version] = cls
return cls
return _register_netflow_version
def __init__(self):
super(NetFlow, self).__init__()
@classmethod
def parser(cls, buf):
(version,) = struct.unpack_from(cls._PACK_STR, buf)
cls_ = cls._NETFLOW_VERSIONS.get(version, None)
if cls_:
return cls_.parser(buf)
else:
return None
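# Usage sketch (payload source assumed): parse the raw bytes of a NetFlow
# export packet, e.g. the payload of a UDP datagram received from an exporter.
#
#     msg = NetFlow.parser(payload)
#     if msg is not None:           # None when the version is not registered
#         for flow in msg.flows:
#             print(flow.srcaddr, flow.dstaddr, flow.dpkts, flow.doctets)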
@NetFlow.register_netflow_version(NETFLOW_V5)
class NetFlowV5(object):
_PACK_STR = '!HHIIIIBBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, version, count, sys_uptime, unix_secs,
unix_nsecs, flow_sequence, engine_type, engine_id,
sampling_interval, flows=None):
self.version = version
self.count = count
self.sys_uptime = sys_uptime
self.unix_secs = unix_secs
self.unix_nsecs = unix_nsecs
self.flow_sequence = flow_sequence
self.engine_type = engine_type
self.engine_id = engine_id
self.sampling_interval = sampling_interval
@classmethod
def parser(cls, buf):
(version, count, sys_uptime, unix_secs, unix_nsecs,
flow_sequence, engine_type, engine_id, sampling_interval) = \
struct.unpack_from(cls._PACK_STR, buf)
msg = cls(version, count, sys_uptime, unix_secs, unix_nsecs,
flow_sequence, engine_type, engine_id,
sampling_interval)
offset = cls._MIN_LEN
msg.flows = []
while len(buf) > offset:
f = NetFlowV5Flow.parser(buf, offset)
offset += NetFlowV5Flow._MIN_LEN
msg.flows.append(f)
return msg
class NetFlowV5Flow(object):
_PACK_STR = '!IIIHHIIIIHHxBBBHHBB2x'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, srcaddr, dstaddr, nexthop, input_, output,
dpkts, doctets, first, last, srcport, dstport,
tcp_flags, prot, tos, src_as, dst_as, src_mask,
dst_mask):
self.srcaddr = srcaddr
self.dstaddr = dstaddr
self.nexthop = nexthop
self.input = input_
self.output = output
self.dpkts = dpkts
self.doctets = doctets
self.first = first
self.last = last
self.srcport = srcport
self.dstport = dstport
self.tcp_flags = tcp_flags
self.prot = prot
self.tos = tos
self.src_as = src_as
self.dst_as = dst_as
self.src_mask = src_mask
self.dst_mask = dst_mask
@classmethod
def parser(cls, buf, offset):
(srcaddr, dstaddr, nexthop, input_, output, dpkts, doctets,
first, last, srcport, dstport, tcp_flags, prot, tos, src_as,
dst_as, src_mask, dst_mask) = struct.unpack_from(
cls._PACK_STR, buf, offset)
msg = cls(srcaddr, dstaddr, nexthop, input_, output, dpkts,
doctets, first, last, srcport, dstport, tcp_flags,
prot, tos, src_as, dst_as, src_mask, dst_mask)
return msg
| apache-2.0 |
bq-xiao/mongo-python-driver | test/test_replica_set_reconfig.py | 18 | 5751 | # Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test clients and replica set configuration changes, using mocks."""
import sys
sys.path[0:0] = [""]
from pymongo.errors import ConnectionFailure, AutoReconnect
from pymongo import ReadPreference
from test import unittest, client_context, client_knobs, MockClientTest
from test.pymongo_mocks import MockClient
from test.utils import wait_until
@client_context.require_connection
def setUpModule():
pass
class TestSecondaryBecomesStandalone(MockClientTest):
# An administrator removes a secondary from a 3-node set and
# brings it back up as standalone, without updating the other
# members' config. Verify we don't continue using it.
def test_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='a:1,b:2,c:3',
replicaSet='rs',
serverSelectionTimeoutMS=100)
# MongoClient connects to primary by default.
wait_until(lambda: c.address is not None, 'connect to primary')
self.assertEqual(c.address, ('a', 1))
# C is brought up as a standalone.
c.mock_members.remove('c:3')
c.mock_standalones.append('c:3')
# Fail over.
c.kill_host('a:1')
c.kill_host('b:2')
# Force reconnect.
c.close()
with self.assertRaises(AutoReconnect):
c.db.command('ismaster')
self.assertEqual(c.address, None)
def test_replica_set_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='a:1,b:2,c:3',
replicaSet='rs')
wait_until(lambda: ('b', 2) in c.secondaries,
'discover host "b"')
wait_until(lambda: ('c', 3) in c.secondaries,
'discover host "c"')
# C is brought up as a standalone.
c.mock_members.remove('c:3')
c.mock_standalones.append('c:3')
wait_until(lambda: set([('b', 2)]) == c.secondaries,
'update the list of secondaries')
self.assertEqual(('a', 1), c.primary)
class TestSecondaryRemoved(MockClientTest):
# An administrator removes a secondary from a 3-node set *without*
# restarting it as standalone.
def test_replica_set_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='a:1,b:2,c:3',
replicaSet='rs')
wait_until(lambda: ('b', 2) in c.secondaries, 'discover host "b"')
wait_until(lambda: ('c', 3) in c.secondaries, 'discover host "c"')
# C is removed.
c.mock_ismaster_hosts.remove('c:3')
wait_until(lambda: set([('b', 2)]) == c.secondaries,
'update list of secondaries')
self.assertEqual(('a', 1), c.primary)
class TestSocketError(MockClientTest):
def test_socket_error_marks_member_down(self):
# Disable background refresh.
with client_knobs(heartbeat_frequency=999999):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')
# b now raises socket.error.
c.mock_down_hosts.append('b:2')
self.assertRaises(
ConnectionFailure,
c.db.collection.with_options(
read_preference=ReadPreference.SECONDARY).find_one)
self.assertEqual(1, len(c.nodes))
class TestSecondaryAdded(MockClientTest):
def test_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')
# MongoClient connects to primary by default.
self.assertEqual(c.address, ('a', 1))
self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes)
# C is added.
c.mock_members.append('c:3')
c.mock_ismaster_hosts.append('c:3')
c.close()
c.db.command('ismaster')
self.assertEqual(c.address, ('a', 1))
wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes,
'reconnect to both secondaries')
def test_replica_set_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs')
wait_until(lambda: ('a', 1) == c.primary, 'discover the primary')
wait_until(lambda: set([('b', 2)]) == c.secondaries,
'discover the secondary')
# C is added.
c.mock_members.append('c:3')
c.mock_ismaster_hosts.append('c:3')
wait_until(lambda: set([('b', 2), ('c', 3)]) == c.secondaries,
'discover the new secondary')
self.assertEqual(('a', 1), c.primary)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
rbooth200/DiscEvolution | DiscEvolution/internal_photo.py | 1 | 30463 | # internal_photo.py
#
# Author: A. Sellek
# Date: 12 - Aug - 2020
#
# Implementation of Photoevaporation Models
################################################################################
import numpy as np
import argparse
import json
import matplotlib.pyplot as plt
from DiscEvolution.constants import *
from DiscEvolution.star import PhotoStar
from scipy.signal import argrelmin
class NotHoleError(Exception):
"""Raised if finds an outer edge, not a hole"""
pass
class PhotoBase():
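    # Shared machinery for internal photoevaporation prescriptions: stores the
    # mass-loss profile (Sigmadot), removes mass from the disc each timestep,
    # tracks whether and where an inner hole has opened, and switches the
    # profile from 'Primordial' to 'InnerHole' once the column density to the
    # hole falls below the critical value N_crit.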
def __init__(self, disc, Regime=None, Type=None):
# Basic mass loss properties
self._regime = Regime # EUV or X-ray
self._type = Type # 'Primordial' or 'InnerHole'
self._Sigmadot = np.zeros_like(disc.R)
self.mdot_XE(disc.star)
# Evolutionary state flags
self._Hole = False # Has the hole started to open?
self._reset = False # Have we needed to reset a decoy hole?
self._empty = False # When no longer a valid hole radius or all below density threshold
self._Thin = False # Is the hole exposed (ie low column density to star)?
# Parameters of hole
self._R_hole = None
self._N_hole = None
# The column density threshold below which the inner disc is "Thin"
if self._regime=='X-ray':
self._N_crit = 1e22
elif self._regime=='EUV':
self._N_crit = 1e18
else:
self._N_crit = 0.0 # (if 0, can never switch)
# Outer radius
self._R_out = max(disc.R_edge)
def mdot_XE(self, star, Mdot=0):
# Generic wrapper for initiating X-ray or EUV mass loss
# Without prescription, mass loss is 0
self._Mdot = Mdot
self._Mdot_true = Mdot
def Sigma_dot(self, R, star):
if self._type=='Primordial':
self.Sigma_dot_Primordial(R, star)
elif self._type=='InnerHole':
self.Sigma_dot_InnerHole(R, star)
def Sigma_dot_Primordial(self, R, star, ret=False):
# Without prescription, mass loss is 0
if ret:
return np.zeros(len(R)+1)
else:
self._Sigmadot = np.zeros_like(R)
def Sigma_dot_InnerHole(self, R, star, ret=False):
# Without prescription, mass loss is 0
if ret:
return np.zeros(len(R)+1)
else:
self._Sigmadot = np.zeros_like(R)
def scaled_R(self, R, star):
# Prescriptions may rescale the radius variable
# Without prescription, radius is unscaled
return R
def R_inner(self, star):
# Innermost mass loss
return 0
def check_dt(self, disc, dt):
# Work out the timescale to clear cell
where_photoevap = (self.dSigmadt > 0)
t_w = np.full_like(disc.R,np.inf)
t_w[where_photoevap] = disc.Sigma_G[where_photoevap] / self.dSigmadt[where_photoevap]
# Return minimum value for cells inside outer edge
indisc = (disc.R < self._R_out) * where_photoevap # Prohibit hole outside of mass loss region.
try:
            imin = argrelmin(t_w[indisc])[0][0] # Find local minima in the clearing time, neglecting the outer edge where it tails off. Take the first to avoid spurious solutions from noise in the dusty outskirts
        except IndexError: # If there is no local minimum, fall back to the global minimum as the hole location.
imin = np.argmin(t_w[indisc])
# Check against timestep and report
if (dt > t_w[where_photoevap][imin]): # If an entire cell can deplete
#if not self._Hole:
# print("Alert - hole can open after this timestep at {:.2f} AU".format(disc.R[imin]))
# print("Outer radius is currently {:.2f} AU".format(self._R_out))
self._Hole = True # Set hole flag
return t_w[where_photoevap][imin]
def remove_mass(self, disc, dt, external_photo=None):
# Find disc "outer edge" so we can apply mass loss only inside
if external_photo:
self._R_out = external_photo._Rot # If external photoevaporation is present, only consider radii inside its influence
else:
self._R_out = disc.Rout(thresh=1e-10)
if disc.Rout()==0.0:
print("Disc everywhere below density threshold. Declare Empty.")
self._empty = True
# Check whether hole can open
if not self._Hole: #self._type=='Primordial':
self.check_dt(disc, dt)
# Determine mass loss
dSigma = np.minimum(self.dSigmadt * dt, disc.Sigma_G) # Limit mass loss to density of cell
dSigma *= (disc.R < self._R_out) # Only apply mass loss inside disc outer edge
# Apply, preserving the dust mass
if hasattr(disc, 'Sigma_D'):
Sigma_D = disc.Sigma_D # Save the dust density
disc._Sigma -= dSigma
if hasattr(disc, 'Sigma_D'):
dusty = Sigma_D.sum(0)>0
disc.dust_frac[:,dusty] = np.fmin(Sigma_D[:,dusty]/disc.Sigma[dusty],disc.dust_frac[:,dusty]/disc.dust_frac.sum(0)[dusty])
disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0) # Renormalise to 1 if it exceeds
# Calculate actual mass loss given limit
if dt>0:
dM = 2*np.pi * disc.R * dSigma
self._Mdot_true = np.trapz(dM,disc.R) / dt * AU**2 / Msun
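    # get_Rhole locates the hole: cells with Sigma_G <= 1e-10 g cm^-2 inside the
    # photoevaporating region count as empty, and the hole edge is the cell just
    # inside the first non-empty cell beyond the innermost empty one; if no cell
    # is empty yet, the local minimum of Sigma_G is used instead, and if the
    # candidate turns out to be the disc outer edge, NotHoleError is raised and
    # handled below.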
def get_Rhole(self, disc, external_photo=None):
"""Deal with calls when there is no hole"""
if not self._Hole:
print("No hole for which to get radius. Ignoring command and returning nans.")
return np.nan, np.nan
"""Otherwise continue on to find hole
First find outer edge of disc - hole must be inside this"""
if external_photo:
self._R_out = external_photo._Rot # If external photoevaporation is present, only consider radii inside its influence
else:
self._R_out = disc.Rout(thresh=1e-10)
where_photoevap = (self.dSigmadt > 0)
indisc = (disc.R < self._R_out) * where_photoevap # Prohibit hole outside of mass loss region.
empty_indisc = (disc.Sigma_G <= 1e-10) * indisc # Consider empty if below 10^-10 g/cm^2
try:
if np.sum(empty_indisc) == 0: # If none in disc are empty
minima = argrelmin(disc.Sigma_G)
if len(minima[0]) > 0:
i_hole_out = minima[0][0] # Position of hole is minimum density
else: # No empty cells anymore - disc has cleared to outside
raise NotHoleError
else:
# First find the inner edge of the innermost hole
i_hole_in = np.nonzero(empty_indisc)[0][0]
# The hole cell is defined as the one inside the first non-empty cell outside the inner edge of the hole
outer_disc = ~empty_indisc * (disc.R>disc.R_edge[i_hole_in])
if np.sum(outer_disc) > 0:
i_hole_out = np.nonzero(outer_disc)[0][0] - 1
else: # No non-empty cells outside this - this is not a hole, but an outer edge.
raise NotHoleError
if i_hole_out == np.nonzero(indisc)[0][-1]: # This is not a hole, but the outermost photoevaporating cell
raise NotHoleError
"""If hole position drops by an order of magnitude, it is likely that the previous was really the clearing of low surface density material in the outer disc, so reset"""
if self._R_hole:
R_old = self._R_hole
if disc.R_edge[i_hole_out+1]/R_old<0.1:
self._reset = True
"""If everything worked, update hole properties"""
if not self._R_hole:
print("Hole opened at {:.2f} AU".format(disc.R_edge[i_hole_out+1]))
self._R_hole = disc.R_edge[i_hole_out+1]
self._N_hole = disc.column_density[i_hole_out]
# Test whether Thin
if (self._N_hole < self._N_crit):
self._Thin = True
except NotHoleError:
"""Potential hole isn't a hole but an outer edge"""
if self._type == 'Primordial':
self._Hole = False
self._reset = True
if self._R_hole:
print("No hole found")
print("Last known location {} AU".format(self._R_hole))
return 0, 0
elif self._type == 'InnerHole':
if not self._empty:
print("Transition Disc has cleared to outside")
self._empty = True
# Proceed as usual to report but without update
# Save state if tracking
return self._R_hole, self._N_hole
@property
def Mdot(self):
return self._Mdot
@property
def dSigmadt(self):
return self._Sigmadot
def __call__(self, disc, dt, external_photo=None):
# For inner hole discs, need to update the hole radius and then the mass-loss as the normalisation changes based on R, not just x~R-Rhole.
if self._type=='InnerHole':
self.get_Rhole(disc)
self.Sigma_dot(disc.R_edge, disc.star)
# Remove the mass
self.remove_mass(disc,dt, external_photo)
# Check for new holes
if self._Hole and not self._Thin: # If there is a hole but the inner disc is not already optically thin, update its properties
R_hole, N_hole = self.get_Rhole(disc, external_photo)
# Check if hole is now large enough that inner disc optically thin, switch internal photoevaporation to direct field if so
if self._Thin:
print("Column density to hole has fallen to N = {} < {} g cm^-2".format(N_hole,self._N_crit))
self._type = 'InnerHole'
# Run the mass loss rates to update the table
self.mdot_XE(disc.star)
self.Sigma_dot(disc.R_edge, disc.star)
# Report
print("At initiation of InnerHole Type, M_D = {} M_J, Mdot = {}, t_clear ~ {} yr".format(disc.Mtot()/Mjup, self._Mdot, disc.Mtot()/Msun/self._Mdot))
def ASCII_header(self):
return ("# InternalEvaporation, Type: {}, Mdot: {}"
"".format(self._type+self.__class__.__name__,self._Mdot))
def HDF5_attributes(self):
header = {}
header['Type'] = self._type+"/"+self._regime
header['Mdot'] = '{}'.format(self._Mdot)
return self.__class__.__name__, header
#################################################################################
"""""""""
X-ray dominated photoevaporation
-Following prescription of Owen, Ercolano and Clarke (2012)
-Following prescription of Picogna, Ercolano, Owen and Weber (2019)
"""""""""
#################################################################################
"""Owen, Ercolano and Clarke (2012)"""
class XrayDiscOwen(PhotoBase):
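    # Usage sketch (assumed driver loop): construct once with the disc, then call
    # per timestep; __call__ (inherited from PhotoBase) updates the profile,
    # removes mass from disc.Sigma in place and tracks hole opening.
    #
    #     photoevap = XrayDiscOwen(disc)
    #     photoevap(disc, dt)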
def __init__(self, disc, Type='Primordial', R_hole=None):
super().__init__(disc, Regime='X-ray', Type=Type)
# Parameters for Primordial mass loss profile
self._a1 = 0.15138
self._b1 = -1.2182
self._c1 = 3.4046
self._d1 = -3.5717
self._e1 = -0.32762
self._f1 = 3.6064
self._g1 = -2.4918
# Parameters for Inner Hole mass loss profile
self._a2 = -0.438226
self._b2 = -0.10658387
self._c2 = 0.5699464
self._d2 = 0.010732277
self._e2 = -0.131809597
self._f2 = -1.32285709
# If initiating with an Inner Hole disc, need to update properties
if self._type == 'InnerHole':
self._Hole = True
self._R_hole = R_hole
#self.get_Rhole(disc)
# Run the mass loss rates to update the table
self.Sigma_dot(disc.R_edge, disc.star)
def mdot_XE(self, star, Mdot=None):
# In Msun/yr
if Mdot is not None:
self._Mdot = Mdot
elif self._type=='Primordial':
self._Mdot = 6.25e-9 * star.M**(-0.068) * (star.L_X / 1e30)**(1.14) # Equation B1
elif self._type=='InnerHole':
self._Mdot = 4.8e-9 * star.M**(-0.148) * (star.L_X / 1e30)**(1.14) # Equation B4
else:
raise NotImplementedError("Disc is of unrecognised type, and no mass-loss rate has been manually specified")
self._Mdot_true = self._Mdot
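        # For example, a 0.7 Msun star with L_X = 1e30 erg/s gives
        # Mdot = 6.25e-9 * 0.7**(-0.068) ~ 6.4e-9 Msun/yr for a Primordial disc
        # (Equation B1), and 4.8e-9 * 0.7**(-0.148) ~ 5.1e-9 Msun/yr once an
        # inner hole has opened (Equation B4).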
def scaled_R(self, R, star):
# Where R in AU
x = 0.85 * R / star.M # Equation B3
if self._Hole:
y = 0.95 * (R-self._R_hole) / star.M # Equation B6
else:
y = R
return x, y
def R_inner(self, star):
# Innermost mass loss
return 0.7 / 0.85 * star.M
def Sigma_dot_Primordial(self, R, star, ret=False):
# Equation B2
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (x >= 0.7) * (x<=99) # No mass loss close to star, mass loss prescription becomes negative at log10(x)=1.996
logx = np.log(x[where_photoevap])
log10 = np.log(10)
log10x = logx/log10
# First term
exponent = self._a1 * log10x**6 + self._b1 * log10x**5 + self._c1 * log10x**4 + self._d1 * log10x**3 + self._e1 * log10x**2 + self._f1 * log10x + self._g1
t1 = 10**exponent
# Second term
terms = 6*self._a1*logx**5/log10**7 + 5*self._b1*logx**4/log10**6 + 4*self._c1*logx**3/log10**5 + 3*self._d1*logx**2/log10**4 + 2*self._e1*logx/log10**3 + self._f1/log10**2
t2 = terms/x[where_photoevap]**2
# Third term
t3 = np.exp(-(x[where_photoevap]/100)**10)
# Combine terms
Sigmadot[where_photoevap] = t1 * t2 * t3
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
def Sigma_dot_InnerHole(self, R, star, ret=False):
# Equation B5
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (y >= 0.0) # No mass loss inside hole
use_y = y[where_photoevap]
# Exponent of second term
exp2 = -(use_y/57)**10
# Numerator
terms = self._a2*self._b2 * np.exp(self._b2*use_y+exp2) + self._c2*self._d2 * np.exp(self._d2*use_y+exp2) + self._e2*self._f2 * np.exp(self._f2*use_y+exp2)
# Divide by Denominator
Sigmadot[where_photoevap] = terms/R[where_photoevap]
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
# Mopping up in the gap
mop_up = (x >= 0.7) * (y < 0.0)
Sigmadot[mop_up] = np.inf
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
"""Picogna, Ercolano, Owen and Weber (2019)"""
class XrayDiscPicogna(PhotoBase):
def __init__(self, disc, Type='Primordial', R_hole=None):
super().__init__(disc, Regime='X-ray', Type=Type)
# Parameters for Primordial mass loss profile
self._a1 = -0.5885
self._b1 = 4.3130
self._c1 = -12.1214
self._d1 = 16.3587
self._e1 = -11.4721
self._f1 = 5.7248
self._g1 = -2.8562
# Parameters for Inner Hole mass loss profile
self._a2 = 0.11843
self._b2 = 0.99695
self._c2 = 0.48835
# If initiating with an Inner Hole disc, need to update properties
if self._type == 'InnerHole':
self._Hole = True
self._R_hole = R_hole
#self.get_Rhole(disc)
# Run the mass loss rates to update the table
self.Sigma_dot(disc.R_edge, disc.star)
def mdot_XE(self, star, Mdot=None):
# In Msun/yr
if Mdot is not None:
self._Mdot = Mdot
elif self._type=='Primordial':
logMd = -2.7326 * np.exp((np.log(np.log(star.L_X)/np.log(10))-3.3307)**2/-2.9868e-3) - 7.2580 # Equation 5
self._Mdot = 10**logMd
elif self._type=='InnerHole':
logMd = -2.7326 * np.exp((np.log(np.log(star.L_X)/np.log(10))-3.3307)**2/-2.9868e-3) - 7.2580 # 1.12 * Equation 5
self._Mdot = 1.12 * (10**logMd)
else:
raise NotImplementedError("Disc is of unrecognised type, and no mass-loss rate has been manually specified")
self._Mdot_true = self._Mdot
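# Worked example of Equation 5 (approximate, assuming L_X = 1e30 erg/s):
#   log10(L_X) = 30, so np.log(np.log(L_X)/np.log(10)) = ln(30) ~ 3.401
#   logMd ~ -2.7326*exp((3.401 - 3.3307)**2 / -2.9868e-3) - 7.2580 ~ -7.78
#   Mdot ~ 10**(-7.78) ~ 1.7e-8 Msun/yr for a Primordial disc; InnerHole discs use 1.12x this.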
def scaled_R(self, R, star):
# Where R in AU
# All are divided by stellar mass normalised to 0.7 Msun (value used by Picogna+19) to represent rescaling by gravitational radius
x = R / (star.M/0.7)
if self._Hole:
y = (R-self._R_hole) / (star.M/0.7) # Equation B6
else:
y = R / (star.M/0.7)
return x, y
def R_inner(self, star):
# Innermost mass loss
if self._type=='Primordial':
return 0 # Mass loss possible throughout
elif self._type=='InnerHole':
return self._R_hole # Mass loss profile applies outside hole
else:
return 0 # If unspecified, assume mass loss possible throughout
def Sigma_dot_Primordial(self, R, star, ret=False):
# Equation B2
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (x<=137) # Mass loss prescription becomes negative at log10(x)~2.1375, i.e. x~137
logx = np.log(x[where_photoevap])
log10 = np.log(10)
log10x = logx/log10
# First term
exponent = self._a1 * log10x**6 + self._b1 * log10x**5 + self._c1 * log10x**4 + self._d1 * log10x**3 + self._e1 * log10x**2 + self._f1 * log10x + self._g1
t1 = 10**exponent
# Second term
terms = 6*self._a1*log10x**5 + 5*self._b1*log10x**4 + 4*self._c1*log10x**3 + 3*self._d1*log10x**2 + 2*self._e1*log10x + self._f1
t2 = terms/(2*np.pi*x[where_photoevap]**2)
# Combine terms
Sigmadot[where_photoevap] = t1 * t2
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
def Sigma_dot_InnerHole(self, R, star, ret=False):
# Equation B5
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (y > 0.0) * (y < -self._c2/np.log(self._b2)) # No mass loss inside hole, becomes negative at x=-c/ln(b)
use_y = y[where_photoevap]
# Numerator
terms = self._a2 * np.power(self._b2,use_y) * np.power(use_y,self._c2-1) * (use_y * np.log(self._b2) + self._c2)
# Divide by Denominator
Sigmadot[where_photoevap] = terms/(2*np.pi*R[where_photoevap])
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
# Mopping up in the gap - assume usual primordial rates there.
Sigmadot[(y<=0.0) * (x<=137)] = self.Sigma_dot_Primordial(R, star, ret=True)[(y<=0.0)*(x<=137)]/1.12 # divide by 1.12 so that normalise to correct mass loss rate
mop_up = (x > 137) * (y < 0.0)
Sigmadot[mop_up] = np.inf # Avoid having discontinuous mass-loss by filling in the rest
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
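# Numerical note on the outer cutoff used in where_photoevap above (from the fit coefficients):
#   ln(b2) = ln(0.99695) ~ -3.06e-3, so -c2/ln(b2) ~ 0.48835/3.06e-3 ~ 160,
#   i.e. the inner-hole profile is applied for 0 < y <~ 160 scaled AU beyond the hole edge.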
#################################################################################
"""""""""
EUV dominated photoevaporation
-Following prescription given in Alexander and Armitage (2007)
and based on Font, McCarthy, Johnstone and Ballantyne (2004) for Primordial Discs
and based on Alexander, Clarke and Pringle (2006) for Inner Hole Discs
"""""""""
#################################################################################
class EUVDiscAlexander(PhotoBase):
def __init__(self, disc, Type='Primordial', R_hole=None):
super().__init__(disc, Regime='EUV', Type=Type)
# Parameters for mass loss profiles
self._cs = 10 # Sound speed in km s^-1
self._RG = disc.star.M / (self._cs*1e5 /Omega0/AU)**2 # Gravitational Radius in AU
self._mu = 1.35
self._aB = 2.6e-13 # Case B Recombination coeff. in cm^3 s^-1
self._C1 = 0.14
self._A = 0.3423
self._B = -0.3612
self._D = 0.2457
self._C2 = 0.235
self._a = 2.42
h = disc.H/disc.R
he = np.empty_like(disc.R_edge)
he[1:-1] = 0.5*(h[1:] + h[:-1])
he[0] = 1.5*h[0] - 0.5*h[1]
he[-1] = 1.5*h[-1] - 0.5*h[-2]
self._h = he
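# The aspect ratio h = H/R is defined at cell centres; the lines above estimate it at cell
# edges by averaging interior neighbours and linearly extrapolating the two boundary edges.
# Worked example with illustrative values h = [0.05, 0.06, 0.07]:
#   interior edges -> 0.5*(0.05+0.06) = 0.055 and 0.5*(0.06+0.07) = 0.065
#   inner edge     -> 1.5*0.05 - 0.5*0.06 = 0.045
#   outer edge     -> 1.5*0.07 - 0.5*0.06 = 0.075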
# If initiating with an Inner Hole disc, need to update properties
if self._type == 'InnerHole':
self._Hole = True
self._R_hole = R_hole
#self.get_Rhole(disc)
# Run the mass loss rates to update the table
self.Sigma_dot(disc.R_edge, disc.star)
def mdot_XE(self, star, Mdot=0):
# Store Mdot calculated from profile
self._Mdot = Mdot # In Msun/yr
self._Mdot_true = self._Mdot
def scaled_R(self, R, star):
if self._type=='Primordial':
return R / self._RG # Normalise to RG
elif self._type=='InnerHole':
return R / self.R_inner() # Normalise to inner edge
else:
return R # If unspecified, don't modify
def R_inner(self):
# Innermost mass loss
if self._type=='Primordial':
return 0.1 * self._RG # Mass loss profile is only positive for >0.1 RG
elif self._type=='InnerHole':
return self._R_hole # Mass loss profile applies outside hole
else:
return 0 # If unspecified, assume mass-loss possible throughout
def Sigma_dot_Primordial(self, R, star, ret=False):
Sigmadot = np.zeros_like(R)
x = self.scaled_R(R,star)
where_photoevap = (x >= 0.1) # No mass loss close to star
# Equation A3
nG = self._C1 * (3 * star.Phi / (4*np.pi * (self._RG*AU)**3 * self._aB))**(1/2) # cm^-3
# Equation A2
n0 = nG * (2 / (x**7.5 + x**12.5))**(1/5)
# Equation A4
u1 = self._cs*1e5*yr/Omega0 * self._A * np.exp(self._B * (x-0.1)) * (x-0.1)**self._D # cm yr^-1
# Combine terms (Equation A1)
Sigmadot[where_photoevap] = 2 * self._mu * m_H * (n0 * u1)[where_photoevap] # g cm^-2 /yr
Sigmadot = np.maximum(Sigmadot,0)
# Work out total mass loss rate
dMdot = 2*np.pi * R * Sigmadot
Mdot = np.trapz(dMdot,R) # g yr^-1 (AU/cm)^2
# Normalise, convert to cgs
Mdot = Mdot * AU**2/Msun # Msun yr^-1
# Store result
self.mdot_XE(star, Mdot=Mdot)
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
def Sigma_dot_InnerHole(self, R, star, ret=False):
Sigmadot = np.zeros_like(R)
x = self.scaled_R(R,star)
where_photoevap = (x > 1) # No mass loss inside hole
# Combine terms (Equation A5)
Sigmadot[where_photoevap] = (2 * self._mu * m_H * self._C2 * self._cs*1e5*yr/Omega0 * (star.Phi / (4*np.pi * (self.R_inner()*AU)**3 * self._aB * self._h))**(1/2) * x**(-self._a))[where_photoevap] # g cm^-2 /yr
Sigmadot = np.maximum(Sigmadot,0)
# Work out total mass loss rate
dMdot = 2*np.pi * R * Sigmadot
Mdot = np.trapz(dMdot,R) # g yr^-1 (AU/cm)^2
# Normalise, convert to cgs
Mdot = Mdot * AU**2/Msun # Msun yr^-1
# Store result
self.mdot_XE(star, Mdot=Mdot)
# Mopping up in the gap
mop_up = (R >= 0.1 * self._RG) * (x <= 1.0)
Sigmadot[mop_up] = np.inf
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
#################################################################################
"""""""""
Functions for running as main
Designed for plotting to test things out
"""""""""
#################################################################################
class DummyDisc(object):
def __init__(self, R, star, MD=10, RC=100):
self._M = MD * Mjup
self.Rc = RC
self.R_edge = R
self.R = 0.5*(self.R_edge[1:]+self.R_edge[:-1])
self._Sigma = self._M / (2 * np.pi * self.Rc * self.R * AU**2) * np.exp(-self.R/self.Rc)
self.star = star
def Rout(self, thresh=None):
return max(self.R_edge)
@property
def Sigma(self):
return self._Sigma
@property
def Sigma_G(self):
return self._Sigma
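# Note on the surface density used above: it is the exponentially tapered 1/R profile
# Sigma(R) = M / (2*pi*Rc*R*AU**2) * exp(-R/Rc), for which
#   integral_0^inf Sigma * 2*pi*R * AU**2 dR = M * integral_0^inf exp(-R/Rc)/Rc dR = M,
# so the total disc mass equals MD*Mjup provided the grid extends well beyond Rc.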
def main():
Sigma_dot_plot()
Test_Removal()
def Test_Removal():
"""Removes gas fom a power law disc in regular timesteps without viscous evolution etc"""
star1 = PhotoStar(LX=1e30, M=1.0, R=2.5, T_eff=4000)
R = np.linspace(0.1,200,2000)
disc1 = DummyDisc(R, star1, RC=10)
internal_photo = XrayDiscPicogna(disc1)
plt.figure()
for t in np.linspace(0,2e3,6):
internal_photo(disc1, 2e3)
plt.loglog(0.5*(R[1:]+R[:-1]), disc1.Sigma, label='{}'.format(t))
plt.xlabel("R / AU")
plt.ylabel("$\Sigma_G~/~\mathrm{g~cm^{-2}}$")
plt.legend(title='Time / yr')
plt.show()
def Sigma_dot_plot():
"""Plot a comparison of the mass loss rate prescriptions"""
from control_scripts import run_model
# Set up dummy model
parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m", type=str, default=DefaultModel)
args = parser.parse_args()
model = json.load(open(args.model, 'r'))
plt.figure(figsize=(6,6))
starX = PhotoStar(LX=1e30, M=model['star']['mass'], R=model['star']['radius'], T_eff=model['star']['T_eff'])
starE = PhotoStar(Phi=1e42, M=model['star']['mass'], R=model['star']['radius'], T_eff=model['star']['T_eff'])
disc = run_model.setup_disc(model)
R = disc.R
# Calculate EUV rates
disc._star = starE
internal_photo_E = EUVDiscAlexander(disc)
Sigma_dot_E = internal_photo_E.dSigmadt
photoevaporating_E = (Sigma_dot_E>0)
t_w_E = disc.Sigma[photoevaporating_E] / Sigma_dot_E[photoevaporating_E]
print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_E)]))
print("Time minimum at R = {} AU".format(R[photoevaporating_E][np.argmin(t_w_E)]))
plt.loglog(R, Sigma_dot_E, label=r'EUV (AA07), $\Phi={}~\mathrm{{s^{{-1}}}}$'.format(1e42), linestyle='--')
# Calculate X-ray rates
disc._star = starX
internal_photo_X = XrayDiscOwen(disc)
Sigma_dot_X = internal_photo_X.dSigmadt
photoevaporating_X = (Sigma_dot_X>0)
t_w_X = disc.Sigma[photoevaporating_X] / Sigma_dot_X[photoevaporating_X]
print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_X)]))
print("Time minimum at R = {} AU".format(R[photoevaporating_X][np.argmin(t_w_X)]))
plt.loglog(R, Sigma_dot_X, label=r'X-ray (OEC12), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
# Calculate X-ray rates
disc._star = starX
internal_photo_X2 = XrayDiscPicogna(disc)
Sigma_dot_X2 = internal_photo_X2.dSigmadt
photoevaporating_X2 = (Sigma_dot_X2>0)
t_w_X2 = disc.Sigma[photoevaporating_X2] / Sigma_dot_X2[photoevaporating_X2]
print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_X2)]))
print("Time minimum at R = {} AU".format(R[photoevaporating_X2][np.argmin(t_w_X2)]))
plt.loglog(R, Sigma_dot_X2, label=r'X-ray (PEOW19), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
# Plot mass loss rates
plt.xlabel("R / AU")
plt.ylabel("$\dot{\Sigma}_{\\rm w}$ / g cm$^{-2}$ yr$^{-1}$")
plt.xlim([0.1,1000])
plt.ylim([1e-8,1e-2])
plt.legend()
plt.show()
# Plot depletion time
plt.figure(figsize=(6,6))
plt.loglog(R[photoevaporating_E], t_w_E, label=r'EUV (AA07), $\Phi={}~\mathrm{{s^{{-1}}}}$'.format(1e42), linestyle='--')
plt.loglog(R[photoevaporating_X], t_w_X, label=r'X-ray (OEC12), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
plt.loglog(R[photoevaporating_X2], t_w_X2, label=r'X-ray (PEOW19), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
plt.xlabel("R / AU")
plt.ylabel("$t_w / \mathrm{yr}$")
plt.xlim([0.1,1000])
plt.ylim([1e4,1e12])
plt.legend()
plt.show()
if __name__ == "__main__":
# Set extra things
DefaultModel = "../control_scripts/DiscConfig_default.json"
plt.rcParams['text.usetex'] = "True"
plt.rcParams['font.family'] = "serif"
main()
| gpl-3.0 |
puttarajubr/commcare-hq | corehq/apps/hqadmin/system_info/checks.py | 2 | 3064 | from django.core import cache
from django.conf import settings
from django.utils.safestring import mark_safe
from restkit import Resource
import json
from corehq.apps.hqadmin.system_info.utils import human_bytes
from soil import heartbeat
def check_redis():
#redis status
ret = {}
redis_status = ""
redis_results = ""
if 'redis' in settings.CACHES:
rc = cache.get_cache('redis')
try:
import redis
redis_api = redis.StrictRedis.from_url('redis://%s' % rc._server)
info_dict = redis_api.info()
redis_status = "Online"
redis_results = "Used Memory: %s" % info_dict['used_memory_human']
except Exception as ex:
redis_status = "Offline"
redis_results = "Redis connection error: %s" % ex
else:
redis_status = "Not Configured"
redis_results = "Redis is not configured on this system!"
ret['redis_status'] = redis_status
ret['redis_results'] = redis_results
return ret
def check_rabbitmq():
ret ={}
mq_status = "Unknown"
if settings.BROKER_URL.startswith('amqp'):
amqp_parts = settings.BROKER_URL.replace('amqp://','').split('/')
mq_management_url = amqp_parts[0].replace('5672', '15672')
vhost = amqp_parts[1]
try:
mq = Resource('http://%s' % mq_management_url, timeout=2)
vhost_dict = json.loads(mq.get('api/vhosts', timeout=2).body_string())
mq_status = "Offline"
for d in vhost_dict:
if d['name'] == vhost:
mq_status='RabbitMQ OK'
except Exception as ex:
mq_status = "RabbitMQ Error: %s" % ex
else:
mq_status = "RabbitMQ Not configured"
ret['rabbitmq_status'] = mq_status
return ret
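# Illustrative walk-through of the URL handling above (hypothetical settings value):
#   BROKER_URL = 'amqp://guest:guest@localhost:5672/commcarehq'
#   amqp_parts = ['guest:guest@localhost:5672', 'commcarehq']
#   mq_management_url = 'guest:guest@localhost:15672'  (RabbitMQ management API port)
#   vhost = 'commcarehq', matched against the names returned by GET api/vhosts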
def check_celery_health():
ret = {}
celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
worker_status = ""
if celery_monitoring:
cresource = Resource(celery_monitoring, timeout=3)
all_workers = {}
try:
t = cresource.get("api/workers").body_string()
all_workers = json.loads(t)
except Exception as ex:
pass
worker_ok = '<span class="label label-success">OK</span>'
worker_bad = '<span class="label label-important">Down</span>'
tasks_ok = 'label-success'
tasks_full = 'label-warning'
worker_info = []
for hostname, w in all_workers.items():
status_html = mark_safe(worker_ok if w['status'] else worker_bad)
tasks_class = tasks_full if w['running_tasks'] == w['concurrency'] else tasks_ok
tasks_html = mark_safe('<span class="label %s">%d / %d</span> :: %d' % (tasks_class, w['running_tasks'], w['concurrency'], w['completed_tasks']))
worker_info.append(' '.join([hostname, status_html, tasks_html]))
worker_status = '<br>'.join(worker_info)
ret['worker_status'] = mark_safe(worker_status)
ret['heartbeat'] = heartbeat.is_alive()
return ret
| bsd-3-clause |
phalax4/CarnotKE | jyhton/lib-python/2.7/test/test_abc.py | 119 | 7715 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for abc.py."""
import unittest, weakref
from test import test_support
import abc
from inspect import isabstract
class TestABC(unittest.TestCase):
def test_abstractmethod_basics(self):
@abc.abstractmethod
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
def test_abstractproperty_basics(self):
@abc.abstractproperty
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
class C:
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def foo(self): return 3
class D(C):
@property
def foo(self): return super(D, self).foo
self.assertEqual(D().foo, 3)
def test_abstractmethod_integration(self):
for abstractthing in [abc.abstractmethod, abc.abstractproperty]:
class C:
__metaclass__ = abc.ABCMeta
@abstractthing
def foo(self): pass # abstract
def bar(self): pass # concrete
self.assertEqual(C.__abstractmethods__, set(["foo"]))
self.assertRaises(TypeError, C) # because foo is abstract
self.assertTrue(isabstract(C))
class D(C):
def bar(self): pass # concrete override of concrete
self.assertEqual(D.__abstractmethods__, set(["foo"]))
self.assertRaises(TypeError, D) # because foo is still abstract
self.assertTrue(isabstract(D))
class E(D):
def foo(self): pass
self.assertEqual(E.__abstractmethods__, set())
E() # now foo is concrete, too
self.assertFalse(isabstract(E))
class F(E):
@abstractthing
def bar(self): pass # abstract override of concrete
self.assertEqual(F.__abstractmethods__, set(["bar"]))
self.assertRaises(TypeError, F) # because bar is abstract now
self.assertTrue(isabstract(F))
def test_subclass_oldstyle_class(self):
class A:
__metaclass__ = abc.ABCMeta
class OldstyleClass:
pass
self.assertFalse(issubclass(OldstyleClass, A))
self.assertFalse(issubclass(A, OldstyleClass))
def test_isinstance_class(self):
class A:
__metaclass__ = abc.ABCMeta
class OldstyleClass:
pass
self.assertFalse(isinstance(OldstyleClass, A))
self.assertTrue(isinstance(OldstyleClass, type(OldstyleClass)))
self.assertFalse(isinstance(A, OldstyleClass))
# This raises a recursion depth error, but is low-priority:
# self.assertTrue(isinstance(A, abc.ABCMeta))
def test_registration_basics(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
self.assertNotIsInstance(b, A)
self.assertNotIsInstance(b, (A,))
A.register(B)
self.assertTrue(issubclass(B, A))
self.assertTrue(issubclass(B, (A,)))
self.assertIsInstance(b, A)
self.assertIsInstance(b, (A,))
class C(B):
pass
c = C()
self.assertTrue(issubclass(C, A))
self.assertTrue(issubclass(C, (A,)))
self.assertIsInstance(c, A)
self.assertIsInstance(c, (A,))
def test_isinstance_invalidation(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(isinstance(b, A))
self.assertFalse(isinstance(b, (A,)))
A.register(B)
self.assertTrue(isinstance(b, A))
self.assertTrue(isinstance(b, (A,)))
def test_registration_builtins(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(int)
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
self.assertTrue(issubclass(int, A))
self.assertTrue(issubclass(int, (A,)))
class B(A):
pass
B.register(basestring)
self.assertIsInstance("", A)
self.assertIsInstance("", (A,))
self.assertTrue(issubclass(str, A))
self.assertTrue(issubclass(str, (A,)))
def test_registration_edge_cases(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(A) # should pass silently
class A1(A):
pass
self.assertRaises(RuntimeError, A1.register, A) # cycles not allowed
class B(object):
pass
A1.register(B) # ok
A1.register(B) # should pass silently
class C(A):
pass
A.register(C) # should pass silently
self.assertRaises(RuntimeError, C.register, A) # cycles not allowed
C.register(B) # ok
def test_register_non_class(self):
class A(object):
__metaclass__ = abc.ABCMeta
self.assertRaisesRegexp(TypeError, "Can only register classes",
A.register, 4)
def test_registration_transitiveness(self):
class A:
__metaclass__ = abc.ABCMeta
self.assertTrue(issubclass(A, A))
self.assertTrue(issubclass(A, (A,)))
class B:
__metaclass__ = abc.ABCMeta
self.assertFalse(issubclass(A, B))
self.assertFalse(issubclass(A, (B,)))
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
class C:
__metaclass__ = abc.ABCMeta
A.register(B)
class B1(B):
pass
self.assertTrue(issubclass(B1, A))
self.assertTrue(issubclass(B1, (A,)))
class C1(C):
pass
B1.register(C1)
self.assertFalse(issubclass(C, B))
self.assertFalse(issubclass(C, (B,)))
self.assertFalse(issubclass(C, B1))
self.assertFalse(issubclass(C, (B1,)))
self.assertTrue(issubclass(C1, A))
self.assertTrue(issubclass(C1, (A,)))
self.assertTrue(issubclass(C1, B))
self.assertTrue(issubclass(C1, (B,)))
self.assertTrue(issubclass(C1, B1))
self.assertTrue(issubclass(C1, (B1,)))
C1.register(int)
class MyInt(int):
pass
self.assertTrue(issubclass(MyInt, A))
self.assertTrue(issubclass(MyInt, (A,)))
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
def test_all_new_methods_are_called(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
counter = 0
def __new__(cls):
B.counter += 1
return super(B, cls).__new__(cls)
class C(A, B):
pass
self.assertEqual(B.counter, 0)
C()
self.assertEqual(B.counter, 1)
def test_cache_leak(self):
# See issue #2521.
class A(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def f(self):
pass
class C(A):
def f(self):
A.f(self)
r = weakref.ref(C)
# Trigger cache.
C().f()
del C
test_support.gc_collect()
self.assertEqual(r(), None)
def test_main():
test_support.run_unittest(TestABC)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.127/roles/lib_openshift/library/oc_secret.py | 12 | 57672 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_secret
short_description: Module to manage openshift secrets
description:
- Manage openshift secrets programmatically.
options:
state:
description:
- If present, the secret will be created if it doesn't exist or updated if different. If absent, the secret will be removed if present. If list, information about the secret will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
files:
description:
- A list of files provided for secrets
required: false
default: None
aliases: []
delete_after:
description:
- Whether or not to delete the files after processing them.
required: false
default: false
aliases: []
contents:
description:
- Content of the secrets
required: false
default: None
aliases: []
force:
description:
- Whether or not to force the operation
required: false
default: false
aliases: []
decode:
description:
- base64 decode the object
required: false
default: false
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create secret
oc_secret:
state: present
namespace: openshift-infra
name: metrics-deployer
files:
- name: nothing
path: /dev/null
register: secretout
run_once: true
- name: get ca from hawkular
oc_secret:
state: list
namespace: openshift-infra
name: hawkular-metrics-certificate
decode: True
register: hawkout
run_once: true
- name: Create secrets
oc_secret:
namespace: mynamespace
name: mysecrets
contents:
- path: data.yml
data: "{{ data_content }}"
- path: auth-keys
data: "{{ auth_keys_content }}"
- path: configdata.yml
data: "{{ configdata_content }}"
- path: cert.crt
data: "{{ cert_content }}"
- path: key.pem
data: "{{ osso_site_key_content }}"
- path: ca.cert.pem
data: "{{ ca_cert_content }}"
register: secretout
'''
# -*- -*- -*- End included fragment: doc/secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, value):
''' setter method for separator '''
self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
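# Illustrative example of the key parsing above (default separator '.'):
#   Yedit.parse_key('a.b[1].c') -> [('', 'a'), ('', 'b'), ('1', ''), ('', 'c')]
# i.e. each tuple is (array_index, dict_key), with exactly one of the two populated.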
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Set an item in a dictionary using key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b (with sep='#')
sets d['a']['b'] to the supplied item
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
% (inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# If vtype is not str then go ahead and attempt to yaml load it.
if isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming ' +
'value. value=[%s] vtype=[%s]'
% (type(inc_value), vtype))
return inc_value
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=module.params['src'],
backup=module.params['backup'],
separator=module.params['separator'])
if module.params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and \
module.params['state'] != 'present':
return {'failed': True,
'msg': ('Error opening file [%s]. Verify that the '
'file exists, that it has correct'
' permissions, and is valid yaml.') % module.params['src']}
if module.params['state'] == 'list':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
rval = yamlfile.get(module.params['key']) or {}
return {'changed': False, 'result': rval, 'state': "list"}
elif module.params['state'] == 'absent':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
rval = yamlfile.pop(module.params['key'],
module.params['value'])
else:
rval = yamlfile.delete(module.params['key'])
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
module.params['value'] is None:
return {'changed': False,
'result': yamlfile.yaml_dict,
'state': "present"}
yamlfile.yaml_dict = content
# we were passed a value; parse it
if module.params['value']:
value = Yedit.parse_value(module.params['value'],
module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
elif module.params['append']:
rval = yamlfile.append(key, value)
else:
rval = yamlfile.put(key, value)
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0],
'result': rval[1], 'state': "present"}
# no edits to make
if module.params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': "present"}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
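# Illustrative behaviour (hypothetical paths): if 'oc' is not on PATH but exists at
# /usr/local/bin/oc, locate_oc_binary() returns '/usr/local/bin/oc'; if it cannot be
# found in PATH or the additional lookups, the bare name 'oc' is returned unchanged.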
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, rname, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource, rname]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
elif rname:
cmd.append(rname)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.args:
err = err.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
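# Illustrative sketch of stringify() with hypothetical options:
#   options = {'node_selector': {'value': 'region=infra', 'include': True},
#              'replicas': {'value': 0, 'include': True},
#              'dry_run': {'value': None, 'include': True}}
#   -> ['--node-selector=region=infra', '--replicas=0']
# Integer zero is kept (the isinstance check) while None/empty values are dropped.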
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
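# Illustrative result of create_dict() (hypothetical values; Kubernetes expects the values
# under 'data' to already be base64-encoded, e.g. 'c2VjcmV0' is base64 for 'secret'):
#   SecretConfig('mysecret', 'default', '/etc/origin/master/admin.kubeconfig',
#                secrets={'password': 'c2VjcmV0'}).data ==
#   {'apiVersion': 'v1', 'kind': 'Secret',
#    'metadata': {'name': 'mysecret', 'namespace': 'default'},
#    'data': {'password': 'c2VjcmV0'}}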
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
@secrets.setter
def secrets(self, value):
'''secret property setter'''
self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_secret.py -*- -*- -*-
# pylint: disable=wrong-import-position,wrong-import-order
import base64
# pylint: disable=too-many-arguments
class OCSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
namespace,
secret_name=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
self.decode = decode
def get(self):
'''return a secret by name '''
results = self._get('secrets', self.name)
results['decoded'] = {}
results['exists'] = False
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
results['returncode'] = 0
return results
def delete(self):
'''delete a secret by name'''
return self._delete('secrets', self.name)
def create(self, files=None, contents=None):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
return results
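    # Illustrative example (names and paths assumed, not from the original
    # source): with files=[{'name': 'username', 'path': '/tmp/username'}] the
    # command built above is roughly `oc secrets new <secret-name> username=/tmp/username`.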
def update(self, files, force=False):
'''run update secret
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
secret = self.prep_secret(files)
if secret['returncode'] != 0:
return secret
sfile_path = '/tmp/%s' % self.name
with open(sfile_path, 'w') as sfd:
sfd.write(json.dumps(secret['results']))
atexit.register(Utils.cleanup, [sfile_path])
return self._replace(sfile_path, force=force)
def prep_secret(self, files=None, contents=None):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocsecret = OCSecret(params['namespace'],
params['name'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = ocsecret.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, state: 'list'}
if not params['name']:
return {'failed': True,
'msg': 'Please specify a name when state is absent|present.'}
########
# Delete
########
if state == 'absent':
if not Utils.exists(api_rval['results'], params['name']):
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = ocsecret.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
if state == 'present':
if params['files']:
files = params['files']
elif params['contents']:
files = Utils.create_tmp_files_from_contents(params['contents'])
else:
return {'failed': True,
'msg': 'Either specify files or contents.'}
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True,
'msg': 'Would have performed a create.'}
api_rval = ocsecret.create(files, params['contents'])
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
########
# Update
########
secret = ocsecret.prep_secret(params['files'], params['contents'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
return {'changed': False,
'results': secret['results'],
'state': 'present'}
if check_mode:
return {'changed': True,
'msg': 'Would have performed an update.'}
api_rval = ocsecret.update(files, force=params['force'])
# Remove files
if secret and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_secret.py -*- -*- -*-
def main():
'''
ansible oc module for managing OpenShift Secrets
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
force=dict(default=False, type='bool'),
decode=dict(default=False, type='bool'),
),
mutually_exclusive=[["contents", "files"]],
supports_check_mode=True,
)
rval = OCSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_secret.py -*- -*- -*-
| apache-2.0 |
CERNDocumentServer/invenio | modules/bibsword/lib/bibsword_webinterface.py | 1 | 15249 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibSword Web Interface.
"""
from invenio.access_control_engine import(
acc_authorize_action
)
import invenio.bibsword_client as sword_client
from invenio.config import(
CFG_SITE_LANG,
CFG_SITE_URL
)
from invenio.messages import(
gettext_set_language
)
from invenio.webinterface_handler import(
wash_urlargd,
WebInterfaceDirectory
)
from invenio.webpage import(
page
)
from invenio.webuser import(
getUid,
page_not_authorized
)
__lastupdated__ = """$Date$"""
class WebInterfaceSwordClient(WebInterfaceDirectory):
"""Web interface for the BibSword client."""
_exports = [
"",
"servers",
"server_options",
"submissions",
"submission_options",
"submit",
"submit_step_1",
"submit_step_2",
"submit_step_3",
"submit_step_4",
]
def submissions(self, req, form):
"""Web interface for the existing submissions."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client/",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"ln": (str, CFG_SITE_LANG),
})
# Get the user ID
uid = getUid(req)
# Set language for i18n text auto generation
ln = argd["ln"]
_ = gettext_set_language(ln)
body = sword_client.perform_request_submissions(
ln
)
navtrail = """
> <a class="navtrail" href="%(CFG_SITE_URL)s/sword_client">%(label)s</a>
""" % {
'CFG_SITE_URL': CFG_SITE_URL,
'label': _("Sword Client"),
}
return page(
title=_("Submissions"),
body=body,
navtrail=navtrail,
lastupdated=__lastupdated__,
req=req,
language=ln
)
def submission_options(self, req, form):
"""Web interface for the options on the submissions."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"option": (str, ""),
"action": (str, "submit"),
"server_id": (int, 0),
"status_url": (str, ""),
"ln": (str, CFG_SITE_LANG),
})
if argd["option"] in ("update",):
option = argd["option"]
else:
option = ""
if argd["action"] in ("submit",):
action = argd["action"]
else:
action = ""
server_id = argd["server_id"]
status_url = argd["status_url"]
ln = argd["ln"]
(error, result) = sword_client.perform_request_submission_options(
option,
action,
server_id,
status_url,
ln
)
if error:
req.set_content_type("text/plain; charset=utf-8")
req.set_status("400")
req.send_http_header()
req.write("Error: {0}".format(error))
return
return result
def servers(self, req, form):
"""Web interface for the available servers."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"manage_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client/",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"ln": (str, CFG_SITE_LANG),
})
# Get the user ID
uid = getUid(req)
# Set language for i18n text auto generation
ln = argd["ln"]
_ = gettext_set_language(ln)
body = sword_client.perform_request_servers(
ln
)
navtrail = """
> <a class="navtrail" href="%(CFG_SITE_URL)s/sword_client">%(label)s</a>
""" % {
'CFG_SITE_URL': CFG_SITE_URL,
'label': _("Sword Client"),
}
return page(
title=_("Servers"),
body=body,
navtrail=navtrail,
lastupdated=__lastupdated__,
req=req,
language=ln
)
def server_options(self, req, form):
"""Web interface for the options on the available servers."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"manage_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"option": (str, ""),
"action": (str, "submit"),
"server_id": (int, 0),
"sword_client_server_name": (str, ""),
"sword_client_server_engine": (str, ""),
"sword_client_server_username": (str, ""),
"sword_client_server_password": (str, ""),
"sword_client_server_email": (str, ""),
"sword_client_server_update_frequency": (str, ""),
"ln": (str, CFG_SITE_LANG),
})
if argd["option"] in ("add", "update", "modify", "delete"):
option = argd["option"]
else:
option = ""
if argd["action"] in ("prepare", "submit"):
action = argd["action"]
else:
action = ""
server_id = argd["server_id"]
server = (
argd["sword_client_server_name"],
argd["sword_client_server_engine"],
argd["sword_client_server_username"],
argd["sword_client_server_password"],
argd["sword_client_server_email"],
argd["sword_client_server_update_frequency"],
)
ln = argd["ln"]
(error, result) = sword_client.perform_request_server_options(
option,
action,
server_id,
server,
ln
)
if error:
req.set_content_type("text/plain; charset=utf-8")
req.set_status("400")
req.send_http_header()
req.write("Error: {0}".format(error))
return
return result
def submit(self, req, form):
"""Submit a record using SWORD."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"record_id": (int, 0),
"server_id": (int, 0),
"ln": (str, CFG_SITE_LANG),
})
# Get the user ID
uid = getUid(req)
# Set language for i18n text auto generation
ln = argd["ln"]
_ = gettext_set_language(ln)
record_id = argd["record_id"]
server_id = argd["server_id"]
body = sword_client.perform_submit(
uid,
record_id,
server_id,
ln
)
navtrail = """
> <a class="navtrail" href="%(CFG_SITE_URL)s/sword_client">%(label)s</a>
""" % {
'CFG_SITE_URL': CFG_SITE_URL,
'label': _("Sword Client"),
}
return page(
title=_("Submit"),
body=body,
navtrail=navtrail,
lastupdated=__lastupdated__,
req=req,
language=ln
)
def submit_step_1(self, req, form):
"""Process step 1 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
'sid': (str, ''),
'server_id': (int, 0),
'ln': (str, CFG_SITE_LANG),
})
sid = argd['sid']
server_id = argd['server_id']
ln = argd['ln']
return sword_client.perform_submit_step_1(
sid,
server_id,
ln
)
def submit_step_2(self, req, form):
"""Process step 2 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
'sid': (str, ""),
'collection_url': (str, ""),
'ln': (str, CFG_SITE_LANG),
})
sid = argd['sid']
collection_url = argd['collection_url']
ln = argd['ln']
return sword_client.perform_submit_step_2(
sid,
collection_url,
ln
)
def submit_step_3(self, req, form):
"""Process step 3 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
'sid': (str, ""),
'mandatory_category_url': (str, ""),
'optional_categories_urls': (list, []),
'ln': (str, CFG_SITE_LANG),
})
sid = argd['sid']
ln = argd['ln']
mandatory_category_url = argd['mandatory_category_url']
optional_categories_urls = argd['optional_categories_urls']
return sword_client.perform_submit_step_3(
sid,
mandatory_category_url,
optional_categories_urls,
ln
)
def submit_step_4(self, req, form):
"""Process step 4 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"sid": (str, ""),
"rn": (str, ""),
"additional_rn": (list, []),
"title": (str, ""),
"author_fullname": (str, ""),
"author_email": (str, ""),
"author_affiliation": (str, ""),
"abstract": (str, ""),
"contributor_fullname": (list, []),
"contributor_email": (list, []),
"contributor_affiliation": (list, []),
"files": (list, []),
"ln": (str, CFG_SITE_LANG),
})
sid = argd["sid"]
rn = argd["rn"]
additional_rn = argd["additional_rn"]
title = argd["title"]
author_fullname = argd["author_fullname"]
author_email = argd["author_email"]
author_affiliation = argd["author_affiliation"]
abstract = argd["abstract"]
contributor_fullname = argd["contributor_fullname"]
contributor_email = argd["contributor_email"]
contributor_affiliation = argd["contributor_affiliation"]
files_indexes = argd["files"]
ln = argd["ln"]
return sword_client.perform_submit_step_4(
sid,
(
rn,
additional_rn,
title,
author_fullname,
author_email,
author_affiliation,
abstract,
contributor_fullname,
contributor_email,
contributor_affiliation,
files_indexes
),
ln
)
index = submissions
| gpl-2.0 |
omnirom/android_external_chromium-org | chrome/browser/PRESUBMIT.py | 36 | 2691 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser code.
This script currently only checks HTML/CSS/JS files in resources/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
checked for here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
path = input_api.os_path
cwd = input_api.PresubmitLocalPath()
resources = path.join(cwd, 'resources')
webui = path.join(cwd, 'ui', 'webui')
affected_files = (f.AbsoluteLocalPath() for f in input_api.AffectedFiles())
would_affect_tests = (
path.join(cwd, 'PRESUBMIT.py'),
path.join(cwd, 'test_presubmit.py'),
path.join(cwd, 'web_dev_style', 'css_checker.py'),
path.join(cwd, 'web_dev_style', 'html_checker.py'),
path.join(cwd, 'web_dev_style', 'js_checker.py'),
)
if any(f for f in affected_files if f in would_affect_tests):
tests = [path.join(cwd, 'test_presubmit.py')]
results.extend(
input_api.canned_checks.RunUnitTests(input_api, output_api, tests))
import sys
old_path = sys.path
try:
sys.path = [cwd] + old_path
from web_dev_style import (resource_checker, css_checker, html_checker,
js_checker)
search_dirs = (resources, webui)
def _html_css_js_resource(p):
return p.endswith(('.html', '.css', '.js')) and p.startswith(search_dirs)
BLACKLIST = ['chrome/browser/resources/pdf/index.html',
'chrome/browser/resources/pdf/index.js']
def is_resource(maybe_resource):
return (maybe_resource.LocalPath() not in BLACKLIST and
_html_css_js_resource(maybe_resource.AbsoluteLocalPath()))
results.extend(resource_checker.ResourceChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(css_checker.CSSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(html_checker.HtmlChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(js_checker.JSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
finally:
sys.path = old_path
return results
| bsd-3-clause |
TarasRudnyk/scrapy | docs/utils/linkfix.py | 141 | 1764 | #!/usr/bin/python
"""
Linkfix - a companion to sphinx's linkcheck builder.
Uses the linkcheck's output file to fix links in docs.
Originally created for this issue:
https://github.com/scrapy/scrapy/issues/606
Author: dufferzafar
"""
import re
# Used for remembering the file (and its contents)
# so we don't have to open the same file again.
_filename = None
_contents = None
# A regex that matches standard linkcheck output lines
line_re = re.compile(ur'(.*)\:\d+\:\s\[(.*)\]\s(?:(.*)\sto\s(.*)|(.*))')
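# Illustrative line this regex is meant to match (contents assumed):
#   docs/topics/example.rst:123: [redirected permanently] http://old.example/ to http://new.example/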
# Read lines from the linkcheck output file
try:
with open("build/linkcheck/output.txt") as out:
output_lines = out.readlines()
except IOError:
print("linkcheck output not found; please run linkcheck first.")
exit(1)
# For every line, fix the respective file
for line in output_lines:
match = re.match(line_re, line)
if match:
newfilename = match.group(1)
errortype = match.group(2)
# Broken links can't be fixed and
        # I am not sure what to do with the local ones.
if errortype.lower() in ["broken", "local"]:
print("Not Fixed: " + line)
else:
# If this is a new file
if newfilename != _filename:
# Update the previous file
if _filename:
with open(_filename, "w") as _file:
_file.write(_contents)
_filename = newfilename
# Read the new file to memory
with open(_filename) as _file:
_contents = _file.read()
_contents = _contents.replace(match.group(3), match.group(4))
else:
# We don't understand what the current line means!
print("Not Understood: " + line)
| bsd-3-clause |
MarsSnail/gyp_tools | test/rules/gyptest-default.py | 25 | 1660 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
Hello from program.c
Hello from function1.in
Hello from function2.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
expect = """\
Hello from program.c
Hello from function3.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('program2', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file1.out', 'Hello from file1.in\n')
test.must_match('relocate/src/subdir2/file2.out', 'Hello from file2.in\n')
test.must_match('relocate/src/subdir2/file1.out2', 'Hello from file1.in\n')
test.must_match('relocate/src/subdir2/file2.out2', 'Hello from file2.in\n')
test.must_match('relocate/src/subdir2/file1.out4', 'Hello from file1.in\n')
test.must_match('relocate/src/subdir2/file2.out4', 'Hello from file2.in\n')
test.must_match('relocate/src/subdir2/file1.copy', 'Hello from file1.in\n')
test.must_match('relocate/src/external/file1.external_rules.out',
'Hello from file1.in\n')
test.must_match('relocate/src/external/file2.external_rules.out',
'Hello from file2.in\n')
test.pass_test()
| bsd-3-clause |
patvarilly/DNACC | examples/competing_linkages/competing_linkages.py | 1 | 10390 | # Copyright 2012 Patrick Varilly, Stefano Angioletti-Uberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# Python script to produce all the figures in Bortolo's competing
# interactions paper:
#
# B.M. Mognetti, M.E. Leunissen and D. Frenkel, Soft Matter 8, 2213 (2012),
# doi: 10.1039/c2sm06635a
import numpy as np
from math import sqrt
import subprocess
import operator
import dnacc
from dnacc.units import nm
# Set up basic system
plates = dnacc.PlatesMeanField()
L = 20 * nm
plates.set_tether_type_prototype(L=L, sigma=0.0)
ALPHA = plates.add_tether_type(plate='lower', sticky_end='alpha')
BETA = plates.add_tether_type(plate='lower', sticky_end='beta')
ALPHA_P = plates.add_tether_type(plate='upper', sticky_end='alphap')
BETA_P = plates.add_tether_type(plate='upper', sticky_end='betap')
# A few useful utility methods
def reset_plates(plates):
for t in plates.tether_types:
t['sigma'] = 0.0
t['L'] = L
plates.beta_DeltaG0.clear()
def set_competing_interactions(plates, beta_DeltaG0a, beta_DeltaG0b):
plates.beta_DeltaG0['alpha', 'alphap'] = beta_DeltaG0a
plates.beta_DeltaG0['alpha', 'betap'] = beta_DeltaG0b
plates.beta_DeltaG0['beta', 'alphap'] = beta_DeltaG0b
# Sample interactions potentials at every 0.05 * L
hArr = np.linspace(0.05 * L, 2.00 * L, 40)
# Figure 2
# ========
def figure2():
reset_plates(plates)
S = 0.75 * sqrt(2.0)
sigma = 1 / (S * L) ** 2
for t in plates.tether_types:
t['sigma'] = sigma
plates.separation = L
with open('fig2.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N\t' 'n_beta / N\n')
for beta_DeltaDeltaG in (3, 5, 8, 11, 14):
f.write("# beta_DeltaDeltaG = %g kT\n" % beta_DeltaDeltaG)
for beta_DeltaG0a in xrange(0, -51, -1):
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
plates.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
plates.sigma_bound[ALPHA, ALPHA_P] / (2 * sigma),
(plates.sigma_bound[ALPHA, BETA_P] +
plates.sigma_bound[BETA, ALPHA_P]) / (2 * sigma)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig2.gp'])
# Figure 3a
# =========
def figure3a():
single = dnacc.PlatesMeanField()
single.set_tether_type_prototype(L=L, sigma=0)
ALPHA = single.add_tether_type(plate='lower', sticky_end='alpha')
ALPHA_P = single.add_tether_type(plate='upper', sticky_end='alphap')
with open('fig3a.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N (MF)\t'
'n_alpha / N (SCMF)\n')
for S in (1.06, 0.75, 0.53):
f.write('# S = %.1f L_alpha\n' % S)
sigma = 1 / (S * L) ** 2
for t in single.tether_types:
t['sigma'] = sigma
for h in (1, 1.5):
f.write('# h = %.1f L_alpha\n' % h)
single.separation = h * L
for beta_DeltaG0a in xrange(0, -31, -1):
single.beta_DeltaG0['alpha', 'alphap'] = beta_DeltaG0a
single.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
# Mean Field (MF) [not really]
single.sigma_bound[ALPHA, ALPHA_P] / sigma,
# Self-consistent mean field
single.sigma_bound[ALPHA, ALPHA_P] / sigma))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig3a.gp'])
# Figure 3b
# =========
def figure3b():
reset_plates(plates)
S = 0.75
sigma = 1 / (S * L) ** 2
for t in plates.tether_types:
t['sigma'] = sigma
plates.separation = L
with open('fig3b.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N\t' 'n_beta / N\n')
beta_DeltaDeltaG = 8
for beta_DeltaG0a in xrange(0, -41, -1):
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
plates.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
plates.sigma_bound[ALPHA, ALPHA_P] / (2 * sigma),
(plates.sigma_bound[ALPHA, BETA_P] +
plates.sigma_bound[BETA, ALPHA_P]) / (2 * sigma)))
subprocess.call(['gnuplot', 'plot_fig3b.gp'])
# Figure 4a
# =========
def figure4a():
reset_plates(plates)
S = 0.75
ts = plates.tether_types
ts[ALPHA]['sigma'] = ts[ALPHA_P]['sigma'] = 0.3 / (S * L) ** 2
ts[BETA]['sigma'] = ts[BETA_P]['sigma'] = 0.7 / (S * L) ** 2
with open('fig4a.txt', 'w') as f:
f.write('# betaDeltaG0b (kT)\t' 'F_min (kT/L^2)\n')
for beta_DeltaDeltaG in (-1000, 8, 5, 3):
f.write("# beta_DeltaDeltaG = %g kT\n" % beta_DeltaDeltaG)
for beta_DeltaG0b in xrange(-20, 9):
set_competing_interactions(plates,
beta_DeltaG0b - beta_DeltaDeltaG,
beta_DeltaG0b)
f.write('%g\t%g\n' %
(beta_DeltaG0b,
min((plates.at(h).free_energy_density
for h in hArr)) / (1 / L ** 2)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig4a.gp'])
# Figure 4b
# =========
def figure4b():
reset_plates(plates)
S = 0.75
ts = plates.tether_types
ts[ALPHA]['sigma'] = ts[ALPHA_P]['sigma'] = 0.3 / (S * L) ** 2
ts[BETA]['sigma'] = ts[BETA_P]['sigma'] = 0.7 / (S * L) ** 2
with open('fig4b.txt', 'w') as f:
f.write('# f (Fraction of hybridised linkages)\t'
'F_min (kT/L^2)\n')
for beta_DeltaDeltaG in (+1000, -1000, 8, 5, 3):
f.write("# beta_DeltaDeltaG = %g kT\n" % beta_DeltaDeltaG)
for beta_DeltaG0b in xrange(-24, 5):
beta_DeltaG0a = beta_DeltaG0b - beta_DeltaDeltaG
if beta_DeltaDeltaG == +1000:
set_competing_interactions(plates,
beta_DeltaG0b,
+1000)
else:
set_competing_interactions(plates,
beta_DeltaG0a,
beta_DeltaG0b)
hAtMin, minF = min(((h, plates.at(h).free_energy_density)
for h in hArr),
key=operator.itemgetter(1))
plates.at(hAtMin)
if beta_DeltaDeltaG == +1000:
maxFract = plates.tether_types[ALPHA]['sigma']
else:
maxFract = 2 * min(plates.tether_types[ALPHA]['sigma'],
plates.tether_types[BETA]['sigma'])
fract = sum(plates.sigma_bound[x]
for x in [(ALPHA, ALPHA_P), (ALPHA, BETA_P),
(BETA, ALPHA_P)]) / maxFract
f.write('%g\t%g\n' % (fract, minF / (1 / L ** 2)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig4b.gp'])
# Figure 5a
# =========
#
# Here, the mean field theory seems to work poorly due to the importance
# of coverage fluctuations
def figure5a():
reset_plates(plates)
S = 0.75
sigma = 0.5 / (S * L) ** 2
ts = plates.tether_types
ts[ALPHA]['L'] = ts[ALPHA_P]['L'] = 0.3 * L
ts[BETA]['L'] = ts[BETA_P]['L'] = 1.7 * L
for t in plates.tether_types:
t['sigma'] = sigma
plates.separation = 0.3 * L
with open('fig5a.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N\t' 'n_beta / N\n')
for beta_DeltaDeltaG in (3, 5, 8, 11, 14):
f.write("# beta_DeltaDeltaG = %g\n" % beta_DeltaDeltaG)
for beta_DeltaG0a in xrange(-40, 1):
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
plates.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
plates.sigma_bound[ALPHA, ALPHA_P] / (2 * sigma),
(plates.sigma_bound[ALPHA, BETA_P] +
plates.sigma_bound[BETA, ALPHA_P]) / (2 * sigma)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig5a.gp'])
# Figure 5b
# =========
def figure5b():
reset_plates(plates)
S = 0.75
sigma = 0.5 / (S * L) ** 2
ts = plates.tether_types
ts[ALPHA]['L'] = ts[ALPHA_P]['L'] = 0.3 * L
ts[BETA]['L'] = ts[BETA_P]['L'] = 1.7 * L
for t in plates.tether_types:
t['sigma'] = sigma
with open('fig5b.txt', 'w') as f:
f.write('# h/L (L = 20 nm)\t' 'F (kT / L^2)\n')
beta_DeltaDeltaG = 8
for beta_DeltaG0a in (-5.8, -8.7, -11.6, -14.5, -17.4, -20.3):
f.write("# beta_DeltaG0a = %g\n" % beta_DeltaG0a)
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
VArr = [plates.at(h).free_energy_density for h in hArr]
for (h, V) in zip(hArr, VArr):
f.write('%g\t%g\n' % (h / L, V / (1 / L ** 2)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig5b.gp'])
# Main module
figure2()
figure3a()
figure3b()
figure4a()
figure4b()
figure5a()
figure5b()
| gpl-3.0 |
sebrandon1/neutron | neutron/plugins/ml2/drivers/l2pop/mech_driver.py | 2 | 14284 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as const
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _, _LW
from neutron import context as n_context
from neutron.db import api as db_api
from neutron.db import l3_hamode_db
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.l2pop import config # noqa
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
LOG = logging.getLogger(__name__)
class L2populationMechanismDriver(api.MechanismDriver):
def __init__(self):
super(L2populationMechanismDriver, self).__init__()
self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()
def initialize(self):
LOG.debug("Experimental L2 population driver")
self.rpc_ctx = n_context.get_admin_context_without_session()
def _get_port_fdb_entries(self, port):
# the port might be concurrently deleted
if not port or not port.get('fixed_ips'):
return []
return [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
ip_address=ip['ip_address'])
for ip in port['fixed_ips']]
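    # Illustrative return value (addresses assumed): a port with one fixed IP
    # yields [PortInfo(mac_address='fa:16:3e:aa:bb:cc', ip_address='10.0.0.5')].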
def check_vlan_transparency(self, context):
"""L2population driver vlan transparency support."""
return True
def _get_ha_port_agents_fdb(
self, session, network_id, router_id):
other_fdb_ports = {}
for agent in l2pop_db.get_ha_agents_by_router_id(session, router_id):
agent_active_ports = l2pop_db.get_agent_network_active_port_count(
session, agent.host, network_id)
if agent_active_ports == 0:
ip = l2pop_db.get_agent_ip(agent)
other_fdb_ports[ip] = [const.FLOODING_ENTRY]
return other_fdb_ports
def delete_port_postcommit(self, context):
port = context.current
agent_host = context.host
fdb_entries = self._get_agent_fdb(context.bottom_bound_segment,
port, agent_host)
if port['device_owner'] in l2pop_db.HA_ROUTER_PORTS and fdb_entries:
session = db_api.get_session()
network_id = port['network_id']
other_fdb_ports = self._get_ha_port_agents_fdb(
session, network_id, port['device_id'])
fdb_entries[network_id]['ports'] = other_fdb_ports
self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
fdb_entries)
def filter_hosts_with_segment_access(
self, context, segments, candidate_hosts, agent_getter):
# NOTE(cbrandily): let other mechanisms (openvswitch, linuxbridge, ...)
# perform the filtering
return set()
def _get_diff_ips(self, orig, port):
orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])
# check if an ip has been added or removed
orig_chg_ips = orig_ips.difference(port_ips)
port_chg_ips = port_ips.difference(orig_ips)
if orig_chg_ips or port_chg_ips:
return orig_chg_ips, port_chg_ips
def _fixed_ips_changed(self, context, orig, port, diff_ips):
orig_ips, port_ips = diff_ips
if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
agent_host = context.host
else:
agent_host = context.original_host
if not agent_host:
return
agent_ip = l2pop_db.get_agent_ip_by_host(db_api.get_session(),
agent_host)
orig_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
ip_address=ip)
for ip in orig_ips]
port_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
ip_address=ip)
for ip in port_ips]
upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
ports = upd_fdb_entries[port['network_id']][agent_ip]
if orig_mac_ip:
ports['before'] = orig_mac_ip
if port_mac_ip:
ports['after'] = port_mac_ip
self.L2populationAgentNotify.update_fdb_entries(
self.rpc_ctx, {'chg_ip': upd_fdb_entries})
return True
def update_port_precommit(self, context):
port = context.current
orig = context.original
if (orig['mac_address'] != port['mac_address'] and
context.status == const.PORT_STATUS_ACTIVE):
msg = _("unable to modify mac_address of ACTIVE port "
"%s") % port['id']
raise exceptions.InvalidInput(error_message=msg)
def update_port_postcommit(self, context):
port = context.current
orig = context.original
if l3_hamode_db.is_ha_router_port(port['device_owner'],
port['device_id']):
return
diff_ips = self._get_diff_ips(orig, port)
if diff_ips:
self._fixed_ips_changed(context, orig, port, diff_ips)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
if context.status == const.PORT_STATUS_ACTIVE:
self.update_port_up(context)
if context.status == const.PORT_STATUS_DOWN:
agent_host = context.host
fdb_entries = self._get_agent_fdb(
context.bottom_bound_segment, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif (context.host != context.original_host
and context.original_status == const.PORT_STATUS_ACTIVE
and context.status == const.PORT_STATUS_DOWN):
# The port has been migrated. Send notification about port
# removal from old host.
fdb_entries = self._get_agent_fdb(
context.original_bottom_bound_segment,
orig, context.original_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif context.status != context.original_status:
if context.status == const.PORT_STATUS_ACTIVE:
self.update_port_up(context)
elif context.status == const.PORT_STATUS_DOWN:
fdb_entries = self._get_agent_fdb(
context.bottom_bound_segment, port, context.host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
def _validate_segment(self, segment, port_id, agent):
if not segment:
LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound "
"to any segment", {'port': port_id, 'agent': agent})
return False
network_types = l2pop_db.get_agent_l2pop_network_types(agent)
if network_types is None:
network_types = l2pop_db.get_agent_tunnel_types(agent)
if segment['network_type'] not in network_types:
return False
return True
def _create_agent_fdb(self, session, agent, segment, network_id):
agent_fdb_entries = {network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {}}}
tunnel_network_ports = (
l2pop_db.get_distributed_active_network_ports(session, network_id))
fdb_network_ports = (
l2pop_db.get_nondistributed_active_network_ports(session,
network_id))
ports = agent_fdb_entries[network_id]['ports']
ports.update(self._get_tunnels(
fdb_network_ports + tunnel_network_ports,
agent.host))
for agent_ip, fdbs in ports.items():
for binding, agent in fdb_network_ports:
if l2pop_db.get_agent_ip(agent) == agent_ip:
fdbs.extend(self._get_port_fdb_entries(binding.port))
return agent_fdb_entries
def _get_tunnels(self, tunnel_network_ports, exclude_host):
agents = {}
for __, agent in tunnel_network_ports:
if agent.host == exclude_host:
continue
ip = l2pop_db.get_agent_ip(agent)
if not ip:
LOG.debug("Unable to retrieve the agent ip, check "
"the agent %s configuration.", agent.host)
continue
if ip not in agents:
agents[ip] = [const.FLOODING_ENTRY]
return agents
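    # Illustrative return value of _get_tunnels() (addresses assumed):
    # {'192.0.2.10': [const.FLOODING_ENTRY], '192.0.2.11': [const.FLOODING_ENTRY]},
    # i.e. one flooding entry per remote tunnel endpoint, excluding exclude_host.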
def update_port_down(self, context):
port = context.current
agent_host = context.host
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
# when agent transitions to backup, don't remove flood flows
if agent_host and l3plugin and getattr(
l3plugin, "list_router_ids_on_host", None):
admin_context = n_context.get_admin_context()
if l3plugin.list_router_ids_on_host(
admin_context, agent_host, [port['device_id']]):
return
fdb_entries = self._get_agent_fdb(
context.bottom_bound_segment, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
def update_port_up(self, context):
port = context.current
agent_host = context.host
session = db_api.get_session()
agent = l2pop_db.get_agent_by_host(session, agent_host)
if not agent:
LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
agent_host)
return
network_id = port['network_id']
agent_active_ports = l2pop_db.get_agent_network_active_port_count(
session, agent_host, network_id)
agent_ip = l2pop_db.get_agent_ip(agent)
segment = context.bottom_bound_segment
if not self._validate_segment(segment, port['id'], agent):
return
other_fdb_entries = self._get_fdb_entries_template(
segment, agent_ip, network_id)
other_fdb_ports = other_fdb_entries[network_id]['ports']
if agent_active_ports == 1 or (l2pop_db.get_agent_uptime(agent) <
cfg.CONF.l2pop.agent_boot_time):
# First port activated on current agent in this network,
# we have to provide it with the whole list of fdb entries
agent_fdb_entries = self._create_agent_fdb(session,
agent,
segment,
network_id)
# And notify other agents to add flooding entry
other_fdb_ports[agent_ip].append(const.FLOODING_ENTRY)
if agent_fdb_entries[network_id]['ports'].keys():
self.L2populationAgentNotify.add_fdb_entries(
self.rpc_ctx, agent_fdb_entries, agent_host)
# Notify other agents to add fdb rule for current port
if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
not l3_hamode_db.is_ha_router_port(port['device_owner'],
port['device_id'])):
other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)
self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
other_fdb_entries)
def _get_agent_fdb(self, segment, port, agent_host):
if not agent_host:
return
network_id = port['network_id']
session = db_api.get_session()
agent_active_ports = l2pop_db.get_agent_network_active_port_count(
session, agent_host, network_id)
agent = l2pop_db.get_agent_by_host(db_api.get_session(), agent_host)
if not self._validate_segment(segment, port['id'], agent):
return
agent_ip = l2pop_db.get_agent_ip(agent)
other_fdb_entries = self._get_fdb_entries_template(
segment, agent_ip, port['network_id'])
if agent_active_ports == 0:
# Agent is removing its last activated port in this network,
            # other agents need to be notified to delete their flooding entry.
other_fdb_entries[network_id]['ports'][agent_ip].append(
const.FLOODING_ENTRY)
# Notify other agents to remove fdb rules for current port
if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
not l3_hamode_db.is_ha_router_port(port['device_owner'],
port['device_id'])):
fdb_entries = self._get_port_fdb_entries(port)
other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
return other_fdb_entries
@classmethod
def _get_fdb_entries_template(cls, segment, agent_ip, network_id):
return {
network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {agent_ip: []}}}
| apache-2.0 |
arenadata/ambari | ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/interpreter_json_template.py | 3 | 10516 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
template = '''
{
"interpreterSettings": {
"2CKEKWY8Z": {
"id": "2CKEKWY8Z",
"name": "angular",
"group": "angular",
"properties": {},
"status": "READY",
"interpreterGroup": [
{
"name": "angular",
"class": "org.apache.zeppelin.angular.AngularInterpreter",
"defaultInterpreter": false,
"editor": {
"editOnDblClick": true
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKX8WPU1": {
"id": "2CKX8WPU1",
"name": "spark",
"group": "spark",
"properties": {
"spark.executor.memory": "512m",
"args": "",
"zeppelin.spark.printREPLOutput": "true",
"spark.cores.max": "",
"zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",
"zeppelin.spark.sql.stacktrace": "false",
"zeppelin.spark.importImplicit": "true",
"zeppelin.spark.concurrentSQL": "false",
"zeppelin.spark.useHiveContext": "true",
"zeppelin.pyspark.python": "python",
"zeppelin.dep.localrepo": "local-repo",
"zeppelin.R.knitr": "true",
"zeppelin.spark.maxResult": "1000",
"master": "yarn-client",
"spark.app.name": "Zeppelin",
"zeppelin.R.image.width": "100%",
"zeppelin.R.render.options": "out.format \u003d \u0027html\u0027, comment \u003d NA, echo \u003d FALSE, results \u003d \u0027asis\u0027, message \u003d F, warning \u003d F",
"zeppelin.R.cmd": "R"
},
"status": "READY",
"interpreterGroup": [
{
"name": "spark",
"class": "org.apache.zeppelin.spark.SparkInterpreter",
"defaultInterpreter": true,
"editor": {
"language": "scala"
}
},
{
"name": "sql",
"class": "org.apache.zeppelin.spark.SparkSqlInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql"
}
},
{
"name": "dep",
"class": "org.apache.zeppelin.spark.DepInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "scala"
}
},
{
"name": "pyspark",
"class": "org.apache.zeppelin.spark.PySparkInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "python"
}
},
{
"name": "r",
"class": "org.apache.zeppelin.spark.SparkRInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "r"
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CK8A9MEG": {
"id": "2CK8A9MEG",
"name": "jdbc",
"group": "jdbc",
"properties": {
"default.password": "",
"zeppelin.jdbc.auth.type": "",
"common.max_count": "1000",
"zeppelin.jdbc.principal": "",
"default.user": "gpadmin",
"default.url": "jdbc:postgresql://localhost:5432/",
"default.driver": "org.postgresql.Driver",
"zeppelin.jdbc.keytab.location": "",
"zeppelin.jdbc.concurrent.use": "true",
"zeppelin.jdbc.concurrent.max_connection": "10"
},
"status": "READY",
"interpreterGroup": [
{
"name": "sql",
"class": "org.apache.zeppelin.jdbc.JDBCInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKX6DGQZ": {
"id": "2CKX6DGQZ",
"name": "livy",
"group": "livy",
"properties": {
"zeppelin.livy.pull_status.interval.millis": "1000",
"livy.spark.executor.memory": "",
"zeppelin.livy.session.create_timeout": "120",
"zeppelin.livy.principal": "",
"zeppelin.livy.spark.sql.maxResult": "1000",
"zeppelin.livy.keytab": "",
"zeppelin.livy.concurrentSQL": "false",
"zeppelin.livy.spark.sql.field.truncate": "true",
"livy.spark.executor.cores": "",
"zeppelin.livy.displayAppInfo": "false",
"zeppelin.livy.url": "http://localhost:8998",
"livy.spark.dynamicAllocation.minExecutors": "",
"livy.spark.driver.cores": "",
"livy.spark.jars.packages": "",
"livy.spark.dynamicAllocation.enabled": "",
"livy.spark.executor.instances": "",
"livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "",
"livy.spark.dynamicAllocation.maxExecutors": "",
"livy.spark.dynamicAllocation.initialExecutors": "",
"livy.spark.driver.memory": ""
},
"status": "READY",
"interpreterGroup": [
{
"name": "spark",
"class": "org.apache.zeppelin.livy.LivySparkInterpreter",
"defaultInterpreter": true,
"editor": {
"language": "scala",
"editOnDblClick": false
}
},
{
"name": "sql",
"class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql",
"editOnDblClick": false
}
},
{
"name": "pyspark",
"class": "org.apache.zeppelin.livy.LivyPySparkInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "python",
"editOnDblClick": false
}
},
{
"name": "pyspark3",
"class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter",
"defaultInterpreter": false,
"editor": {
"language": "python",
"editOnDblClick": false
}
},
{
"name": "sparkr",
"class": "org.apache.zeppelin.livy.LivySparkRInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "r",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "scoped",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKAY1A8Y": {
"id": "2CKAY1A8Y",
"name": "md",
"group": "md",
"properties": {
"markdown.parser.type": "pegdown"
},
"status": "READY",
"interpreterGroup": [
{
"name": "md",
"class": "org.apache.zeppelin.markdown.Markdown",
"defaultInterpreter": false,
"editor": {
"language": "markdown",
"editOnDblClick": true
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CHS8UYQQ": {
"id": "2CHS8UYQQ",
"name": "sh",
"group": "sh",
"properties": {
"zeppelin.shell.keytab.location": "",
"shell.command.timeout.millisecs": "60000",
"zeppelin.shell.principal": "",
"zeppelin.shell.auth.type": ""
},
"status": "READY",
"interpreterGroup": [
{
"name": "sh",
"class": "org.apache.zeppelin.shell.ShellInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sh",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
}
},
"interpreterBindings": {},
"interpreterRepositories": [
{
"id": "central",
"type": "default",
"url": "http://repo1.maven.org/maven2/",
"releasePolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"snapshotPolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"mirroredRepositories": [],
"repositoryManager": false
},
{
"id": "local",
"type": "default",
"url": "file:///home/zeppelin/.m2/repository",
"releasePolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"snapshotPolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"mirroredRepositories": [],
"repositoryManager": false
}
]
}
'''
| apache-2.0 |
EnviroCentre/jython-upgrade | jython/lib/test/test_sha.py | 136 | 1703 | # Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
import warnings
warnings.filterwarnings("ignore", "the sha module is deprecated.*",
DeprecationWarning)
import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
def check(self, data, digest):
# Check digest matches the expected value
obj = sha.new(data)
computed = obj.hexdigest()
self.assertTrue(computed == digest)
# Verify that the value doesn't change between two consecutive
# digest operations.
computed_again = obj.hexdigest()
self.assertTrue(computed == computed_again)
# Check hexdigest() output matches digest()'s output
digest = obj.digest()
hexd = ""
for c in digest:
hexd += '%02x' % ord(c)
self.assertTrue(computed == hexd)
def test_case_1(self):
self.check("abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_2(self):
self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_3(self):
self.check("a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
def test_case_4(self):
self.check(chr(0xAA) * 80,
'4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
def test_main():
test_support.run_unittest(SHATestCase)
if __name__ == "__main__":
test_main()
| mit |
Hawaii-Smart-Energy-Project/Maui-Smart-Grid | src/filelock.py | 1 | 4303 | # Copyright (c) 2009, Evan Fosmark
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
import os
import time
import errno
class FileLockException(Exception):
pass
class FileLock(object):
""" A file locking mechanism that has context-manager support so
you can use it in a with statement. This should be relatively cross
compatible as it doesn't rely on msvcrt or fcntl for the locking.
"""
__slots__ = ('fd', 'is_locked', 'lockfile', 'file_name', 'timeout', 'delay')
def __init__(self, file_name, timeout = 10, delay = .05):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = os.path.abspath(
os.path.expanduser(os.path.expandvars("%s.lock" % file_name)))
self.file_name = file_name
self.timeout = timeout
self.delay = delay
def acquire(self):
""" Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.
"""
start_time = time.time()
pid = os.getpid()
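        # O_CREAT | O_EXCL makes os.open() fail atomically with EEXIST if the
        # lock file already exists, so only one process can hold the lock.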
while True:
try:
self.fd = os.open(self.lockfile,
os.O_CREAT | os.O_EXCL | os.O_RDWR)
os.write(self.fd, "%d" % pid)
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= self.timeout:
raise FileLockException("Timeout occured.")
time.sleep(self.delay)
self.is_locked = True
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
            It automatically releases the lock if it is still held.
"""
if self.is_locked:
self.release()
def __del__(self):
""" Make sure that the FileLock instance doesn't leave a lockfile
lying around.
"""
self.release()
| bsd-3-clause |
hack4sec/ws-cli | classes/jobs/MongoJob.py | 1 | 5376 | # -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Common class for jobs that work with MongoDB
"""
import Queue
from classes.Registry import Registry
from classes.kernel.WSJob import WSJob
class MongoJob(WSJob):
""" Common class for jobs works with MongoDB """
unique = True
collection = None
select_limit = 50
skip_blank_rows = True
counter = 0
collection_name = None
def __init__(self, maxsize=0):
WSJob.__init__(self, maxsize)
self.collection = Registry().get('mongo')[self.collection_name]
def build_row(self, _str):
""" Common build row method for MongoDB """
return {
"name": _str.strip(),
"checked": 0,
"getted": 0
}
def qsize(self):
""" Size of queue """
return self.collection.find({"checked": 0}).count()
def set_unique(self, unique=True):
""" Enable remove dups in queue """
self.unique = unique
def set_skip_blank_rows(self, value=True):
""" If True - we will skip blank rows then fill queue from dict or file """
self.skip_blank_rows = value
def task_done(self, name):
""" Mark current row as done """
self.counter += 1
self.collection.update({'name': str(unicode(name)), "getted": 1}, {"$set": {"checked": 1}})
WSJob.task_done(self)
def get(self, block=False, timeout=None):
""" Get next item from queue """
if self.empty() or self.qsize() < 50:
self.load_data()
if self.empty():
raise Queue.Empty
return WSJob.get(self, block, timeout)
def load_data(self):
""" Load data into queue from MongoDB """
data = self.collection.find(
{"checked": 0, "getted": 0},
limit=int(Registry().get('config')['main']['mongo_data_load_per_once'])
)
for row in data:
self.put(row['name'])
self.collection.update({"name": row['name']}, {"$set": {"getted": 1}})
return True
def load_dict(self, dict_for_load, drop=True):
""" Fill collection from dict """
if drop:
self.collection.drop()
counter = 0
last = "START OF FILE"
for line in dict_for_load:
try:
line = line.strip()
unicode(line)
self.collection.insert(self.build_row(line))
except UnicodeDecodeError:
_str = " UNICODE ERROR: In file '{0}' skip word '{1}', after word '{2}' !".format(file, line, last)
if Registry().isset('logger'):
Registry().get('logger').log(_str)
else:
print _str
continue
counter += 1
last = line
self.load_data()
return counter
def load_dom(self, dom):
""" Fill queue from DictOfMask """
self.collection.drop()
while True:
word = dom.get()
if word is None:
break
self.collection.insert(self.build_row(word))
self.collection.create_index('name', drop_dups=True, unique=self.unique)
self.load_data()
return self.collection.count()
def load_file(self, _file):
""" Fill queue from text file """
self.collection.drop()
fh = open(_file)
last = "START OF FILE"
while True:
line = fh.readline()
if not line:
break
if not line.strip() and self.skip_blank_rows:
continue
try:
line = line.strip()
unicode(line)
self.collection.insert(self.build_row(line))
except UnicodeDecodeError:
_str = " UNICODE ERROR: In file '{0}' skip word '{1}', after word '{2}' !".format(_file, line, last)
if Registry().isset('logger'):
Registry().get('logger').log(_str)
else:
print _str
continue
last = line
fh.close()
self.collection.create_index('name', drop_dups=True, unique=self.unique)
self.load_data()
return self.collection.count()
    # The two methods below are taken from
    # http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
    # Recipe for keeping only unique tasks in the queue
def _init(self, maxsize):
WSJob._init(self, maxsize)
if self.unique:
self.all_items = set()
def _put(self, item):
if self.unique:
if item not in self.all_items:
WSJob._put(self, item)
self.all_items.add(item)
else:
_str = "WARNING: try to add not unique item `{0}`".format(item)
if Registry().isset('logger'):
#Registry().get('logger').log(_str)
pass
else:
#print _str
pass
else:
WSJob._put(self, item)
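# Illustrative subclass sketch (the collection and file names are placeholders):
# a concrete job only needs to name its MongoDB collection; the queue filling
# and row-marking behaviour is inherited from MongoJob.
#
#     class HostsJob(MongoJob):
#         """ Queue backed by the 'hosts' collection """
#         collection_name = "hosts"
#
#     job = HostsJob()
#     job.load_file("/tmp/hosts.txt")  # fills the collection, then the queue
#     name = job.get()                 # row is marked getted=1
#     job.task_done(name)              # row is marked checked=1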
| mit |
gannetson/django | django/contrib/flatpages/forms.py | 357 | 2024 | from django import forms
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext, ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$',
help_text=_("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_messages={
"invalid": _("This value must contain only letters, numbers,"
" dots, underscores, dashes, slashes or tildes."),
},
)
class Meta:
model = FlatPage
fields = '__all__'
def clean_url(self):
url = self.cleaned_data['url']
if not url.startswith('/'):
raise forms.ValidationError(
ugettext("URL is missing a leading slash."),
code='missing_leading_slash',
)
if (settings.APPEND_SLASH and
'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE_CLASSES and
not url.endswith('/')):
raise forms.ValidationError(
ugettext("URL is missing a trailing slash."),
code='missing_trailing_slash',
)
return url
def clean(self):
url = self.cleaned_data.get('url')
sites = self.cleaned_data.get('sites')
same_url = FlatPage.objects.filter(url=url)
if self.instance.pk:
same_url = same_url.exclude(pk=self.instance.pk)
if sites and same_url.filter(sites__in=sites).exists():
for site in sites:
if same_url.filter(sites=site).exists():
raise forms.ValidationError(
_('Flatpage with url %(url)s already exists for site %(site)s'),
code='duplicate_url',
params={'url': url, 'site': site},
)
return super(FlatpageForm, self).clean()
| bsd-3-clause |
fenginx/django | tests/admin_inlines/models.py | 14 | 6885 | """
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super().save(*args, **kwargs)
class NonAutoPKBookChild(NonAutoPKBook):
pass
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
text = models.CharField(max_length=40)
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class NovelReadonlyChapter(Novel):
class Meta:
proxy = True
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
readonly_field = models.CharField(max_length=1)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause |
boompig/passzero | passzero/main_routes.py | 1 | 12491 | from functools import wraps
from flask import (Blueprint, abort, current_app, escape, flash, make_response,
redirect, render_template, request, session, url_for)
from sqlalchemy.orm.exc import NoResultFound
from passzero.api_utils import check_auth
from passzero.backend import (activate_account, decrypt_entries, get_entries,
get_link_by_id, get_services_map,
password_strength_scores, get_document_by_id)
from passzero.models import AuthToken, User, db
from . import export_utils
main_routes = Blueprint("main_routes", __name__)
def auth_or_redirect_login(function):
"""This is a decorator which does authentication for GET requests to templates.
If not authenticated, return a redirect to the login screen.
If authenticated, call the function."""
@wraps(function)
def inner(*args, **kwargs):
if check_auth():
return function(*args, **kwargs)
else:
return redirect(url_for("main_routes.login"))
return inner
def auth_or_abort(function):
"""This is a decorator which does authentication for GET requests to templates.
If not authenticated, show the 401 screen.
If authenticated, call the function."""
@wraps(function)
def inner(*args, **kwargs):
if check_auth():
return function(*args, **kwargs)
else:
return abort(401)
return inner
@main_routes.route("/", methods=["GET"])
def index():
if check_auth():
return redirect(url_for("main_routes.view_entries"))
else:
return render_template("landing.jinja2")
@main_routes.route("/done_login", methods=["GET"])
@auth_or_abort
def post_login():
flash(f"Successfully logged in as {escape(session['email'])}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/login", methods=["GET"])
def login():
return render_template("login.jinja2", login=True, error=None)
@main_routes.route("/logout", methods=["GET", "POST"])
def logout():
if 'email' in session:
session.pop("email")
if 'password' in session:
session.pop("password")
if 'user_id' in session:
session.pop("user_id")
return redirect(url_for("main_routes.login"))
@main_routes.route("/post_account_delete", methods=["GET", "POST"])
def post_account_delete():
flash("Account successfully deleted")
return redirect(url_for("main_routes.logout"))
@main_routes.route("/done_signup/<email>", methods=["GET"])
def post_signup(email: str):
flash("Successfully created account with email %s. A confirmation email was sent to this address." % escape(email))
return redirect(url_for("main_routes.login"))
# --- entries --- #
@main_routes.route("/entries/post_delete/<account_name>", methods=["GET"])
@auth_or_abort
def post_delete(account_name: str):
flash(f"Successfully deleted account {escape(account_name)}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/entries/new", methods=["GET"])
@auth_or_redirect_login
def new_entry_view():
user = db.session.query(User).filter_by(id=session["user_id"]).one()
user_prefs = {
"default_random_password_length": user.default_random_password_length,
"default_random_passphrase_length": user.default_random_passphrase_length
}
return render_template("new.jinja2", title="PassZero · New Entry",
user_prefs=user_prefs, error=None)
@main_routes.route("/entries/done_edit/<account_name>")
@auth_or_abort
def post_edit(account_name):
flash(f"Successfully changed entry for account {escape(account_name)}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/entries/done_new/<account_name>", methods=["GET"])
@auth_or_abort
def post_create(account_name):
flash(f"Successfully created entry for account {escape(account_name)}")
return redirect(url_for("main_routes.view_entries"))
@main_routes.route("/entries", methods=["GET"])
@auth_or_redirect_login
def view_entries():
return render_template("entries.jinja2")
# --- entries --- #
# --- links --- #
@main_routes.route("/links", methods=["GET"])
@auth_or_redirect_login
def view_links():
return render_template("links/links.jinja2")
@main_routes.route("/links/new", methods=["GET"])
@auth_or_redirect_login
def new_link_view():
return render_template("links/new-link.jinja2", title="PassZero · New Link", link_id=-1)
@main_routes.route("/links/<int:link_id>", methods=["GET"])
@auth_or_redirect_login
def edit_link(link_id: int):
user = db.session.query(User).filter_by(id=session["user_id"]).one()
link = get_link_by_id(db.session, user.id, link_id)
if link is None:
flash("Error: no link with ID %d" % link_id, "error")
return redirect(url_for("main_routes.view_links"))
dec_link = link.decrypt(session["password"])
return render_template("links/new-link.jinja2", title="PassZero · Edit Link",
link_id=link_id,
service_name=dec_link.service_name,
link=dec_link.link)
# --- links --- #
# --- documents --- #
@main_routes.route("/docs", methods=["GET"])
@auth_or_redirect_login
def view_docs():
return render_template("docs/docs.jinja2")
@main_routes.route("/docs/new", methods=["GET"])
@auth_or_redirect_login
def new_doc_view():
return render_template("docs/new-doc.jinja2", title="PassZero · New Document",
document_id=-1)
@main_routes.route("/docs/<int:document_id>/edit", methods=["GET"])
@auth_or_redirect_login
def edit_doc(document_id: int):
# get the document
user = db.session.query(User).filter_by(id=session["user_id"]).one()
doc = get_document_by_id(db.session, user.id, document_id)
if doc is None:
flash(f"Error: no document with ID {document_id}", "error")
return redirect(url_for("main_routes.view_docs"))
return render_template("docs/new-doc.jinja2", title="PassZero · New Document",
document_id=document_id)
@main_routes.route("/docs/<int:document_id>/view", methods=["GET"])
@auth_or_redirect_login
def view_decrypted_doc(document_id: int):
user = db.session.query(User).filter_by(id=session["user_id"]).one()
doc = get_document_by_id(db.session, user.id, document_id)
if doc is None:
flash(f"Error: no document with ID {document_id}", "error")
return redirect(url_for("main_routes.view_docs"))
dec_doc = doc.decrypt(session["password"])
return render_template(
"docs/view-doc.jinja2",
title="PassZero · View Document",
document_id=document_id,
document_mimetype=dec_doc.mimetype,
document_name=dec_doc.name
)
# --- documents --- #
@main_routes.route("/signup", methods=["GET"])
def signup():
error = None
return render_template("login.jinja2", login=False, error=error)
@main_routes.route("/signup/post_confirm")
def post_confirm_signup():
flash("Successfully signed up! Login with your newly created account")
return redirect(url_for("main_routes.login"))
@main_routes.route("/signup/confirm")
def confirm_signup():
try:
token = request.args["token"]
token_obj = db.session.query(AuthToken).filter_by(token=token).one()
if token_obj.is_expired():
flash("Token has expired", "error")
# delete old token from database
db.session.delete(token_obj)
db.session.commit()
return redirect(url_for("main_routes.signup"))
else:
# token deleted when password changed
db.session.delete(token_obj)
user = db.session.query(User).filter_by(id=token_obj.user_id).one()
activate_account(db.session, user)
return redirect(url_for("main_routes.post_confirm_signup"))
except NoResultFound:
flash("Token is invalid", "error")
return redirect(url_for("main_routes.signup"))
except KeyError:
flash("Token is mandatory", "error")
return redirect(url_for("main_routes.signup"))
@main_routes.route("/advanced/export", methods=["GET"])
@auth_or_abort
def export_entries():
export_contents = export_utils.export_decrypted_entries(
db.session,
session["user_id"],
session["password"]
)
response = make_response(export_contents)
response.headers["Content-Disposition"] = (
"attachment; filename=%s" % current_app.config['DUMP_FILE']
)
return response
@main_routes.route("/advanced/done_export")
@auth_or_abort
def done_export():
flash("database successfully dumped to file %s" % current_app.config['DUMP_FILE'])
return redirect("/advanced")
@main_routes.route("/edit/<int:entry_id>", methods=["GET"])
@main_routes.route("/entries/<int:entry_id>", methods=["GET"])
@auth_or_redirect_login
def edit_entry(entry_id: int):
user = db.session.query(User).filter_by(id=session["user_id"]).one()
entries = get_entries(db.session, session["user_id"])
my_entries = [e for e in entries if e.id == entry_id]
if len(my_entries) == 0:
flash("Error: no entry with ID %d" % entry_id, "error")
return redirect(url_for("main_routes.view_entries"))
else:
fe = decrypt_entries(my_entries, session['password'])
user_prefs = {
"default_random_password_length": user.default_random_password_length,
"default_random_passphrase_length": user.default_random_passphrase_length
}
return render_template(
"new.jinja2",
user_prefs=user_prefs,
e_id=entry_id,
entry=fe[0],
error=None
)
@main_routes.route("/entries/strength")
@auth_or_redirect_login
def password_strength():
entries = get_entries(db.session, session["user_id"])
dec_entries = decrypt_entries(entries, session['password'])
entry_scores = password_strength_scores(session["email"], dec_entries)
return render_template("password_strength.jinja2", entry_scores=entry_scores)
@main_routes.route("/entries/2fa")
@auth_or_redirect_login
def two_factor():
entries = get_entries(db.session, session["user_id"])
services_map = get_services_map(db.session)
two_factor_map = {}
for entry in entries:
account = entry.account.lower()
two_factor_map[entry.account] = {
"service_has_2fa": services_map.get(account, {}).get("has_two_factor", False),
"entry_has_2fa": entry.has_2fa,
"entry_id": entry.id
}
return render_template("entries_2fa.jinja2", two_factor_map=two_factor_map)
@main_routes.route("/advanced")
@auth_or_redirect_login
def advanced():
return render_template("advanced.jinja2")
@main_routes.route("/profile")
@auth_or_redirect_login
def profile():
user = db.session.query(User).filter_by(id=session["user_id"]).one()
user_prefs = {
"default_random_password_length": user.default_random_password_length,
"default_random_passphrase_length": user.default_random_passphrase_length,
}
return render_template(
"profile.jinja2",
title="PassZero · Profile",
user_prefs=user_prefs
)
@main_routes.route("/recover")
def recover_password():
return render_template("recover.jinja2")
@main_routes.route("/recover/confirm")
def recover_account_confirm():
try:
token = request.args['token']
token_obj = db.session.query(AuthToken).filter_by(token=token).one()
if token_obj.is_expired():
flash("Token has expired", "error")
# delete old token from database
db.session.delete(token_obj)
db.session.commit()
return redirect(url_for("main_routes.recover_password"))
else:
# token deleted when password changed
return render_template("recover.jinja2", confirm=True)
except NoResultFound:
flash("Token is invalid", "error")
return redirect(url_for("main_routes.recover_password"))
except KeyError:
flash("Token is mandatory", "error")
return redirect(url_for("main_routes.recover_password"))
@main_routes.route("/about")
def about():
return render_template("about.jinja2")
@main_routes.route("/version")
def get_version():
return current_app.config['BUILD_ID']
| gpl-3.0 |
LoHChina/nova | nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py | 39 | 2104 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
authorize = extensions.soft_extension_authorizer('compute', 'extended_vif_net')
class ExtendedServerVIFNetController(wsgi.Controller):
def __init__(self):
super(ExtendedServerVIFNetController, self).__init__()
self.network_api = network.API()
@wsgi.extends
def index(self, req, resp_obj, server_id):
key = "%s:net_id" % Extended_virtual_interfaces_net.alias
context = req.environ['nova.context']
if authorize(context):
for vif in resp_obj.obj['virtual_interfaces']:
vif1 = self.network_api.get_vif_by_mac_address(context,
vif['mac_address'])
vif[key] = vif1.net_uuid
class Extended_virtual_interfaces_net(extensions.ExtensionDescriptor):
"""Adds network id parameter to the virtual interface list."""
name = "ExtendedVIFNet"
alias = "OS-EXT-VIF-NET"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended-virtual-interfaces-net/api/v1.1")
updated = "2013-03-07T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedServerVIFNetController()
extension = extensions.ControllerExtension(self,
'os-virtual-interfaces',
controller)
return [extension]
| apache-2.0 |
aferr/LatticeMemCtl | src/python/m5/util/attrdict.py | 84 | 3421 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
__all__ = [ 'attrdict', 'multiattrdict', 'optiondict' ]
class attrdict(dict):
"""Wrap dict, so you can use attribute access to get/set elements"""
def __getattr__(self, attr):
if attr in self:
return self.__getitem__(attr)
return super(attrdict, self).__getattribute__(attr)
def __setattr__(self, attr, value):
if attr in dir(self) or attr.startswith('_'):
return super(attrdict, self).__setattr__(attr, value)
return self.__setitem__(attr, value)
def __delattr__(self, attr):
if attr in self:
return self.__delitem__(attr)
return super(attrdict, self).__delattr__(attr)
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
class multiattrdict(attrdict):
"""Wrap attrdict so that nested attribute accesses automatically create
nested dictionaries."""
def __getattr__(self, attr):
try:
return super(multiattrdict, self).__getattr__(attr)
except AttributeError:
if attr.startswith('_'):
raise
d = multiattrdict()
setattr(self, attr, d)
return d
class optiondict(attrdict):
"""Modify attrdict so that a missing attribute just returns None"""
def __getattr__(self, attr):
try:
return super(optiondict, self).__getattr__(attr)
except AttributeError:
return None
if __name__ == '__main__':
x = attrdict()
x.y = 1
x['z'] = 2
print x['y'], x.y
print x['z'], x.z
print dir(x)
print x
print
del x['y']
del x.z
print dir(x)
print(x)
print
print "multiattrdict"
x = multiattrdict()
x.x.x.x = 9
x.y.z = 9
print x
print x.y
print x.y.z
print x.z.z
| bsd-3-clause |
hynnet/openwrt-mt7620 | staging_dir/host/lib/python2.7/distutils/dep_util.py | 177 | 3509 | """distutils.dep_util
Utility functions for simple, timestamp-based dependency of files
and groups of files; also, functions based entirely on such
timestamp dependency analysis."""
__revision__ = "$Id$"
import os
from stat import ST_MTIME
from distutils.errors import DistutilsFileError
def newer(source, target):
"""Tells if the target is newer than the source.
Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Return false if both exist and 'target' is the same age or younger
than 'source'. Raise DistutilsFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same second
will have the same "age".
"""
if not os.path.exists(source):
raise DistutilsFileError("file '%s' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source)[ST_MTIME] > os.stat(target)[ST_MTIME]
def newer_pairwise(sources, targets):
"""Walk two filename lists in parallel, testing if each source is newer
than its corresponding target. Return a pair of lists (sources,
targets) where source is newer than target, according to the semantics
of 'newer()'.
"""
if len(sources) != len(targets):
raise ValueError, "'sources' and 'targets' must be same length"
# build a pair of lists (sources, targets) where source is newer
n_sources = []
n_targets = []
for source, target in zip(sources, targets):
if newer(source, target):
n_sources.append(source)
n_targets.append(target)
return n_sources, n_targets
def newer_group(sources, target, missing='error'):
"""Return true if 'target' is out-of-date with respect to any file
listed in 'sources'.
In other words, if 'target' exists and is newer
than every file in 'sources', return false; otherwise return true.
'missing' controls what we do when a source file is missing; the
default ("error") is to blow up with an OSError from inside 'stat()';
if it is "ignore", we silently drop any missing source files; if it is
"newer", any missing source files make us assume that 'target' is
out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
carry out commands that wouldn't work because inputs are missing, but
that doesn't matter because you're not actually going to run the
commands).
"""
# If the target doesn't even exist, then it's definitely out-of-date.
if not os.path.exists(target):
return True
# Otherwise we have to find out the hard way: if *any* source file
# is more recent than 'target', then 'target' is out-of-date and
# we can immediately return true. If we fall through to the end
# of the loop, then 'target' is up-to-date and we return false.
target_mtime = os.stat(target)[ST_MTIME]
for source in sources:
if not os.path.exists(source):
if missing == 'error': # blow up when we stat() the file
pass
elif missing == 'ignore': # missing source dropped from
continue # target's dependency list
elif missing == 'newer': # missing source means target is
return True # out-of-date
if os.stat(source)[ST_MTIME] > target_mtime:
return True
return False
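if __name__ == '__main__':
    # Minimal self-check sketch (uses a throwaway temp file): a file is never
    # newer than itself, and a missing target is always considered out of date.
    import tempfile
    _tmp = tempfile.NamedTemporaryFile(delete=False)
    _tmp.close()
    print newer(_tmp.name, _tmp.name)                       # False
    print newer(_tmp.name, _tmp.name + '.missing')          # True
    print newer_group([_tmp.name], _tmp.name + '.missing')  # True
    os.unlink(_tmp.name)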
| gpl-2.0 |
shakamunyi/docker-registry | depends/docker-registry-core/docker_registry/testing/driver.py | 30 | 13434 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import random
import string
from ..core import compat
from ..core import driver
from ..core import exceptions
from nose import SkipTest # noqa
from nose import tools
logger = logging.getLogger(__name__)
class Driver(object):
def __init__(self, scheme=None, path=None, config=None):
self.scheme = scheme
self.path = path
self.config = config
# Load the requested driver
def setUp(self):
storage = driver.fetch(self.scheme)
self._storage = storage(self.path, self.config)
def tearDown(self):
pass
def gen_random_string(self, length=16):
return ''.join([random.choice(string.ascii_uppercase + string.digits)
for x in range(length)]).lower()
def simplehelp(self, path, content, expected, size=0):
self._storage.put_content(path, content)
assert self._storage.get_content(path) == expected
assert self._storage.get_content(path) == expected
if size:
assert self._storage.get_size(path) == size
def unicodehelp(self, path, content, expected):
self._storage.put_unicode(path, content)
assert self._storage.get_unicode(path) == expected
assert self._storage.get_unicode(path) == expected
def jsonhelp(self, path, content, expected):
self._storage.put_json(path, content)
assert self._storage.get_json(path) == expected
assert self._storage.get_json(path) == expected
def test_exists_non_existent(self):
filename = self.gen_random_string()
assert not self._storage.exists(filename)
def test_exists_existent(self):
filename = self.gen_random_string()
self._storage.put_content(filename, b'')
assert self._storage.exists(filename)
# get / put
def test_write_read_1(self):
filename = self.gen_random_string()
content = b'a'
expected = b'a'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_2(self):
filename = self.gen_random_string()
content = b'\xc3\x9f'
expected = b'\xc3\x9f'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_3(self):
filename = self.gen_random_string()
content = u'ß'.encode('utf8')
expected = b'\xc3\x9f'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_4(self):
filename = self.gen_random_string()
content = 'ß'
if compat.is_py2:
content = content.decode('utf8')
content = content.encode('utf8')
expected = b'\xc3\x9f'
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_5(self):
filename = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
expected = content
self.simplehelp(filename, content, expected, len(expected))
def test_write_read_6(self):
filename = self.gen_random_string()
content = self.gen_random_string(1024 * 1024).encode('utf8')
expected = content
self.simplehelp(filename, content, expected, len(expected))
# get / put unicode
def test_unicode_1(self):
filename = self.gen_random_string()
content = 'a'
expected = u'a'
self.unicodehelp(filename, content, expected)
def test_unicode_2(self):
filename = self.gen_random_string()
content = b'\xc3\x9f'.decode('utf8')
expected = u'ß'
self.unicodehelp(filename, content, expected)
def test_unicode_3(self):
filename = self.gen_random_string()
content = u'ß'
expected = u'ß'
self.unicodehelp(filename, content, expected)
def test_unicode_4(self):
filename = self.gen_random_string()
content = 'ß'
if compat.is_py2:
content = content.decode('utf8')
expected = u'ß'
self.unicodehelp(filename, content, expected)
def test_unicode_5(self):
filename = self.gen_random_string()
content = self.gen_random_string()
expected = content
self.unicodehelp(filename, content, expected)
def test_unicode_6(self):
filename = self.gen_random_string()
content = self.gen_random_string(1024 * 1024)
expected = content
self.unicodehelp(filename, content, expected)
# JSON
def test_json(self):
filename = self.gen_random_string()
content = {u"ß": u"ß"}
expected = {u"ß": u"ß"}
self.jsonhelp(filename, content, expected)
# Removes
def test_remove_existent(self):
filename = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
self._storage.put_content(filename, content)
self._storage.remove(filename)
assert not self._storage.exists(filename)
def test_remove_folder(self):
dirname = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
self._storage.put_content('%s/%s' % (dirname, filename1), content)
self._storage.put_content('%s/%s' % (dirname, filename2), content)
self._storage.remove(dirname)
assert not self._storage.exists(filename1)
assert not self._storage.exists(filename2)
assert not self._storage.exists(dirname)
# Check the lru is ok
try:
self._storage.get_content(filename1)
assert False
except Exception:
pass
try:
self._storage.get_content(filename2)
assert False
except Exception:
pass
@tools.raises(exceptions.FileNotFoundError)
def test_remove_inexistent(self):
filename = self.gen_random_string()
self._storage.remove(filename)
@tools.raises(exceptions.FileNotFoundError)
def test_read_inexistent(self):
filename = self.gen_random_string()
self._storage.get_content(filename)
@tools.raises(exceptions.FileNotFoundError)
def test_get_size_inexistent(self):
filename = self.gen_random_string()
self._storage.get_size(filename)
def test_stream(self):
filename = self.gen_random_string()
        # test a small 7-byte payload (the 7MB variant is commented out)
content = self.gen_random_string(7).encode('utf8') # * 1024 * 1024
# test exists
io = compat.StringIO(content)
logger.debug("%s should NOT exists still" % filename)
assert not self._storage.exists(filename)
self._storage.stream_write(filename, io)
io.close()
logger.debug("%s should exist now" % filename)
assert self._storage.exists(filename)
# test read / write
data = compat.bytes()
for buf in self._storage.stream_read(filename):
data += buf
assert content == data
# test bytes_range only if the storage backend suppports it
if self._storage.supports_bytes_range:
b = random.randint(0, math.floor(len(content) / 2))
bytes_range = (b, random.randint(b + 1, len(content) - 1))
data = compat.bytes()
for buf in self._storage.stream_read(filename, bytes_range):
data += buf
expected_content = content[bytes_range[0]:bytes_range[1] + 1]
assert data == expected_content
# logger.debug("Content length is %s" % len(content))
# logger.debug("And retrieved content length should equal it: %s" %
# len(data))
# logger.debug("got content %s" % content)
# logger.debug("got data %s" % data)
# test remove
self._storage.remove(filename)
assert not self._storage.exists(filename)
@tools.raises(exceptions.FileNotFoundError)
def test_stream_read_inexistent(self):
filename = self.gen_random_string()
data = compat.bytes()
for buf in self._storage.stream_read(filename):
data += buf
@tools.raises(exceptions.FileNotFoundError)
def test_inexistent_list_directory(self):
notexist = self.gen_random_string()
iterator = self._storage.list_directory(notexist)
next(iterator)
# XXX only elliptics return StopIteration for now - though we should
# return probably that for all
@tools.raises(exceptions.FileNotFoundError, StopIteration)
def test_empty_list_directory(self):
path = self.gen_random_string()
content = self.gen_random_string().encode('utf8')
self._storage.put_content(path, content)
iterator = self._storage.list_directory(path)
next(iterator)
def test_list_directory(self):
base = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
fb1 = '%s/%s' % (base, filename1)
fb2 = '%s/%s' % (base, filename2)
content = self.gen_random_string().encode('utf8')
self._storage.put_content(fb1, content)
self._storage.put_content(fb2, content)
assert sorted([fb1, fb2]
) == sorted(list(self._storage.list_directory(base)))
def test_list_directory_with_subdir(self):
if self.scheme == 's3':
raise SkipTest("Check GH #596.")
base = self.gen_random_string()
dir1 = self.gen_random_string()
dir2 = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
fd1 = '%s/%s' % (base, dir1)
fd2 = '%s/%s' % (base, dir2)
fb1 = '%s/%s' % (fd1, filename1)
fb2 = '%s/%s' % (fd2, filename2)
content = self.gen_random_string().encode('utf8')
self._storage.put_content(fb1, content)
self._storage.put_content(fb2, content)
assert sorted([fd1, fd2]
) == sorted(list(self._storage.list_directory(base)))
# def test_root_list_directory(self):
# fb1 = self.gen_random_string()
# fb2 = self.gen_random_string()
# content = self.gen_random_string()
# self._storage.put_content(fb1, content)
# self._storage.put_content(fb2, content)
# print(list(self._storage.list_directory()))
# assert sorted([fb1, fb2]
# ) == sorted(list(self._storage.list_directory()))
@tools.raises(exceptions.FileNotFoundError, StopIteration)
def test_empty_after_remove_list_directory(self):
base = self.gen_random_string()
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
fb1 = '%s/%s' % (base, filename1)
fb2 = '%s/%s' % (base, filename2)
content = self.gen_random_string().encode('utf8')
self._storage.put_content(fb1, content)
self._storage.put_content(fb2, content)
self._storage.remove(fb1)
self._storage.remove(fb2)
iterator = self._storage.list_directory(base)
next(iterator)
def test_paths(self):
namespace = 'namespace'
repository = 'repository'
tag = 'sometag'
image_id = 'imageid'
p = self._storage.images_list_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.image_json_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_mark_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_checksum_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_layer_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_ancestry_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_files_path(image_id)
assert not self._storage.exists(p)
p = self._storage.image_diff_path(image_id)
assert not self._storage.exists(p)
p = self._storage.repository_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.tag_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.tag_path(namespace, repository, tag)
assert not self._storage.exists(p)
p = self._storage.repository_json_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.repository_tag_json_path(namespace, repository, tag)
assert not self._storage.exists(p)
p = self._storage.index_images_path(namespace, repository)
assert not self._storage.exists(p)
p = self._storage.private_flag_path(namespace, repository)
assert not self._storage.exists(p)
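# Illustrative reuse sketch (class name, scheme and path are placeholders):
# the checks above are backend-agnostic, so a concrete storage driver can mix
# this class into a standard test case and point it at its own scheme.
#
#     import unittest
#
#     class TestFileDriver(Driver, unittest.TestCase):
#         def __init__(self, *args, **kwargs):
#             Driver.__init__(self, scheme='file', path='/tmp/registry-test')
#             unittest.TestCase.__init__(self, *args, **kwargs)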
| apache-2.0 |
rastaman/ansible-modules-core | cloud/amazon/ec2_metric_alarm.py | 61 | 10684 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
options: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
options: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions', {})
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict'),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
main()
| gpl-3.0 |
wolverineav/neutron | neutron/db/common_db_mixin.py | 3 | 14029 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import weakref
from oslo_log import log as logging
from oslo_utils import excutils
import six
from sqlalchemy import and_
from sqlalchemy.ext import associationproxy
from sqlalchemy import or_
from sqlalchemy import sql
from neutron._i18n import _LE
from neutron.db import sqlalchemyutils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def _noop_context_manager():
yield
def safe_creation(context, create_fn, delete_fn, create_bindings,
transaction=True):
'''This function wraps logic of object creation in safe atomic way.
In case of exception, object is deleted.
More information when this method could be used can be found in
developer guide - Effective Neutron: Database interaction section.
http://docs.openstack.org/developer/neutron/devref/effective_neutron.html
:param context: context
:param create_fn: function without arguments that is called to create
object and returns this object.
:param delete_fn: function that is called to delete an object. It is
called with object's id field as an argument.
:param create_bindings: function that is called to create bindings for
an object. It is called with object's id field as an argument.
:param transaction: if true the whole operation will be wrapped in a
transaction. if false, no transaction will be used.
'''
cm = (context.session.begin(subtransactions=True)
if transaction else _noop_context_manager())
with cm:
obj = create_fn()
try:
value = create_bindings(obj['id'])
except Exception:
with excutils.save_and_reraise_exception():
try:
delete_fn(obj['id'])
except Exception as e:
LOG.error(_LE("Cannot clean up created object %(obj)s. "
"Exception: %(exc)s"), {'obj': obj['id'],
'exc': e})
return obj, value
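# Illustrative usage sketch (the plugin helpers are hypothetical): create an
# object and its bindings atomically; the object is deleted again if creating
# the bindings raises.
#
#     router, binding = safe_creation(
#         context,
#         create_fn=lambda: plugin.create_router_db(context, router_data),
#         delete_fn=lambda router_id: plugin.delete_router_db(context, router_id),
#         create_bindings=lambda rid: plugin.create_router_binding(context, rid))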
def model_query_scope(context, model):
# Unless a context has 'admin' or 'advanced-service' rights the
# query will be scoped to a single tenant_id
return ((not context.is_admin and hasattr(model, 'tenant_id')) and
(not context.is_advsvc and hasattr(model, 'tenant_id')))
def model_query(context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if model_query_scope(context, model):
query_filter = (model.tenant_id == context.tenant_id)
if query_filter is not None:
query = query.filter(query_filter)
return query
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
Filter hooks take as input the filter expression being built and return
a transformed filter expression
"""
cls._model_query_hooks.setdefault(model, {})[name] = {
'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
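    # Illustrative registration sketch (the model, hook name and joined table
    # are placeholders): an extension can augment every query on a model by
    # joining its own table while leaving the filter expression untouched.
    #
    #     CommonDbMixin.register_model_query_hook(
    #         MyModel, 'my_extension_hook',
    #         query_hook=lambda context, model, query: query.outerjoin(MyTable),
    #         filter_hook=lambda context, model, expr: expr,
    #         result_filters=None)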
@classmethod
def register_dict_extend_funcs(cls, resource, funcs):
cls._dict_extend_functions.setdefault(resource, []).extend(funcs)
@property
def safe_reference(self):
"""Return a weakref to the instance.
Minimize the potential for the instance persisting
unnecessarily in memory by returning a weakref proxy that
won't prevent deallocation.
"""
return weakref.proxy(self)
def model_query_scope(self, context, model):
return model_query_scope(context, model)
def _model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if self.model_query_scope(context, model):
if hasattr(model, 'rbac_entries'):
query = query.outerjoin(model.rbac_entries)
rbac_model = model.rbac_entries.property.mapper.class_
query_filter = (
(model.tenant_id == context.tenant_id) |
((rbac_model.action == 'access_as_shared') &
((rbac_model.target_tenant == context.tenant_id) |
(rbac_model.target_tenant == '*'))))
elif hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == sql.true()))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
query_hook = hooks.get('query')
if isinstance(query_hook, six.string_types):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, six.string_types):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
def _apply_filters_to_query(self, query, model, filters, context=None):
if filters:
for key, value in six.iteritems(filters):
column = getattr(model, key, None)
# NOTE(kevinbenton): if column is a hybrid property that
# references another expression, attempting to convert to
# a boolean will fail so we must compare to None.
# See "An Important Expression Language Gotcha" in:
# docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
if column is not None:
if not value:
query = query.filter(sql.false())
return query
if isinstance(column, associationproxy.AssociationProxy):
# association proxies don't support in_ so we have to
# do multiple equals matches
query = query.filter(
or_(*[column == v for v in value]))
else:
query = query.filter(column.in_(value))
elif key == 'shared' and hasattr(model, 'rbac_entries'):
# translate a filter on shared into a query against the
# object's rbac entries
query = query.outerjoin(model.rbac_entries)
rbac = model.rbac_entries.property.mapper.class_
matches = [rbac.target_tenant == '*']
if context:
matches.append(rbac.target_tenant == context.tenant_id)
# any 'access_as_shared' records that match the
# wildcard or requesting tenant
is_shared = and_(rbac.action == 'access_as_shared',
or_(*matches))
if not value[0]:
# NOTE(kevinbenton): we need to find objects that don't
# have an entry that matches the criteria above so
# we use a subquery to exclude them.
# We can't just filter the inverse of the query above
# because that will still give us a network shared to
# our tenant (or wildcard) if it's shared to another
# tenant.
# This is the column joining the table to rbac via
# the object_id. We can't just use model.id because
# subnets join on network.id so we have to inspect the
# relationship.
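                        # Conceptually (an assumed SQL shape, for
                        # illustration only) the negated case becomes:
                        #   WHERE <join column> NOT IN (
                        #       SELECT object_id FROM <rbac table>
                        #       WHERE action = 'access_as_shared'
                        #         AND target_tenant IN (:tenant_id, '*'))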
join_cols = model.rbac_entries.property.local_columns
oid_col = list(join_cols)[0]
is_shared = ~oid_col.in_(
query.session.query(rbac.object_id).
filter(is_shared)
)
query = query.filter(is_shared)
for _nam, hooks in six.iteritems(self._model_query_hooks.get(model,
{})):
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, six.string_types):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, six.string_types):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters,
context)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters=filters,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
def _get_marker_obj(self, context, resource, limit, marker):
if limit and marker:
return getattr(self, '_get_%s' % resource)(context, marker)
return None
def _filter_non_model_columns(self, data, model):
"""Remove all the attributes from data which are not columns or
association proxies of the model passed as second parameter
"""
columns = [c.name for c in model.__table__.columns]
return dict((k, v) for (k, v) in
six.iteritems(data) if k in columns or
isinstance(getattr(model, k, None),
associationproxy.AssociationProxy))
| apache-2.0 |
cgstudiomap/cgstudiomap | main/eggs/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/graphics/barcode/qrencoder.py | 36 | 34126 | # QRCode for Python
#
# Support for Kanji, Hanzi, ECI, FNC1 and Structured append,
# and optimizations by Anders Hammarquist <[email protected]>
#
# Copyright (c) 2014 Open End AB http://www.openend.se/
#
# Ported from the Javascript library by Sam Curren
#
# QRCode for Javascript
# http://d-project.googlecode.com/svn/trunk/misc/qrcode/js/qrcode.js
#
# Copyright (c) 2009 Kazuhiko Arase
#
# URL: http://www.d-project.com/
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# The word "QR Code" is registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
import re
import itertools
try:
from itertools import zip_longest
except:
from itertools import izip_longest as zip_longest
try:
unicode
except NameError:
# No unicode in Python 3
unicode = str
class QR:
valid = None
bits = None
group = 0
def __init__(self, data):
if self.valid and not self.valid(data):
raise ValueError
self.data = data
def __len__(self):
return len(self.data)
@property
def bitlength(self):
if self.bits is None:
return 0
q, r = divmod(len(self), len(self.bits))
return q * sum(self.bits) + sum(self.bits[:r])
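    # Worked example (numeric mode, bits == (4, 3, 3)): five digits make one
    # full group of three (4+3+3 = 10 bits) plus a two-digit remainder
    # (4+3 = 7 bits), so bitlength is 17.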
def getLengthBits(self, ver):
if 0 < ver < 10:
return self.lengthbits[0]
elif ver < 27:
return self.lengthbits[1]
elif ver < 41:
return self.lengthbits[2]
raise ValueError("Unknown version: " + ver)
def getLength(self):
return len(self.data)
def __repr__(self):
return repr(self.data)
def write_header(self, buffer, version):
buffer.put(self.mode, 4)
lenbits = self.getLengthBits(version)
if lenbits:
buffer.put(len(self.data), lenbits )
def write(self, buffer, version):
self.write_header(buffer, version)
for g in zip_longest(*[iter(self.data)] * self.group):
bits = 0
n = 0
for i in range(self.group):
if g[i] is not None:
n *= len(self.chars)
n += self.chars.index(g[i])
bits += self.bits[i]
buffer.put(n, bits)
class QRNumber(QR):
valid = re.compile(u'[0-9]*$').match
chars = u'0123456789'
bits = (4,3,3)
group = 3
mode = 0x1
lengthbits = (10, 12, 14)
class QRAlphaNum(QR):
    valid = re.compile(u'[-0-9A-Z $%*+./:]*$').match
chars = u'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
bits = (6,5)
group = 2
mode = 0x2
lengthbits = (9, 11, 13)
class QR8bitByte(QR):
bits = (8,)
group = 1
mode = 0x4
lengthbits = (8, 16, 16)
def __init__(self, data):
if isinstance(data, unicode):
self.data = data.encode('utf-8') # XXX This really needs an ECI too
else:
self.data = data # It'd better be byte data
def write(self, buffer, version):
self.write_header(buffer, version)
for c in self.data:
if isinstance(c, str):
c = ord(c)
buffer.put(c, 8)
class QRKanji(QR):
bits = (13,)
group = 1
mode = 0x8
lengthbits = (8, 10, 12)
def __init__(self, data):
try:
self.data = self.unicode_to_qrkanji(data)
except UnicodeEncodeError:
raise ValueError('Not valid kanji')
def unicode_to_qrkanji(self, data):
codes = []
for i,c in enumerate(data):
try:
c = c.encode('shift-jis')
try:
c,d = map(ord, c)
except TypeError:
# Python 3
c,d = c
except UnicodeEncodeError as e:
raise UnicodeEncodeError('qrkanji', data, i, i+1, e.args[4])
except ValueError:
raise UnicodeEncodeError('qrkanji', data, i, i+1,
'illegal multibyte sequence')
c = c << 8 | d
if 0x8140 <= c <=0x9ffc:
c -= 0x8140
c = (((c & 0xff00) >> 8) * 0xc0) + (c & 0xff)
elif 0xe040 <= c <= 0xebbf:
c -= 0xc140
c = (((c & 0xff00) >> 8) * 0xc0) + (c & 0xff)
else:
raise UnicodeEncodeError('qrkanji', data, i, i+1,
'illegal multibyte sequence')
codes.append(c)
return codes
def write(self, buffer, version):
self.write_header(buffer, version)
for d in self.data:
buffer.put(d, 13)
class QRHanzi(QR):
bits = (13,)
group = 1
mode = 0xD
lengthbits = (8, 10, 12)
def __init__(self, data):
try:
self.data = self.unicode_to_qrhanzi(data)
except UnicodeEncodeError:
raise ValueError('Not valid hanzi')
def unicode_to_qrhanzi(self, data):
codes = []
for i,c in enumerate(data):
try:
c = c.encode('gb2312')
try:
c,d = map(ord, c)
except TypeError:
# Python 3
c,d = c
except UnicodeEncodeError as e:
raise UnicodeEncodeError('qrhanzi', data, i, i+1, e.args[4])
except ValueError:
raise UnicodeEncodeError('qrhanzi', data, i, i+1,
'illegal multibyte sequence')
c = c << 8 | d
if 0xa1a1 <= c <=0xaafe:
c -= 0xa1a1
c = (((c & 0xff00) >> 8) * 0x60) + (c & 0xff)
elif 0xb0a1 <= c <= 0xfafe:
c -= 0xa6a1
c = (((c & 0xff00) >> 8) * 0x60) + (c & 0xff)
else:
raise UnicodeEncodeError('qrhanzi', data, i, i+1,
'illegal multibyte sequence')
codes.append(c)
return codes
def write_header(self, buffer, version):
buffer.put(self.mode, 4)
buffer.put(1, 4) # Subset 1: GB2312 encoding
lenbits = self.getLengthBits(version)
if lenbits:
buffer.put(len(self.data), lenbits )
def write(self, buffer, version):
self.write_header(buffer, version)
for d in self.data:
buffer.put(d, 13)
# Special modes
class QRECI(QR):
mode = 0x7
lengthbits = (0, 0, 0)
def __init__(self, data):
if not 0 < data < 999999:
# Spec says 999999, format supports up to 0x1fffff = 2097151
raise ValueError("ECI out of range")
self.data = data
def write(self, buffer, version):
self.write_header(buffer, version)
if self.data <= 0x7f:
buffer.put(self.data, 8)
elif self.data <= 0x3fff:
buffer.put(self.data | 0x8000, 16)
elif self.data <= 0x1fffff:
buffer.put(self.data | 0xC00000, 24)
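# A hedged example of the ECI mode above (26 is the ECI assignment commonly
# used to declare UTF-8; the snippet is illustrative only):
#
#   qr.addData(QRECI(26))
#   qr.addData(QR8bitByte(u'héllo'.encode('utf-8')))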
class QRStructAppend(QR):
mode = 0x3
lengthbits = (0, 0, 0)
def __init__(self, part, total, parity):
if not 0 < part <= 16:
raise ValueError("part out of range [1,16]")
if not 0 < total <= 16:
raise ValueError("total out of range [1,16]")
self.part = part
self.total = total
self.parity = parity
def write(self, buffer, version):
self.write_header(buffer, version)
buffer.put(self.part, 4)
buffer.put(self.total, 4)
buffer.put(self.parity, 8)
class QRFNC1First(QR):
mode = 0x5
lengthbits = (0, 0, 0)
def __init__(self):
pass
def write(self, buffer, version):
self.write_header(buffer, version)
class QRFNC1Second(QR):
valid = re.compile('^([A-Za-z]|[0-9][0-9])$').match
mode = 0x9
lengthbits = (0, 0, 0)
def write(self, buffer, version):
self.write_header(buffer, version)
d = self.data
if len(d) == 1:
d = ord(d) + 100
else:
d = int(d)
buffer.put(d, 8)
class QRCode:
def __init__(self, version, errorCorrectLevel):
self.version = version
self.errorCorrectLevel = errorCorrectLevel
self.modules = None
self.moduleCount = 0
self.dataCache = None
self.dataList = []
def addData(self, data):
if isinstance(data, QR):
newData = data
else:
for conv in (QRNumber, QRAlphaNum, QRKanji, QR8bitByte):
try:
newData = conv(data)
break
except ValueError:
pass
else:
raise ValueError
self.dataList.append(newData)
self.dataCache = None
def isDark(self, row, col):
return self.modules[row][col]
def getModuleCount(self):
return self.moduleCount
def calculate_version(self):
# Calculate version for data to fit the QR Code capacity
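        # e.g. 'HELLO WORLD' at level M: version 1 offers 16 data bytes
        # (128 bits) and the segment needs 4 (mode) + 9 (length) + 61
        # (payload) = 74 bits, so version 1 is returned.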
for version in range(1, 40):
rsBlocks = QRRSBlock.getRSBlocks(version, self.errorCorrectLevel)
totalDataCount = sum(block.dataCount for block in rsBlocks)
length = 0
for data in self.dataList:
length += 4
length += data.getLengthBits(version)
length += data.bitlength
if length <= totalDataCount * 8:
break
return version
def make(self):
if self.version is None:
self.version = self.calculate_version()
self.makeImpl(False, self.getBestMaskPattern())
def makeImpl(self, test, maskPattern):
self.moduleCount = self.version * 4 + 17
self.modules = [ [False] * self.moduleCount
for x in range(self.moduleCount) ]
self.setupPositionProbePattern(0, 0)
self.setupPositionProbePattern(self.moduleCount - 7, 0)
self.setupPositionProbePattern(0, self.moduleCount - 7)
self.setupPositionAdjustPattern()
self.setupTimingPattern()
self.setupTypeInfo(test, maskPattern)
if (self.version >= 7):
self.setupTypeNumber(test)
if (self.dataCache == None):
self.dataCache = QRCode.createData(self.version,
self.errorCorrectLevel,
self.dataList)
self.mapData(self.dataCache, maskPattern)
_positionProbePattern = [
[True, True, True, True, True, True, True],
[True, False, False, False, False, False, True],
[True, False, True, True, True, False, True],
[True, False, True, True, True, False, True],
[True, False, True, True, True, False, True],
[True, False, False, False, False, False, True],
[True, True, True, True, True, True, True],
]
def setupPositionProbePattern(self, row, col):
if row == 0:
self.modules[row+7][col:col+7] = [False] * 7
if col == 0:
self.modules[row+7][col+7] = False
else:
self.modules[row+7][col-1] = False
else:
# col == 0
self.modules[row-1][col:col+8] = [False] * 8
for r, data in enumerate(self._positionProbePattern):
self.modules[row+r][col:col+7] = data
if col == 0:
self.modules[row+r][col+7] = False
else:
self.modules[row+r][col-1] = False
def getBestMaskPattern(self):
minLostPoint = 0
pattern = 0
for i in range(8):
self.makeImpl(True, i);
lostPoint = QRUtil.getLostPoint(self);
if (i == 0 or minLostPoint > lostPoint):
minLostPoint = lostPoint
pattern = i
return pattern
def setupTimingPattern(self):
for r in range(8, self.moduleCount - 8):
self.modules[r][6] = (r % 2 == 0)
self.modules[6][8:self.moduleCount - 8] = itertools.islice(
itertools.cycle([True, False]), self.moduleCount - 16)
_positionAdjustPattern = [
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, True, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
]
def setupPositionAdjustPattern(self):
pos = QRUtil.getPatternPosition(self.version)
maxpos = self.moduleCount - 8
for row, col in itertools.product(pos, pos):
if col <= 8 and (row <= 8 or row >= maxpos):
continue
elif col >= maxpos and row <= 8:
continue
for r, data in enumerate(self._positionAdjustPattern):
self.modules[row + r - 2][col-2:col+3] = data
def setupTypeNumber(self, test):
bits = QRUtil.getBCHTypeNumber(self.version)
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i // 3][i % 3 + self.moduleCount - 8 - 3] = mod;
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i % 3 + self.moduleCount - 8 - 3][i // 3] = mod;
def setupTypeInfo(self, test, maskPattern):
data = (self.errorCorrectLevel << 3) | maskPattern
bits = QRUtil.getBCHTypeInfo(data)
# vertical
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1)
if (i < 6):
self.modules[i][8] = mod
elif (i < 8):
self.modules[i + 1][8] = mod
else:
self.modules[self.moduleCount - 15 + i][8] = mod
# horizontal
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1);
if (i < 8):
self.modules[8][self.moduleCount - i - 1] = mod
elif (i < 9):
self.modules[8][15 - i - 1 + 1] = mod
else:
self.modules[8][15 - i - 1] = mod
# fixed module
self.modules[self.moduleCount - 8][8] = (not test)
def _dataPosIterator(self):
cols = itertools.chain(range(self.moduleCount - 1, 6, -2),
range(5, 0, -2))
rows = (list(range(9, self.moduleCount - 8)),
list(itertools.chain(range(6), range(7, self.moduleCount))),
list(range(9, self.moduleCount)))
rrows = tuple( list(reversed(r)) for r in rows)
ppos = QRUtil.getPatternPosition(self.version)
ppos = set(itertools.chain.from_iterable(
(p-2, p-1, p, p+1, p+2) for p in ppos))
maxpos = self.moduleCount - 11
for col in cols:
rows, rrows = rrows, rows
if col <= 8: rowidx = 0
elif col >= self.moduleCount - 8: rowidx = 2
else: rowidx = 1
for row in rows[rowidx]:
for c in range(2):
c = col - c
if self.version >= 7:
if row < 6 and c >= self.moduleCount - 11:
continue
elif col < 6 and row >= self.moduleCount - 11:
continue
if row in ppos and c in ppos:
if not (row < 11 and (c < 11 or c > maxpos) or
c < 11 and (row < 11 or row > maxpos)):
continue
yield (c, row)
_dataPosList = None
def dataPosIterator(self):
if not self._dataPosList:
self._dataPosList = list(self._dataPosIterator())
return self._dataPosList
def _dataBitIterator(self, data):
for byte in data:
for bit in [0x80, 0x40, 0x20, 0x10,
0x08, 0x04, 0x02, 0x01]:
yield bool(byte & bit)
_dataBitList = None
def dataBitIterator(self, data):
if not self._dataBitList:
self._dataBitList = list(self._dataBitIterator(data))
return iter(self._dataBitList)
def mapData(self, data, maskPattern):
bits = self.dataBitIterator(data)
mask = QRUtil.getMask(maskPattern)
for (col, row), dark in zip_longest(self.dataPosIterator(), bits,
fillvalue=False):
self.modules[row][col] = dark ^ mask(row, col)
PAD0 = 0xEC
PAD1 = 0x11
@staticmethod
def createData(version, errorCorrectLevel, dataList):
rsBlocks = QRRSBlock.getRSBlocks(version, errorCorrectLevel)
buffer = QRBitBuffer();
for data in dataList:
data.write(buffer, version)
# calc num max data.
totalDataCount = 0;
for block in rsBlocks:
totalDataCount += block.dataCount
if (buffer.getLengthInBits() > totalDataCount * 8):
raise Exception("code length overflow. (%d > %d)" %
(buffer.getLengthInBits(), totalDataCount * 8))
# end code
if (buffer.getLengthInBits() + 4 <= totalDataCount * 8):
buffer.put(0, 4)
# padding
while (buffer.getLengthInBits() % 8 != 0):
buffer.putBit(False)
# padding
while (True):
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD0, 8)
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD1, 8)
return QRCode.createBytes(buffer, rsBlocks)
@staticmethod
def createBytes(buffer, rsBlocks):
offset = 0
maxDcCount = 0
maxEcCount = 0
totalCodeCount = 0
dcdata = []
ecdata = []
for block in rsBlocks:
totalCodeCount += block.totalCount
dcCount = block.dataCount
ecCount = block.totalCount - dcCount
maxDcCount = max(maxDcCount, dcCount)
maxEcCount = max(maxEcCount, ecCount)
dcdata.append(buffer.buffer[offset:offset+dcCount])
offset += dcCount
rsPoly = QRUtil.getErrorCorrectPolynomial(ecCount)
rawPoly = QRPolynomial(dcdata[-1], rsPoly.getLength() - 1)
modPoly = rawPoly.mod(rsPoly)
rLen = rsPoly.getLength() - 1
mLen = modPoly.getLength()
ecdata.append([ (modPoly.get(i) if i >= 0 else 0)
for i in range(mLen - rLen, mLen) ])
data = [ d for dd in itertools.chain(
zip_longest(*dcdata), zip_longest(*ecdata))
for d in dd if d is not None]
return data
class QRErrorCorrectLevel:
L = 1
M = 0
Q = 3
H = 2
class QRMaskPattern:
PATTERN000 = 0
PATTERN001 = 1
PATTERN010 = 2
PATTERN011 = 3
PATTERN100 = 4
PATTERN101 = 5
PATTERN110 = 6
PATTERN111 = 7
class QRUtil(object):
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170]
]
G15 = ((1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) |
(1 << 0))
G18 = ((1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) |
(1 << 5) | (1 << 2) | (1 << 0))
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
@staticmethod
def getBCHTypeInfo(data):
d = data << 10;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0):
d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) -
QRUtil.getBCHDigit(QRUtil.G15) ) )
return ( (data << 10) | d) ^ QRUtil.G15_MASK
@staticmethod
def getBCHTypeNumber(data):
d = data << 12;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0):
d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) -
QRUtil.getBCHDigit(QRUtil.G18) ) )
return (data << 12) | d
@staticmethod
def getBCHDigit(data):
digit = 0;
while (data != 0):
digit += 1
data >>= 1
return digit
@staticmethod
def getPatternPosition(version):
return QRUtil.PATTERN_POSITION_TABLE[version - 1]
maskPattern = {
0: lambda i,j: (i + j) % 2 == 0,
1: lambda i,j: i % 2 == 0,
2: lambda i,j: j % 3 == 0,
3: lambda i,j: (i + j) % 3 == 0,
4: lambda i,j: (i // 2 + j // 3) % 2 == 0,
5: lambda i,j: (i*j)%2 + (i*j)%3 == 0,
6: lambda i,j: ( (i * j) % 2 + (i * j) % 3) % 2 == 0,
7: lambda i,j: ( (i * j) % 3 + (i + j) % 2) % 2 == 0
}
@classmethod
def getMask(cls, maskPattern):
return cls.maskPattern[maskPattern]
@staticmethod
def getErrorCorrectPolynomial(errorCorrectLength):
a = QRPolynomial([1], 0);
for i in range(errorCorrectLength):
a = a.multiply(QRPolynomial([1, QRMath.gexp(i)], 0) )
return a
@classmethod
def maskScoreRule1vert(cls, modules):
score = 0
lastCount = [0]
lastRow = None
for row in modules:
# Vertical patterns
if lastRow:
changed = [a ^ b for a,b in zip(row, lastRow)]
scores = [a and (b-4+3) for a,b in
zip_longest(changed, lastCount, fillvalue=0)
if b >= 4]
score += sum(scores)
lastCount = [0 if a else b + 1
for a,b in zip_longest(changed, lastCount,
fillvalue=0)]
lastRow = row
score += sum([b-4+3 for b in lastCount if b >= 4]) # final counts
return score
@classmethod
def maskScoreRule2(cls, modules):
score = 0
lastRow = modules[0]
for row in modules[1:]:
lastCol0, lastCol1 = row[0], lastRow[0]
for col0, col1 in zip(row[1:], lastRow[1:]):
if col0 == col1 == lastCol0 == lastCol1:
score += 3
lastCol0, lastCol1 = col0, col1
lastRow = row
return score
@classmethod
def maskScoreRule3hor(
cls, modules,
pattern = [True, False, True, True, True, False, True,
False, False, False, False]):
patternlen = len(pattern)
score = 0
for row in modules:
j = 0
maxj = len(row) - patternlen
while j < maxj:
if row[j:j+patternlen] == pattern:
score += 40
j += patternlen
else:
j += 1
return score
@classmethod
def maskScoreRule4(cls, modules):
cellCount = len(modules)**2
count = sum(sum(row) for row in modules)
return 10 * (abs(100 * count // cellCount - 50) // 5)
@classmethod
def getLostPoint(cls, qrCode):
lostPoint = 0;
# LEVEL1
lostPoint += cls.maskScoreRule1vert(qrCode.modules)
lostPoint += cls.maskScoreRule1vert(zip(*qrCode.modules))
# LEVEL2
lostPoint += cls.maskScoreRule2(qrCode.modules)
# LEVEL3
lostPoint += cls.maskScoreRule3hor(qrCode.modules)
lostPoint += cls.maskScoreRule3hor(zip(*qrCode.modules))
# LEVEL4
lostPoint += cls.maskScoreRule4(qrCode.modules)
return lostPoint
class QRMath:
@staticmethod
def glog(n):
if (n < 1):
raise Exception("glog(" + n + ")")
return LOG_TABLE[n];
@staticmethod
def gexp(n):
while n < 0:
n += 255
while n >= 256:
n -= 255
return EXP_TABLE[n];
EXP_TABLE = [x for x in range(256)]
LOG_TABLE = [x for x in range(256)]
for i in range(8):
EXP_TABLE[i] = 1 << i;
for i in range(8, 256):
EXP_TABLE[i] = (EXP_TABLE[i - 4] ^ EXP_TABLE[i - 5] ^
EXP_TABLE[i - 6] ^ EXP_TABLE[i - 8])
for i in range(255):
LOG_TABLE[EXP_TABLE[i] ] = i
class QRPolynomial:
def __init__(self, num, shift):
if (len(num) == 0):
            raise Exception(str(len(num)) + "/" + str(shift))
offset = 0
while offset < len(num) and num[offset] == 0:
offset += 1
self.num = num[offset:] + [0]*shift
def get(self, index):
return self.num[index]
def getLength(self):
return len(self.num)
def multiply(self, e):
num = [0] * (self.getLength() + e.getLength() - 1);
for i in range(self.getLength()):
for j in range(e.getLength()):
num[i + j] ^= QRMath.gexp(QRMath.glog(self.get(i) ) +
QRMath.glog(e.get(j) ) )
return QRPolynomial(num, 0);
def mod(self, e):
if (self.getLength() < e.getLength()):
return self;
ratio = QRMath.glog(self.num[0] ) - QRMath.glog(e.num[0] )
num = [nn ^ QRMath.gexp(QRMath.glog(en) + ratio)
for nn,en in zip(self.num, e.num)]
num += self.num[e.getLength():]
# recursive call
return QRPolynomial(num, 0).mod(e);
class QRRSBlock:
RS_BLOCK_TABLE = [
# L
# M
# Q
# H
# 1
[1, 26, 19],
[1, 26, 16],
[1, 26, 13],
[1, 26, 9],
# 2
[1, 44, 34],
[1, 44, 28],
[1, 44, 22],
[1, 44, 16],
# 3
[1, 70, 55],
[1, 70, 44],
[2, 35, 17],
[2, 35, 13],
# 4
[1, 100, 80],
[2, 50, 32],
[2, 50, 24],
[4, 25, 9],
# 5
[1, 134, 108],
[2, 67, 43],
[2, 33, 15, 2, 34, 16],
[2, 33, 11, 2, 34, 12],
# 6
[2, 86, 68],
[4, 43, 27],
[4, 43, 19],
[4, 43, 15],
# 7
[2, 98, 78],
[4, 49, 31],
[2, 32, 14, 4, 33, 15],
[4, 39, 13, 1, 40, 14],
# 8
[2, 121, 97],
[2, 60, 38, 2, 61, 39],
[4, 40, 18, 2, 41, 19],
[4, 40, 14, 2, 41, 15],
# 9
[2, 146, 116],
[3, 58, 36, 2, 59, 37],
[4, 36, 16, 4, 37, 17],
[4, 36, 12, 4, 37, 13],
# 10
[2, 86, 68, 2, 87, 69],
[4, 69, 43, 1, 70, 44],
[6, 43, 19, 2, 44, 20],
[6, 43, 15, 2, 44, 16],
# 11
[4, 101, 81],
[1, 80, 50, 4, 81, 51],
[4, 50, 22, 4, 51, 23],
[3, 36, 12, 8, 37, 13],
# 12
[2, 116, 92, 2, 117, 93],
[6, 58, 36, 2, 59, 37],
[4, 46, 20, 6, 47, 21],
[7, 42, 14, 4, 43, 15],
# 13
[4, 133, 107],
[8, 59, 37, 1, 60, 38],
[8, 44, 20, 4, 45, 21],
[12, 33, 11, 4, 34, 12],
# 14
[3, 145, 115, 1, 146, 116],
[4, 64, 40, 5, 65, 41],
[11, 36, 16, 5, 37, 17],
[11, 36, 12, 5, 37, 13],
# 15
[5, 109, 87, 1, 110, 88],
[5, 65, 41, 5, 66, 42],
[5, 54, 24, 7, 55, 25],
[11, 36, 12],
# 16
[5, 122, 98, 1, 123, 99],
[7, 73, 45, 3, 74, 46],
[15, 43, 19, 2, 44, 20],
[3, 45, 15, 13, 46, 16],
# 17
[1, 135, 107, 5, 136, 108],
[10, 74, 46, 1, 75, 47],
[1, 50, 22, 15, 51, 23],
[2, 42, 14, 17, 43, 15],
# 18
[5, 150, 120, 1, 151, 121],
[9, 69, 43, 4, 70, 44],
[17, 50, 22, 1, 51, 23],
[2, 42, 14, 19, 43, 15],
# 19
[3, 141, 113, 4, 142, 114],
[3, 70, 44, 11, 71, 45],
[17, 47, 21, 4, 48, 22],
[9, 39, 13, 16, 40, 14],
# 20
[3, 135, 107, 5, 136, 108],
[3, 67, 41, 13, 68, 42],
[15, 54, 24, 5, 55, 25],
[15, 43, 15, 10, 44, 16],
# 21
[4, 144, 116, 4, 145, 117],
[17, 68, 42],
[17, 50, 22, 6, 51, 23],
[19, 46, 16, 6, 47, 17],
# 22
[2, 139, 111, 7, 140, 112],
[17, 74, 46],
[7, 54, 24, 16, 55, 25],
[34, 37, 13],
# 23
[4, 151, 121, 5, 152, 122],
[4, 75, 47, 14, 76, 48],
[11, 54, 24, 14, 55, 25],
[16, 45, 15, 14, 46, 16],
# 24
[6, 147, 117, 4, 148, 118],
[6, 73, 45, 14, 74, 46],
[11, 54, 24, 16, 55, 25],
[30, 46, 16, 2, 47, 17],
# 25
[8, 132, 106, 4, 133, 107],
[8, 75, 47, 13, 76, 48],
[7, 54, 24, 22, 55, 25],
[22, 45, 15, 13, 46, 16],
# 26
[10, 142, 114, 2, 143, 115],
[19, 74, 46, 4, 75, 47],
[28, 50, 22, 6, 51, 23],
[33, 46, 16, 4, 47, 17],
# 27
[8, 152, 122, 4, 153, 123],
[22, 73, 45, 3, 74, 46],
[8, 53, 23, 26, 54, 24],
[12, 45, 15, 28, 46, 16],
# 28
[3, 147, 117, 10, 148, 118],
[3, 73, 45, 23, 74, 46],
[4, 54, 24, 31, 55, 25],
[11, 45, 15, 31, 46, 16],
# 29
[7, 146, 116, 7, 147, 117],
[21, 73, 45, 7, 74, 46],
[1, 53, 23, 37, 54, 24],
[19, 45, 15, 26, 46, 16],
# 30
[5, 145, 115, 10, 146, 116],
[19, 75, 47, 10, 76, 48],
[15, 54, 24, 25, 55, 25],
[23, 45, 15, 25, 46, 16],
# 31
[13, 145, 115, 3, 146, 116],
[2, 74, 46, 29, 75, 47],
[42, 54, 24, 1, 55, 25],
[23, 45, 15, 28, 46, 16],
# 32
[17, 145, 115],
[10, 74, 46, 23, 75, 47],
[10, 54, 24, 35, 55, 25],
[19, 45, 15, 35, 46, 16],
# 33
[17, 145, 115, 1, 146, 116],
[14, 74, 46, 21, 75, 47],
[29, 54, 24, 19, 55, 25],
[11, 45, 15, 46, 46, 16],
# 34
[13, 145, 115, 6, 146, 116],
[14, 74, 46, 23, 75, 47],
[44, 54, 24, 7, 55, 25],
[59, 46, 16, 1, 47, 17],
# 35
[12, 151, 121, 7, 152, 122],
[12, 75, 47, 26, 76, 48],
[39, 54, 24, 14, 55, 25],
[22, 45, 15, 41, 46, 16],
# 36
[6, 151, 121, 14, 152, 122],
[6, 75, 47, 34, 76, 48],
[46, 54, 24, 10, 55, 25],
[2, 45, 15, 64, 46, 16],
# 37
[17, 152, 122, 4, 153, 123],
[29, 74, 46, 14, 75, 47],
[49, 54, 24, 10, 55, 25],
[24, 45, 15, 46, 46, 16],
# 38
[4, 152, 122, 18, 153, 123],
[13, 74, 46, 32, 75, 47],
[48, 54, 24, 14, 55, 25],
[42, 45, 15, 32, 46, 16],
# 39
[20, 147, 117, 4, 148, 118],
[40, 75, 47, 7, 76, 48],
[43, 54, 24, 22, 55, 25],
[10, 45, 15, 67, 46, 16],
# 40
[19, 148, 118, 6, 149, 119],
[18, 75, 47, 31, 76, 48],
[34, 54, 24, 34, 55, 25],
[20, 45, 15, 61, 46, 16]
]
def __init__(self, totalCount, dataCount):
self.totalCount = totalCount
self.dataCount = dataCount
@staticmethod
def getRSBlocks(version, errorCorrectLevel):
rsBlock = QRRSBlock.getRsBlockTable(version, errorCorrectLevel);
if rsBlock == None:
raise Exception("bad rs block @ version:" + version +
"/errorCorrectLevel:" + errorCorrectLevel)
length = len(rsBlock) // 3
list = []
for i in range(length):
count = rsBlock[i * 3 + 0]
totalCount = rsBlock[i * 3 + 1]
dataCount = rsBlock[i * 3 + 2]
for j in range(count):
list.append(QRRSBlock(totalCount, dataCount))
return list;
@staticmethod
def getRsBlockTable(version, errorCorrectLevel):
if errorCorrectLevel == QRErrorCorrectLevel.L:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 0];
elif errorCorrectLevel == QRErrorCorrectLevel.M:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 1];
elif errorCorrectLevel == QRErrorCorrectLevel.Q:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 2];
elif errorCorrectLevel == QRErrorCorrectLevel.H:
return QRRSBlock.RS_BLOCK_TABLE[(version - 1) * 4 + 3];
else:
return None;
class QRBitBuffer:
def __init__(self):
self.buffer = []
self.length = 0
def __repr__(self):
return ".".join([str(n) for n in self.buffer])
def get(self, index):
bufIndex = index // 8
return ( (self.buffer[bufIndex] >> (7 - index % 8) ) & 1) == 1
def put(self, num, length):
for i in range(length):
self.putBit( ( (num >> (length - i - 1) ) & 1) == 1)
def getLengthInBits(self):
return self.length
def putBit(self, bit):
bufIndex = self.length // 8
if len(self.buffer) <= bufIndex:
self.buffer.append(0)
if bit:
self.buffer[bufIndex] |= (0x80 >> (self.length % 8) )
self.length += 1
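# A minimal usage sketch (hedged: this mirrors how the surrounding barcode
# package is assumed to drive these classes, and is not part of the library):
if __name__ == '__main__':
    _qr = QRCode(version=None, errorCorrectLevel=QRErrorCorrectLevel.M)
    _qr.addData(u'HELLO WORLD')  # auto-selects the alphanumeric mode
    _qr.make()                   # picks the smallest fitting version and mask
    _n = _qr.getModuleCount()
    for _row in range(_n):
        print(''.join('##' if _qr.isDark(_row, _col) else '  '
                      for _col in range(_n)))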
| agpl-3.0 |
ashwyn/eden-message_parser | private/templates/default/menus.py | 8 | 4154 | # -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from eden.layouts import *
try:
from .layouts import *
except ImportError:
pass
import eden.menus as default
# Below is an example which you can base your own template's menus.py on
# - there are also other examples in the other templates folders
# =============================================================================
#class S3MainMenu(default.S3MainMenu):
#"""
#Custom Application Main Menu:
#The main menu consists of several sub-menus, each of which can
#be customized separately as a method of this class. The overall
#composition of the menu is defined in the menu() method, which can
#be customized as well:
#Function Sub-Menu Access to (standard)
#menu_modules() the modules menu the Eden modules
#menu_gis() the GIS menu GIS configurations
#menu_admin() the Admin menu System/User Administration
#menu_lang() the Language menu Selection of the GUI locale
#menu_auth() the User menu Login, Logout, User Profile
#menu_help() the Help menu Contact page, About page
#The standard uses the MM layout class for main menu items - but you
#can of course use a custom layout class which you define in layouts.py.
#Additional sub-menus can simply be defined as additional functions in
#this class, and then be included in the menu() method.
#Each sub-menu function returns a list of menu items, only the menu()
#function must return a layout class instance.
#"""
# -------------------------------------------------------------------------
#@classmethod
#def menu(cls):
#""" Compose Menu """
#main_menu = MM()(
## Modules-menu, align-left
#cls.menu_modules(),
## Service menus, align-right
## Note: always define right-hand items in reverse order!
#cls.menu_help(right=True),
#cls.menu_auth(right=True),
#cls.menu_lang(right=True),
#cls.menu_admin(right=True),
#cls.menu_gis(right=True)
#)
#return main_menu
# -------------------------------------------------------------------------
#@classmethod
#def menu_modules(cls):
#""" Custom Modules Menu """
#return [
#homepage(),
#homepage("gis"),
#homepage("pr")(
#MM("Persons", f="person"),
#MM("Groups", f="group")
#),
#MM("more", link=False)(
#homepage("dvi"),
#homepage("irs")
#),
#]
# =============================================================================
#class S3OptionsMenu(default.S3OptionsMenu):
#"""
#Custom Controller Menus
#The options menu (left-hand options menu) is individual for each
#controller, so each controller has its own options menu function
#in this class.
#Each of these option menu functions can be customized separately,
#by simply overriding (re-defining) the default function. The
#options menu function must return an instance of the item layout.
#The standard menu uses the M item layout class, but you can of
#course also use any other layout class which you define in
#layouts.py (can also be mixed).
#Make sure additional helper functions in this class don't match
#any current or future controller prefix (e.g. by using an
#underscore prefix).
#"""
#def cr(self):
#""" CR / Shelter Registry """
#return M(c="cr")(
#M("Camp", f="shelter")(
#M("New", m="create"),
#M("List All"),
#M("Map", m="map"),
#M("Import", m="import"),
#)
#)
# END =========================================================================
| mit |
kampanita/pelisalacarta | python/main-classic/platformcode/xbmc_info_window.py | 1 | 21922 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 [email protected]
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
import inspect
import os
import re
import xbmcgui
from core.tmdb import Tmdb
from core.item import Item
from core import logger
class InfoWindow(xbmcgui.WindowXMLDialog):
otmdb = None
item_title = ""
item_serie = ""
item_temporada = 0
item_episodio = 0
result = {}
@staticmethod
def get_language(lng):
        # Map the two-letter language code to its English name
languages = {
'aa': 'Afar', 'ab': 'Abkhazian', 'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'am': 'Amharic',
'ar': 'Arabic', 'an': 'Aragonese', 'as': 'Assamese', 'av': 'Avaric', 'ae': 'Avestan',
'ay': 'Aymara', 'az': 'Azerbaijani', 'ba': 'Bashkir', 'bm': 'Bambara', 'eu': 'Basque',
'be': 'Belarusian', 'bn': 'Bengali', 'bh': 'Bihari languages', 'bi': 'Bislama',
'bo': 'Tibetan', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'my': 'Burmese',
'ca': 'Catalan; Valencian', 'cs': 'Czech', 'ch': 'Chamorro', 'ce': 'Chechen', 'zh': 'Chinese',
'cu': 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic',
'cv': 'Chuvash', 'kw': 'Cornish', 'co': 'Corsican', 'cr': 'Cree', 'cy': 'Welsh',
'da': 'Danish', 'de': 'German', 'dv': 'Divehi; Dhivehi; Maldivian', 'nl': 'Dutch; Flemish',
'dz': 'Dzongkha', 'en': 'English', 'eo': 'Esperanto',
'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese', 'fa': 'Persian', 'fj': 'Fijian',
'fi': 'Finnish', 'fr': 'French', 'fy': 'Western Frisian', 'ff': 'Fulah',
'Ga': 'Georgian', 'gd': 'Gaelic; Scottish Gaelic', 'ga': 'Irish', 'gl': 'Galician',
'gv': 'Manx', 'el': 'Greek, Modern (1453-)', 'gn': 'Guarani', 'gu': 'Gujarati',
'ht': 'Haitian; Haitian Creole', 'ha': 'Hausa', 'he': 'Hebrew', 'hz': 'Herero', 'hi': 'Hindi',
'ho': 'Hiri Motu', 'hr': 'Croatian', 'hu': 'Hungarian', 'hy': 'Armenian', 'ig': 'Igbo',
'is': 'Icelandic', 'io': 'Ido', 'ii': 'Sichuan Yi; Nuosu', 'iu': 'Inuktitut',
'ie': 'Interlingue; Occidental', 'ia': 'Interlingua (International Auxiliary Language Association)',
'id': 'Indonesian', 'ik': 'Inupiaq', 'it': 'Italian', 'jv': 'Javanese',
'ja': 'Japanese', 'kl': 'Kalaallisut; Greenlandic', 'kn': 'Kannada', 'ks': 'Kashmiri',
'ka': 'Georgian', 'kr': 'Kanuri', 'kk': 'Kazakh', 'km': 'Central Khmer', 'ki': 'Kikuyu; Gikuyu',
'rw': 'Kinyarwanda', 'ky': 'Kirghiz; Kyrgyz', 'kv': 'Komi', 'kg': 'Kongo', 'ko': 'Korean',
'kj': 'Kuanyama; Kwanyama', 'ku': 'Kurdish', 'lo': 'Lao', 'la': 'Latin', 'lv': 'Latvian',
'li': 'Limburgan; Limburger; Limburgish', 'ln': 'Lingala', 'lt': 'Lithuanian',
'lb': 'Luxembourgish; Letzeburgesch', 'lu': 'Luba-Katanga', 'lg': 'Ganda', 'mk': 'Macedonian',
'mh': 'Marshallese', 'ml': 'Malayalam', 'mi': 'Maori', 'mr': 'Marathi', 'ms': 'Malay', 'Mi': 'Micmac',
'mg': 'Malagasy', 'mt': 'Maltese', 'mn': 'Mongolian', 'na': 'Nauru',
'nv': 'Navajo; Navaho', 'nr': 'Ndebele, South; South Ndebele', 'nd': 'Ndebele, North; North Ndebele',
'ng': 'Ndonga', 'ne': 'Nepali', 'nn': 'Norwegian Nynorsk; Nynorsk, Norwegian',
'nb': 'Bokmål, Norwegian; Norwegian Bokmål', 'no': 'Norwegian', 'oc': 'Occitan (post 1500)',
'oj': 'Ojibwa', 'or': 'Oriya', 'om': 'Oromo', 'os': 'Ossetian; Ossetic', 'pa': 'Panjabi; Punjabi',
'pi': 'Pali', 'pl': 'Polish', 'pt': 'Portuguese', 'ps': 'Pushto; Pashto', 'qu': 'Quechua',
'ro': 'Romanian; Moldavian; Moldovan', 'rn': 'Rundi', 'ru': 'Russian', 'sg': 'Sango', 'rm': 'Romansh',
'sa': 'Sanskrit', 'si': 'Sinhala; Sinhalese', 'sk': 'Slovak', 'sl': 'Slovenian', 'se': 'Northern Sami',
'sm': 'Samoan', 'sn': 'Shona', 'sd': 'Sindhi', 'so': 'Somali', 'st': 'Sotho, Southern', 'es': 'Spanish',
'sc': 'Sardinian', 'sr': 'Serbian', 'ss': 'Swati', 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
'ty': 'Tahitian', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu', 'tg': 'Tajik', 'tl': 'Tagalog',
'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tonga (Tonga Islands)', 'tn': 'Tswana', 'ts': 'Tsonga',
'tk': 'Turkmen', 'tr': 'Turkish', 'tw': 'Twi', 'ug': 'Uighur; Uyghur', 'uk': 'Ukrainian',
'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Venda', 'vi': 'Vietnamese', 'vo': 'Volapük',
'wa': 'Walloon', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish', 'yo': 'Yoruba', 'za': 'Zhuang; Chuang',
'zu': 'Zulu'}
return languages.get(lng, lng)
@staticmethod
def get_date(date):
        # Convert the date from YYYY-MM-DD to DD/MM/YYYY
if date:
return date.split("-")[2] + "/" + date.split("-")[1] + "/" + date.split("-")[0]
else:
return "N/A"
def get_episode_from_title(self, item):
        # Pattern for season and episode, e.g. "1x01"
pattern = re.compile("([0-9]+)[ ]*[x|X][ ]*([0-9]+)")
        # Look in title
matches = pattern.findall(item.title)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
        # Look in fulltitle
matches = pattern.findall(item.fulltitle)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
        # Look in contentTitle
matches = pattern.findall(item.contentTitle)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
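    # Worked example: a title such as "Breaking Bad 2x05" sets
    # item_temporada to "2" and item_episodio to "05" (both are the regex's
    # string captures, not ints).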
def get_item_info(self, item):
        # Collect the Item attributes we are interested in:
if "title" in item and item.title != "":
self.item_title = item.title
if "fulltitle" in item and item.fulltitle != "":
self.item_title = item.fulltitle
if "contentTitle" in item and item.contentTitle != "":
self.item_title = item.contentTitle
if "show" in item and item.show != "":
self.item_serie = item.show
if "contentSerieName" in item and item.contentSerieName != "":
self.item_serie = item.contentSerieName
if "contentSeason" in item and item.contentSeason != "":
self.item_temporada = item.contentSeason
if "contentepisodeNumber" in item and item.contentepisodeNumber != "":
self.item_episodio = item.contentepisodeNumber
        # If contentepisodeNumber or contentSeason are missing, try to extract them from the title
if not self.item_episodio or not self.item_temporada:
self.get_episode_from_title(item)
def get_dict_info(self, dct):
self.result = dct
def get_tmdb_movie_data(self, text):
        # Look up the movie if it is not already loaded
if not self.otmdb:
self.otmdb = Tmdb(texto_buscado=text, idioma_busqueda="es", tipo="movie")
        # Bail out if there are no results
if not self.otmdb.get_id():
return False
        # Movie information
self.result["type"] = "movie"
self.result["id_Tmdb"] = self.otmdb.get_id()
self.result["title"] = self.otmdb.result["title"]
self.result["original_title"] = self.otmdb.result["original_title"]
self.result["date"] = self.get_date(self.otmdb.result["release_date"])
self.result["language"] = self.get_language(self.otmdb.result["original_language"])
self.result["rating"] = self.otmdb.result["vote_average"] + "/10 (" + self.otmdb.result["vote_count"] + ")"
self.result["genres"] = ", ".join(self.otmdb.result["genres"])
self.result["thumbnail"] = self.otmdb.get_poster()
self.result["fanart"] = self.otmdb.get_backdrop()
self.result["overview"] = self.otmdb.result["overview"]
return True
def get_tmdb_tv_data(self, text, season=0, episode=0):
        # Cast the season and episode to int()
season = int(season)
episode = int(episode)
        # Look up the show if it is not already loaded
if not self.otmdb:
self.otmdb = Tmdb(texto_buscado=text, idioma_busqueda="es", tipo="tv")
_id = self.otmdb.get_id()
        # Bail out if there are no results
if not _id:
return False
        # Generic show information
self.result["type"] = "tv"
self.result["id_Tmdb"] = self.otmdb.get_id()
self.result["title"] = self.otmdb.result.get("name", "N/A")
self.result["rating"] = self.otmdb.result["vote_average"] + "/10 (" + self.otmdb.result["vote_count"] + ")"
self.result["genres"] = ", ".join(self.otmdb.result["genres"])
self.result["language"] = self.get_language(self.otmdb.result["original_language"])
self.result["thumbnail"] = self.otmdb.get_poster()
self.result["fanart"] = self.otmdb.get_backdrop()
self.result["overview"] = self.otmdb.result.get("overview", "N/A")
        # If we have season and episode information
if season and episode:
if "seasons" not in self.result or self.result["seasons"] == "":
self.otmdb = Tmdb(id_Tmdb=id, idioma_busqueda="es", tipo="tv")
self.result["seasons"] = str(self.otmdb.result.get("number_of_seasons", 0))
if season > self.result["seasons"]:
season = self.result["season_count"]
if episode > self.otmdb.result.get("seasons")[season-1]["episode_count"]:
episode = self.otmdb.result.get("seasons")[season]["episode_count"]
            # Request the details of the specific episode
episode_info = self.otmdb.get_episodio(season, episode)
            # Season information
self.result["season"] = str(season)
if episode_info.get("temporada_poster"):
self.result["thumbnail"] = episode_info.get("temporada_poster")
if self.otmdb.result.get("overview"):
self.result["overview"] = self.otmdb.result.get("overview")
            # Episode information
self.result["episode"] = str(episode)
self.result["episodes"] = str(episode_info.get('temporada_num_episodios', 0))
self.result["episode_title"] = episode_info.get("episodio_titulo", "N/A")
self.result["date"] = self.get_date(self.otmdb.temporada[season]["episodes"][episode-1].get("air_date"))
if episode_info.get("episodio_imagen"):
self.result["fanart"] = episode_info.get("episodio_imagen")
if episode_info.get("episodio_sinopsis"):
self.result["overview"] = episode_info.get("episodio_sinopsis")
return True
def get_tmdb_data(self, data_in):
self.otmdb = None
if self.listData:
data = {}
if data_in["type"] == "movie":
                # Movie listing mode
data["title"] = data_in["title"]
data["original_title"] = data_in["original_title"]
data["date"] = self.get_date(data_in["release_date"])
else:
                # TV listing mode
data["title"] = data_in.get("name", "N/A")
            # Data shared by every listing
data["type"] = data_in["type"]
data["id_Tmdb"] = data_in["id"]
data["language"] = self.get_language(data_in["original_language"])
data["rating"] = data_in["vote_average"] + "/10 (" + data_in["vote_count"] + ")"
data["genres"] = ", ".join(data_in["genres"])
data["thumbnail"] = data_in["thumbnail"]
data["fanart"] = data_in["fanart"]
data["overview"] = data_in.get("overview")
self.from_tmdb = False
self.result = data
else:
if type(data_in) == Item:
self.from_tmdb = True
self.get_item_info(data_in)
                # Movie mode
if not self.item_serie:
encontrado = self.get_tmdb_movie_data(self.item_title)
if not encontrado:
encontrado = self.get_tmdb_tv_data(self.item_title, self.item_temporada, self.item_episodio)
else:
encontrado = self.get_tmdb_tv_data(self.item_serie, self.item_temporada, self.item_episodio)
if not encontrado:
encontrado = self.get_tmdb_movie_data(self.item_serie)
if type(data_in) == dict:
self.from_tmdb = False
self.get_dict_info(data_in)
def Start(self, data, caption="Información del vídeo", callback=None):
        # Capture the parameters
self.caption = caption
self.callback = callback
self.indexList = -1
self.listData = None
        # Get the channel the call came from and load the settings available for that channel
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
self.channel = os.path.basename(channelpath).replace(".py", "")
if type(data) == list:
self.listData = data
self.indexList = 0
data = self.listData[self.indexList]
self.get_tmdb_data(data)
        # Show the window
self.return_value = None
self.doModal()
return self.return_value
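    # A hedged usage sketch (the skin XML name and path below are
    # illustrative assumptions, not taken from this file):
    #
    #   window = InfoWindow("InfoWindow.xml", addon_path)
    #   value = window.Start(item, caption="Información del vídeo",
    #                        callback="info_callback")
    #
    # Start() returns whatever the named callback in the calling channel
    # returns, or None when no callback was given.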
def onInit(self):
        # Focus the close button [X]
self.setFocus(self.getControl(10003))
        # Set the title and the images
self.getControl(10002).setLabel(self.caption)
self.getControl(10004).setImage(self.result.get("fanart", ""))
self.getControl(10005).setImage(self.result.get("thumbnail", "InfoWindow/img_no_disponible.png"))
        # Load the data for the movie layout
if self.result.get("type", "movie") == "movie":
self.getControl(10006).setLabel("Titulo:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Titulo Original:")
self.getControl(10009).setLabel(self.result.get("original_title", "N/A"))
self.getControl(100010).setLabel("Idioma original:")
self.getControl(100011).setLabel(self.result.get("language", "N/A"))
self.getControl(100012).setLabel("Puntuacion:")
self.getControl(100013).setLabel(self.result.get("rating", "N/A"))
self.getControl(100014).setLabel("Lanzamiento:")
self.getControl(100015).setLabel(self.result.get("date", "N/A"))
self.getControl(100016).setLabel("Generos:")
self.getControl(100017).setLabel(self.result.get("genres", "N/A"))
        # Load the data for the TV-show layout
else:
self.getControl(10006).setLabel("Serie:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Idioma original:")
self.getControl(10009).setLabel(self.result.get("language", "N/A"))
self.getControl(100010).setLabel("Puntuacion:")
self.getControl(100011).setLabel(self.result.get("rating", "N/A"))
self.getControl(100012).setLabel("Generos:")
self.getControl(100013).setLabel(self.result.get("genres", "N/A"))
if self.result.get("season") and self.result.get("episode"):
self.getControl(100014).setLabel("Titulo:")
self.getControl(100015).setLabel(self.result.get("episode_title", "N/A"))
self.getControl(100016).setLabel("Temporada:")
self.getControl(100017).setLabel(self.result.get("season", "N/A") + " de " +
self.result.get("seasons", "N/A"))
self.getControl(100018).setLabel("Episodio:")
self.getControl(100019).setLabel(self.result.get("episode", "N/A") + " de " +
self.result.get("episodes", "N/A"))
self.getControl(100020).setLabel("Emision:")
self.getControl(100021).setLabel(self.result.get("date", "N/A"))
        # Synopsis
if "overview" in self.result and self.result['overview']:
self.getControl(100022).setLabel("Sinopsis:")
self.getControl(100023).setText(self.result.get("overview", "N/A"))
else:
self.getControl(100022).setLabel("")
self.getControl(100023).setText("")
        # Enable the paging buttons when needed
self.getControl(10024).setVisible(self.indexList > -1)
self.getControl(10025).setEnabled(self.indexList > 0)
self.getControl(10026).setEnabled(self.indexList + 1 != len(self.listData))
self.getControl(100029).setLabel("({0}/{1})".format(self.indexList + 1, len(self.listData)))
        # Put the focus on the "Previous" button; if it were disabled the
        # focus would move to the "Next" button, and likewise to "Cancel"
self.setFocus(self.getControl(10024))
def onClick(self, id):
logger.info("pelisalacarta.platformcode.xbmc_info_window onClick id="+repr(id))
        # Cancel button and [X]
if id == 10003 or id == 10027:
self.close()
        # Previous button
if id == 10025 and self.indexList > 0:
self.indexList -= 1
self.get_tmdb_data(self.listData[self.indexList])
self.onInit()
        # Next button
if id == 10026 and self.indexList < len(self.listData) - 1:
self.indexList += 1
self.get_tmdb_data(self.listData[self.indexList])
self.onInit()
        # OK, Cancel and [X] buttons
if id == 10028 or id == 10003 or id == 10027:
self.close()
if self.callback:
cb_channel = None
try:
cb_channel = __import__('platformcode.%s' % self.channel,
fromlist=["platformcode.%s" % self.channel])
except ImportError:
logger.error('Imposible importar %s' % self.channel)
            if id == 10028:  # OK button
if cb_channel:
self.return_value = getattr(cb_channel, self.callback)(self.result)
            else:  # Cancel button and [X]
if cb_channel:
self.return_value = getattr(cb_channel, self.callback)(None)
def onAction(self, action):
# logger.info("pelisalacarta.platformcode.xbmc_info_window onAction action="+repr(action.getId()))
        # Action 1: left arrow
if action == 1:
            # Get the focused control
focus = self.getFocusId()
            # OK button
if focus == 10028:
self.setFocus(self.getControl(10027))
            # Cancel button
elif focus == 10027:
if self.indexList + 1 != len(self.listData):
                    # go to the Next button
self.setFocus(self.getControl(10026))
elif self.indexList > 0:
                    # go to the Previous button since Next is disabled (we are at the end of the list)
self.setFocus(self.getControl(10025))
            # Next button
elif focus == 10026:
if self.indexList > 0:
                    # go to the Previous button
self.setFocus(self.getControl(10025))
        # Action 2: right arrow
if action == 2:
            # Get the focused control
focus = self.getFocusId()
            # Previous button
if focus == 10025:
if self.indexList + 1 != len(self.listData):
                    # go to the Next button
self.setFocus(self.getControl(10026))
else:
                    # go to the Cancel button since Next is disabled (we are at the end of the list)
self.setFocus(self.getControl(10027))
            # Next button
elif focus == 10026:
self.setFocus(self.getControl(10027))
            # Cancel button
elif focus == 10027:
self.setFocus(self.getControl(10028))
        # Pressing OK simulates a click on the OK button
        # if action == 107: # that is just mouse movement
# logger.info("onAction he pulstado ok")
# # self.onClick(10028)
        # Pressing ESC or Back simulates a click on the Cancel button
if action in [10, 92]:
            # TODO: fix this properly
# self.close()
self.onClick(10027)
| gpl-3.0 |
luzheqi1987/nova-annotation | nova/tests/unit/test_hooks.py | 22 | 6320 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for hook customization."""
import stevedore
from nova import hooks
from nova import test
class SampleHookA(object):
name = "a"
def _add_called(self, op, kwargs):
called = kwargs.get('called', None)
if called is not None:
called.append(op + self.name)
def pre(self, *args, **kwargs):
self._add_called("pre", kwargs)
class SampleHookB(SampleHookA):
name = "b"
def post(self, rv, *args, **kwargs):
self._add_called("post", kwargs)
class SampleHookC(SampleHookA):
name = "c"
def pre(self, f, *args, **kwargs):
self._add_called("pre" + f.__name__, kwargs)
def post(self, f, rv, *args, **kwargs):
self._add_called("post" + f.__name__, kwargs)
class SampleHookExceptionPre(SampleHookA):
name = "epre"
exception = Exception()
def pre(self, f, *args, **kwargs):
raise self.exception
class SampleHookExceptionPost(SampleHookA):
name = "epost"
exception = Exception()
def post(self, f, rv, *args, **kwargs):
raise self.exception
class MockEntryPoint(object):
def __init__(self, cls):
self.cls = cls
def load(self):
return self.cls
class MockedHookTestCase(test.BaseHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return []
def setUp(self):
super(MockedHookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
class HookTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
]
def setUp(self):
super(HookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
@hooks.add_hook('test_hook')
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['test_hook']
self.assert_has_hook('test_hook', self._hooked)
self.assertEqual(2, len(mgr.extensions))
self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
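        # SampleHookA defines only pre(), so the expected trace is both
        # pre-hooks followed by SampleHookB's post-hook.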
self.assertEqual(['prea', 'preb', 'postb'], called_order)
class HookTestCaseWithFunction(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('function_hook',
MockEntryPoint(SampleHookC), SampleHookC, SampleHookC()),
]
@hooks.add_hook('function_hook', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['function_hook']
self.assert_has_hook('function_hook', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookC, mgr.extensions[0].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['pre_hookedc', 'post_hookedc'], called_order)
class HookFailPreTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_pre',
MockEntryPoint(SampleHookExceptionPre),
SampleHookExceptionPre, SampleHookExceptionPre()),
]
@hooks.add_hook('fail_pre', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_pre']
self.assert_has_hook('fail_pre', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPre, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPre, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
class HookFailPostTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_post',
MockEntryPoint(SampleHookExceptionPost),
SampleHookExceptionPost, SampleHookExceptionPost()),
]
@hooks.add_hook('fail_post', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_post']
self.assert_has_hook('fail_post', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPost, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPost, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
| apache-2.0 |
dario61081/koalixcrm | koalixcrm/crm/migrations/0045_auto_20180805_2047.py | 2 | 1557 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-05 20:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crm', '0044_reportingperiod_status'),
]
operations = [
migrations.AddField(
model_name='work',
name='worked_hours',
field=models.DateTimeField(blank=True, null=True, verbose_name='Stop Time'),
),
migrations.AlterField(
model_name='task',
name='planned_end_date',
field=models.DateField(blank=True, null=True, verbose_name='Planned End'),
),
migrations.AlterField(
model_name='task',
name='planned_start_date',
field=models.DateField(blank=True, null=True, verbose_name='Planned Start'),
),
migrations.AlterField(
model_name='task',
name='status',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='crm.TaskStatus', verbose_name='Status'),
),
migrations.AlterField(
model_name='work',
name='start_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='Start Time'),
),
migrations.AlterField(
model_name='work',
name='stop_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='Stop Time'),
),
]
| bsd-3-clause |
odicraig/kodi2odi | addons/plugin.radio.disney/common.py | 2 | 7722 | #############################################################################
#############################################################################
import os,xbmc,xbmcgui,xbmcaddon,sys,logging,re,urllib,urllib2
#############################################################################
#############################################################################
addon=xbmcaddon.Addon();
addon_id =addon.getAddonInfo('id');
addon_name =addon.getAddonInfo('name');
addon_path =addon.getAddonInfo('path'); addon_path8=addon.getAddonInfo('path').decode("utf-8");
#MediaPath =xbmc.translatePath( os.path.join(addon_path8,'resources','skins','default','media').encode("utf-8") ).decode("utf-8");
MediaPath =xbmc.translatePath( os.path.join(addon_path,'resources','skins','default','media') );
addonPath=addon_path; addonId=addon_id; addonName=addon_name;
#############################################################################
#############################################################################
def tP(p): return xbmc.translatePath(p)
def MediaFile(n,e='',p=MediaPath): return os.path.join(p,n+e)
def MediaFileP(n,e='',p=MediaPath): return MediaFile(n,e='.png',p=p)
def MediaFileG(n,e='',p=MediaPath): return MediaFile(n,e='.gif',p=p)
def MediaFileJ(n,e='',p=MediaPath): return MediaFile(n,e='.jpg',p=p)
def getSet(id,d=''):
try: return addon.getSetting(id)
except: return d
def setSet(id,v):
try: return addon.setSetting(id,v)
except: pass
def tfalse(r,d=False): ## Get True / False
if (r.lower()=='true' ) or (r.lower()=='t') or (r.lower()=='y') or (r.lower()=='1') or (r.lower()=='yes'): return True
elif (r.lower()=='false') or (r.lower()=='f') or (r.lower()=='n') or (r.lower()=='0') or (r.lower()=='no'): return False
else: return d
def isPath(path): return os.path.exists(path)
def isFile(filename): return os.path.isfile(filename)
def deb(a,b):
try: print "%s: %s"%(str(a),str(b))
except: pass
def debob(o):
try: print o
except: pass
def nolines(t):
it=t.splitlines(); t=''
for L in it: t=t+L
t=((t.replace("\r","")).replace("\n",""))
return t
def cFL( t,c='tan'): ### For Coloring Text ###
try: return '[COLOR '+c+']'+t+'[/COLOR]'
except: pass
def cFL_(t,c='tan'): ### For Coloring Text (First Letter-Only) ###
try: return '[COLOR '+c+']'+t[0:1]+'[/COLOR]'+t[1:]
except: pass
def DoE(e): xbmc.executebuiltin(e)
def DoAW(e): xbmc.executebuiltin("ActivateWindow(%s)"%str(e))
def DoRW(e): xbmc.executebuiltin("ReplaceWindow(%s)"%str(e))
def DoRA(e): xbmc.executebuiltin("RunAddon(%s)"%str(e))
def DoRA2(e,e2="1",e3=""): xbmc.executebuiltin('RunAddon(%s,"%s","%s")'%(str(e),str(e2),e3));
def DoA(a): xbmc.executebuiltin("Action(%s)"%str(a))
def DoCM(a): xbmc.executebuiltin("Control.Message(windowid=%s)"%(str(a)))
def DoSC(a): xbmc.executebuiltin("SendClick(%s)"%(str(a)))
def DoSC2(a,Id): xbmc.executebuiltin("SendClick(%s,%s)"%(str(a),str(Id)))
def DoStopScript(e): xbmc.executebuiltin("StopScript(%s)"%str(e))
def showAddonSettings(): addon.openSettings()
def note(title='',msg='',delay=5000,image='http://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/US_99_%281961%29.svg/40px-US_99_%281961%29.svg.png'): xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")'%(title,msg,delay,image))
def popYN(title='',line1='',line2='',line3='',n='',y=''):
diag=xbmcgui.Dialog()
r=diag.yesno(title,line1,line2,line3,n,y)
if r: return r
else: return False
#del diag
def popOK(msg="",title="",line2="",line3=""):
dialog=xbmcgui.Dialog()
#ok=dialog.ok(title,msg,line2,line3)
dialog.ok(title,msg,line2,line3)
def FileSAVE(path,data): file=open(path,'w'); file.write(data); file.close()
def FileOPEN(path,d=''):
try:
#deb('File',path)
if os.path.isfile(path): ## File found.
#deb('Found',path)
file = open(path, 'r')
contents=file.read()
file.close()
return contents
else: return d ## File not found.
except: return d
def FolderNEW(dir_path):
dir_path=dir_path.strip()
if not os.path.exists(dir_path): os.makedirs(dir_path)
def FolderLIST(mypath,dirname): #...creates sub-directories if they are not found.
subpath=os.path.join(mypath,dirname)
if not os.path.exists(subpath): os.makedirs(subpath)
return subpath
def getURL(url):
try:
req=urllib2.Request(url)
#req.add_header(MyBrowser[0],MyBrowser[1])
response=urllib2.urlopen(req)
link=response.read()
response.close()
return(link)
except: deb('Failed to fetch url',url); return ''
def postURL(url,form_data={},headers={},compression=True):
try:
req=urllib2.Request(url)
if form_data: form_data=urllib.urlencode(form_data); req=urllib2.Request(url,form_data)
#req.add_header(MyBrowser[0],MyBrowser[1])
for k, v in headers.items(): req.add_header(k, v)
if compression: req.add_header('Accept-Encoding', 'gzip')
response=urllib2.urlopen(req)
link=response.read()
response.close()
return link
except: deb('Failed to fetch url',url); return ''
def postURL2(url,form_data={}):
try:
postData=urllib.urlencode(form_data)
req=urllib2.Request(url,postData)
#req.add_header(MyBrowser[0], MyBrowser[1])
response=urllib2.urlopen(req)
link=response.read()
response.close()
return(link)
except: deb('Failed to fetch url',url); return ''
def showkeyboard(txtMessage="",txtHeader="",passwordField=False):
try:
if txtMessage=='None': txtMessage=''
keyboard = xbmc.Keyboard(txtMessage, txtHeader, passwordField)#("text to show","header text", True="password field"/False="show text")
keyboard.doModal()
if keyboard.isConfirmed(): return keyboard.getText()
else: return '' #return False
except: return ''
def art(f,fe=''):
fe1='.png'; fe2='.jpg'; fe3='.gif';
if fe1 in f: f=f.replace(fe1,''); fe=fe1;
elif fe2 in f: f=f.replace(fe2,''); fe=fe2;
elif fe3 in f: f=f.replace(fe3,''); fe=fe3;
return xbmc.translatePath(os.path.join(addonPath,'art',f+fe))
def artp(f,fe='.png'):
return art(f,fe)
def artj(f,fe='.jpg'):
return art(f,fe)
def addonPath2(f,fe=''):
return xbmc.translatePath(os.path.join(addonPath,f+fe))
def get_xbmc_os():
try: xbmc_os=str(os.environ.get('OS'))
except:
try: xbmc_os=str(sys.platform)
except: xbmc_os="unknown"
return xbmc_os
def doCtoS(c,s="",d=""): ## Puts an array (Example: [68,68,68,68]) into a string, converting each number in the array into its character form to make up a string.
    try:
        if len(c)==0: return d
        for k in range(0,len(c)):
            s+=str(chr(c[k]))
        return s
    except: return d
#############################################################################
#############################################################################
def SKgetStringValue(Tag,ErResult=''):
try: return xbmc.getInfoLabel('Skin.String('+Tag+')')
except: return ErResult
def SKchange(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetString('+Tag+', %s)' % NewValue)
def SKnchange(Tag,NewValue=0): xbmc.executebuiltin('Skin.SetNumeric('+Tag+', %s)' % NewValue)
def SKgchange(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetImage('+Tag+', %s)' % NewValue)
def SKgLchange(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetLargeImage('+Tag+', %s)' % NewValue)
def SKbchange(Tag,NewValue=False): xbmc.executebuiltin('Skin.SetBool('+Tag+', %s)' % NewValue)
def SKtchange(Tag,NewValue=False): xbmc.executebuiltin('Skin.ToggleSetting('+Tag+', %s)' % NewValue)
def SKsetStringValue(Tag,NewValue=''): xbmc.executebuiltin('Skin.SetString('+Tag+', %s)' % NewValue)
#############################################################################
#############################################################################
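## Illustrative usage sketch (not part of the original add-on; the setting
## name, message text and URL below are assumptions for demonstration only):
# if tfalse(getSet('show_startup_note','true')):
#     note(title=addon_name,msg='Addon loaded',delay=3000)
# icon=MediaFileP('icon')   ## -> <addon>/resources/skins/default/media/icon.png
# xml=getURL('http://example.com/stations.xml')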
| gpl-3.0 |
rgabo/awsh | awsh/repl.py | 1 | 3713 | from __future__ import unicode_literals, print_function
import shlex
import sys
import traceback
from codeop import compile_command
from pathlib import Path
from shutil import which
from awsh.commands import *
from awsh.providers import Provider, PosixProvider
from awsh.util import lazy_property
from prompt_toolkit import prompt
from prompt_toolkit.history import InMemoryHistory
from pyspark.sql import SparkSession
class Context(object):
def __init__(self):
self.globals = {
"context": self,
}
def sql(self, sql):
self.provider(self.cwd).create_df(self.cwd).registerTempTable('cwd')
return self.spark.sql(sql)
@property
def cwd(self):
return Path.cwd()
@property
def sc(self):
return self.spark.sparkContext
@lazy_property
def spark(self):
return SparkSession.builder \
.appName("awsh") \
.getOrCreate()
def provider(self, path):
for provider in Provider.providers:
if str(path).startswith(provider.prefix):
return provider(self)
return PosixProvider(self)
class Session(object):
keyword_commands = ["import"]
def __init__(self):
self.context = Context()
self.history = InMemoryHistory()
def command(self, cmd, args):
for command in Command.commands:
if command.name == cmd:
return command(args, context=self.context)
def prompt(self):
text = prompt(self.get_prompt(), history=self.history)
if text:
self.handle_input(text)
def handle_input(self, input):
# handle input modifiers
if input.startswith('>'):
return self.exec_code(input[1:])
if input.startswith('!'):
return self.exec_shell(input[1:])
if input.startswith('%'):
return self.exec_sql(input[1:])
# parse input as single cmd with args
cmd, *args = self.parse_input(input)
command = self.command(cmd, args)
# 1. execute builtin command
if command:
self.exec_command(command)
# 2. execute Python keywords
elif cmd in self.keyword_commands:
self.exec_code(input)
# 3. execute shell command
elif which(cmd) is not None:
self.exec_shell(input)
# 4. execute as code
else:
self.exec_code(input)
def exec_code(self, input):
exec(compile_command(input), self.context.globals)
@staticmethod
def exec_command(command):
command.exec()
@staticmethod
def exec_shell(input):
call(input, shell=True)
def exec_sql(self, input):
self.context.sql(input).show()
def get_prompt(self):
return "{} $ ".format(self.context.cwd.name)
@staticmethod
def parse_input(input):
return shlex.split(input, posix=True)
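# Illustrative dispatch examples for Session.handle_input above (assumes a
# constructed Session instance `s`; the inputs shown are hypothetical):
#   s.handle_input('>print(1 + 1)')        # leading '>' -> exec_code (Python)
#   s.handle_input('!ls -la')              # leading '!' -> exec_shell
#   s.handle_input('%SELECT * FROM cwd')   # leading '%' -> exec_sql via Spark
#   s.handle_input('somecmd arg')          # builtin Command, then shell, then code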
def run():
session = Session()
print("""
Welcome to __
____ __ _______/ /_
/ __ `/ | /| / / ___/ __ \\
/ /_/ /| |/ |/ (__ ) / / /
\__,_/ |__/|__/____/_/ /_/
""")
while True:
# noinspection PyBroadException
try:
session.prompt()
except (KeyboardInterrupt, EOFError):
break
except Exception:
handle_exception(sys.exc_info())
def handle_exception(exc_tuple):
last_type, last_value, last_traceback = exc_tuple
print(traceback.format_exception_only(last_type, last_value)[-1].rstrip('\n'))
sys.last_type = last_type
sys.last_value = last_value
sys.last_traceback = last_traceback
if __name__ == '__main__':
run()
| bsd-3-clause |
spallavolu/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
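# --- Illustrative check (not part of the original module) -------------------
# A minimal sketch verifying that the SVD-based solution above matches the
# closed-form normal-equation ridge solution; the `_demo_` name is
# hypothetical and the function is never called by the library code.
def _demo_solve_svd_against_normal_equations():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20, 2)
    alpha = np.array([0.5, 0.5])
    coef_svd = _solve_svd(X, y, alpha)              # shape (n_targets, n_features)
    A = X.T.dot(X) + alpha[0] * np.eye(X.shape[1])  # X^T X + alpha * Id
    coef_ne = np.linalg.solve(A, X.T.dot(y)).T      # normal-equation solution
    assert np.allclose(coef_svd, coef_ne)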
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
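# --- Illustrative check (not part of the original module) -------------------
# A numpy-only sketch of the leave-one-out identity used by _RidgeGCV above,
# looe = c / diag(G) with G = (K + alpha*Id)^-1 and c = G y, compared against
# brute-force leave-one-out kernel ridge; the `_demo_` name is hypothetical
# and the function is never called by the library code.
def _demo_ridge_gcv_loo_identity():
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = rng.randn(8)
    alpha = 0.7
    K = X.dot(X.T)
    G = np.linalg.inv(K + alpha * np.eye(8))
    c = G.dot(y)
    looe_fast = c / np.diag(G)
    looe_slow = np.empty(8)
    for i in range(8):
        mask = np.arange(8) != i
        dual = np.linalg.solve(K[np.ix_(mask, mask)] + alpha * np.eye(7), y[mask])
        looe_slow[i] = y[i] - K[i, mask].dot(dual)
    assert np.allclose(looe_fast, looe_slow)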
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 "are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used, else :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
            'auto' : use svd if n_samples > n_features and X is not sparse,
                otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
partofthething/home-assistant | tests/components/nest/test_events.py | 3 | 9106 | """Test for Nest event handling for the Smart Device Management API.
These tests fake out the subscriber/devicemanager, and are not using a real
pubsub subscriber.
"""
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventMessage
from homeassistant.util.dt import utcnow
from .common import async_setup_sdm_platform
from tests.common import async_capture_events
DOMAIN = "nest"
DEVICE_ID = "some-device-id"
PLATFORM = "camera"
NEST_EVENT = "nest_event"
EVENT_SESSION_ID = "CjY5Y3VKaTZwR3o4Y19YbTVfMF..."
EVENT_ID = "FWWVQVUdGNUlTU2V4MGV2aTNXV..."
async def async_setup_devices(hass, device_type, traits={}):
"""Set up the platform and prerequisites."""
devices = {
DEVICE_ID: Device.MakeDevice(
{
"name": DEVICE_ID,
"type": device_type,
"traits": traits,
},
auth=None,
),
}
return await async_setup_sdm_platform(hass, PLATFORM, devices=devices)
def create_device_traits(event_trait):
"""Create fake traits for a device."""
return {
"sdm.devices.traits.Info": {
"customName": "Front",
},
event_trait: {},
"sdm.devices.traits.CameraLiveStream": {
"maxVideoResolution": {
"width": 640,
"height": 480,
},
"videoCodecs": ["H264"],
"audioCodecs": ["AAC"],
},
}
def create_event(event_type, device_id=DEVICE_ID, timestamp=None):
"""Create an EventMessage for a single event type."""
events = {
event_type: {
"eventSessionId": EVENT_SESSION_ID,
"eventId": EVENT_ID,
},
}
return create_events(events=events, device_id=device_id)
def create_events(events, device_id=DEVICE_ID, timestamp=None):
"""Create an EventMessage for events."""
if not timestamp:
timestamp = utcnow()
return EventMessage(
{
"eventId": "some-event-id",
"timestamp": timestamp.isoformat(timespec="seconds"),
"resourceUpdate": {
"name": device_id,
"events": events,
},
},
auth=None,
)
async def test_doorbell_chime_event(hass):
"""Test a pubsub message for a doorbell event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
assert entry.unique_id == "some-device-id-camera"
assert entry.original_name == "Front"
assert entry.domain == "camera"
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "Front"
assert device.model == "Doorbell"
assert device.identifiers == {("nest", DEVICE_ID)}
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.DoorbellChime.Chime", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "doorbell_chime",
"timestamp": event_time,
}
async def test_camera_motion_event(hass):
"""Test a pubsub message for a camera motion event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.CAMERA",
create_device_traits("sdm.devices.traits.CameraMotion"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.CameraMotion.Motion", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_motion",
"timestamp": event_time,
}
async def test_camera_sound_event(hass):
"""Test a pubsub message for a camera sound event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.CAMERA",
create_device_traits("sdm.devices.traits.CameraSound"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.CameraSound.Sound", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_sound",
"timestamp": event_time,
}
async def test_camera_person_event(hass):
"""Test a pubsub message for a camera person event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.CameraEventImage"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
timestamp = utcnow()
await subscriber.async_receive_event(
create_event("sdm.devices.events.CameraPerson.Person", timestamp=timestamp)
)
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 1
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_person",
"timestamp": event_time,
}
async def test_camera_multiple_event(hass):
"""Test a pubsub message for a camera person event."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.CameraEventImage"),
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("camera.front")
assert entry is not None
event_map = {
"sdm.devices.events.CameraMotion.Motion": {
"eventSessionId": EVENT_SESSION_ID,
"eventId": EVENT_ID,
},
"sdm.devices.events.CameraPerson.Person": {
"eventSessionId": EVENT_SESSION_ID,
"eventId": EVENT_ID,
},
}
timestamp = utcnow()
await subscriber.async_receive_event(create_events(event_map, timestamp=timestamp))
await hass.async_block_till_done()
event_time = timestamp.replace(microsecond=0)
assert len(events) == 2
assert events[0].data == {
"device_id": entry.device_id,
"type": "camera_motion",
"timestamp": event_time,
}
assert events[1].data == {
"device_id": entry.device_id,
"type": "camera_person",
"timestamp": event_time,
}
async def test_unknown_event(hass):
"""Test a pubsub message for an unknown event type."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
await subscriber.async_receive_event(create_event("some-event-id"))
await hass.async_block_till_done()
assert len(events) == 0
async def test_unknown_device_id(hass):
"""Test a pubsub message for an unknown event type."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
await subscriber.async_receive_event(
create_event("sdm.devices.events.DoorbellChime.Chime", "invalid-device-id")
)
await hass.async_block_till_done()
assert len(events) == 0
async def test_event_message_without_device_event(hass):
"""Test a pubsub message for an unknown event type."""
events = async_capture_events(hass, NEST_EVENT)
subscriber = await async_setup_devices(
hass,
"sdm.devices.types.DOORBELL",
create_device_traits("sdm.devices.traits.DoorbellChime"),
)
timestamp = utcnow()
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": timestamp.isoformat(timespec="seconds"),
},
auth=None,
)
await subscriber.async_receive_event(event)
await hass.async_block_till_done()
assert len(events) == 0
| mit |
RefugeeMatchmaking/HackZurich | GAE_Playground/libs/networkx/algorithms/bipartite/tests/test_cluster.py | 85 | 2709 | import networkx as nx
from nose.tools import *
from networkx.algorithms.bipartite.cluster import cc_dot,cc_min,cc_max
import networkx.algorithms.bipartite as bipartite
def test_pairwise_bipartite_cc_functions():
# Test functions for different kinds of bipartite clustering coefficients
# between pairs of nodes using 3 example graphs from figure 5 p. 40
# Latapy et al (2008)
G1 = nx.Graph([(0,2),(0,3),(0,4),(0,5),(0,6),(1,5),(1,6),(1,7)])
G2 = nx.Graph([(0,2),(0,3),(0,4),(1,3),(1,4),(1,5)])
G3 = nx.Graph([(0,2),(0,3),(0,4),(0,5),(0,6),(1,5),(1,6),(1,7),(1,8),(1,9)])
result = {0:[1/3.0, 2/3.0, 2/5.0],
1:[1/2.0, 2/3.0, 2/3.0],
2:[2/8.0, 2/5.0, 2/5.0]}
for i, G in enumerate([G1, G2, G3]):
assert(bipartite.is_bipartite(G))
assert(cc_dot(set(G[0]), set(G[1])) == result[i][0])
assert(cc_min(set(G[0]), set(G[1])) == result[i][1])
assert(cc_max(set(G[0]), set(G[1])) == result[i][2])
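def _example_cc_by_hand():
    # Illustrative helper (not collected by nose): for the first example graph
    # above, the three pairwise coefficients are assumed to reduce to the
    # neighbourhood overlap divided by the union / smaller / larger set size.
    Nu, Nv = {2, 3, 4, 5, 6}, {5, 6, 7}
    overlap = len(Nu & Nv)
    assert cc_dot(Nu, Nv) == overlap / float(len(Nu | Nv))           # 2/6
    assert cc_min(Nu, Nv) == overlap / float(min(len(Nu), len(Nv)))  # 2/3
    assert cc_max(Nu, Nv) == overlap / float(max(len(Nu), len(Nv)))  # 2/5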
def test_star_graph():
G=nx.star_graph(3)
# all modes are the same
answer={0:0,1:1,2:1,3:1}
assert_equal(bipartite.clustering(G,mode='dot'),answer)
assert_equal(bipartite.clustering(G,mode='min'),answer)
assert_equal(bipartite.clustering(G,mode='max'),answer)
@raises(nx.NetworkXError)
def test_not_bipartite():
bipartite.clustering(nx.complete_graph(4))
@raises(nx.NetworkXError)
def test_bad_mode():
bipartite.clustering(nx.path_graph(4),mode='foo')
def test_path_graph():
G=nx.path_graph(4)
answer={0:0.5,1:0.5,2:0.5,3:0.5}
assert_equal(bipartite.clustering(G,mode='dot'),answer)
assert_equal(bipartite.clustering(G,mode='max'),answer)
answer={0:1,1:1,2:1,3:1}
assert_equal(bipartite.clustering(G,mode='min'),answer)
def test_average_path_graph():
G=nx.path_graph(4)
assert_equal(bipartite.average_clustering(G,mode='dot'),0.5)
assert_equal(bipartite.average_clustering(G,mode='max'),0.5)
assert_equal(bipartite.average_clustering(G,mode='min'),1)
def test_ra_clustering_davis():
G = nx.davis_southern_women_graph()
cc4 = round(bipartite.robins_alexander_clustering(G), 3)
assert_equal(cc4, 0.468)
def test_ra_clustering_square():
G = nx.path_graph(4)
G.add_edge(0, 3)
assert_equal(bipartite.robins_alexander_clustering(G), 1.0)
def test_ra_clustering_zero():
G = nx.Graph()
assert_equal(bipartite.robins_alexander_clustering(G), 0)
G.add_nodes_from(range(4))
assert_equal(bipartite.robins_alexander_clustering(G), 0)
G.add_edges_from([(0,1),(2,3),(3,4)])
assert_equal(bipartite.robins_alexander_clustering(G), 0)
G.add_edge(1,2)
assert_equal(bipartite.robins_alexander_clustering(G), 0)
| mit |
StefanRijnhart/OpenUpgrade | openerp/addons/test_convert/tests/test_convert.py | 382 | 2303 | import collections
import unittest2
from lxml import etree as ET
from lxml.builder import E
from openerp.tests import common
from openerp.tools.convert import _eval_xml
Field = E.field
Value = E.value
class TestEvalXML(common.TransactionCase):
def eval_xml(self, node, obj=None, idref=None):
return _eval_xml(obj, node, pool=None, cr=self.cr, uid=self.uid,
idref=idref, context=None)
def test_char(self):
self.assertEqual(
self.eval_xml(Field("foo")),
"foo")
self.assertEqual(
self.eval_xml(Field("None")),
"None")
def test_int(self):
self.assertIsNone(
            self.eval_xml(Field("None", type='int')),
            "expected None when parsing the literal 'None' as an int")
self.assertEqual(
self.eval_xml(Field(" 42 ", type="int")),
42)
with self.assertRaises(ValueError):
self.eval_xml(Field("4.82", type="int"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Whelp", type="int"))
def test_float(self):
self.assertEqual(
self.eval_xml(Field("4.78", type="float")),
4.78)
with self.assertRaises(ValueError):
self.eval_xml(Field("None", type="float"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Foo", type="float"))
def test_list(self):
self.assertEqual(
self.eval_xml(Field(type="list")),
[])
self.assertEqual(
self.eval_xml(Field(
Value("foo"),
Value("5", type="int"),
Value("4.76", type="float"),
Value("None", type="int"),
type="list"
)),
["foo", 5, 4.76, None])
def test_file(self):
Obj = collections.namedtuple('Obj', 'module')
obj = Obj('test_convert')
self.assertEqual(
self.eval_xml(Field('test_file.txt', type='file'), obj),
'test_convert,test_file.txt')
with self.assertRaises(IOError):
self.eval_xml(Field('test_nofile.txt', type='file'), obj)
@unittest2.skip("not tested")
def test_xml(self):
pass
@unittest2.skip("not tested")
def test_html(self):
pass
| agpl-3.0 |
devekko/ansible-role-manager | arm/commands/install.py | 1 | 5123 | import os, shutil, re
from . import Command, CommandException
from arm.conf import settings
from arm.odict import odict
from arm.util import retrieve_role, retrieve_all_roles, get_playbook_root
from arm import Role, Module
class install(Command):
help = "install playbook role"
def __init__(self, parser):
parser.description = self.help
parser.add_argument('-U','--upgrade', action='store_true')
parser.add_argument('-n', '--no-deps', action='store_true', help="install without this item's dependencies")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-r', '--requirements', nargs=1, help="install from requirements file (see `arm help freeze`)")
group.add_argument('role_or_module', nargs='?', help="specifier of role or module to install locally")
# TODO : add argument of where the role is to be installed
# TODO : add argument of where the installed role should be linked
def run(self, argv):
root = get_playbook_root(os.getcwd())
if not root:
print '''
can't find playbook.
use `arm init` to create recommended structure.
or use the `--no-deps` option.'''
return 1
roles = odict()
if getattr(argv, 'requirements', ''):
for role_ident in open(argv.requirements[0],'r'):
roles = self._fetch(role_ident, argv.no_deps, roles)
else:
roles = self._fetch(argv.role_or_module, argv.no_deps, roles )
for alias,role in roles.items():
self._install_and_link(alias, role, getattr(argv, 'upgrade', False))
        print "\nrole(s) '%s' installed successfully.\n" % (", ".join(roles.keys()))
exit(0)
def _fetch(self, role_ident, no_deps, roles):
aliasRE = re.compile(r'^(?P<ident>.+?)(\#alias\=(?P<alias>[a-zA-Z][a-zA-Z0-9]+?)){0,1}$')
alias_match = aliasRE.match(role_ident)
if not alias_match:
            print "error: could not parse role identifier"
return 1
role_ident = alias_match.groupdict()['ident']
alias = alias_match.groupdict().get('alias',None)
if no_deps:
role = retrieve_role(role_ident)
if alias:
roles.update( { alias:role } )
return roles
roles.update( { role.get_name():role } )
return roles
return retrieve_all_roles(role_ident, alias, roles)
def _install_and_link(self, alias, rmp, upgrade=False):
root = get_playbook_root(os.getcwd())
source_path = rmp.get_path()
library_path = None
link_path = None
if type(rmp) == Role:
installed_rmp_dir = settings.installed_roles_dir
ansible_rmp_dir = settings.ansible_roles_dir
elif type(rmp) == Module:
installed_rmp_dir = settings.installed_modules_dir
ansible_rmp_dir = settings.ansible_modules_dir
installed_rmp_path = os.path.join(installed_rmp_dir, rmp.get_name())
library_path = os.path.join(root, installed_rmp_path)
link_path = os.path.join(root, ansible_rmp_dir, alias)
# TODO : test if a 'local' route makes sense for a role dependency
# if the library path is also the role, local role dependency
#if os.path.realpath(link_path) == os.path.realpath(library_path):
#return
if os.path.exists(library_path) and not upgrade:
raise CommandException("'%s' already installed in library, use --upgrade to install latest" % rmp.get_name())
if os.path.exists(link_path):
if not os.path.islink(link_path):
if type(rmp) == Role:
raise Exception("role '%s' already exists as a non-installed role" % rmp)
elif type(rmp) == Module:
                    raise Exception("module '%s' already exists as a non-installed module" % rmp)
if not upgrade:
raise CommandException("'%s' already installed in library, use --upgrade to install latest" % rmp.get_name())
if upgrade:
if os.path.exists(library_path):
print "\t upgrading :: removing old version"
shutil.rmtree(library_path)
if os.path.islink(link_path):
print "\t upgrading :: unlinking old version"
os.unlink(link_path)
shutil.copytree(source_path, library_path)
ansible_rmp_path = os.path.join(root,ansible_rmp_dir)
if not os.path.exists(ansible_rmp_path):
os.mkdir(ansible_rmp_path)
os.symlink(
os.path.relpath(installed_rmp_path, ansible_rmp_dir),
os.path.join(link_path)
)
| apache-2.0 |
ianyh/heroku-buildpack-python-nodejs | vendor/distribute-0.6.36/setuptools/tests/test_markerlib.py | 71 | 2237 | import os
import unittest
from setuptools.tests.py26compat import skipIf
try:
import ast
except ImportError:
pass
class TestMarkerlib(unittest.TestCase):
@skipIf('ast' not in globals(),
"ast not available (Python < 2.6?)")
def test_markers(self):
from _markerlib import interpret, default_environment, compile
os_name = os.name
self.assertTrue(interpret(""))
self.assertTrue(interpret("os.name != 'buuuu'"))
self.assertTrue(interpret("python_version > '1.0'"))
self.assertTrue(interpret("python_version < '5.0'"))
self.assertTrue(interpret("python_version <= '5.0'"))
self.assertTrue(interpret("python_version >= '1.0'"))
self.assertTrue(interpret("'%s' in os.name" % os_name))
self.assertTrue(interpret("'buuuu' not in os.name"))
self.assertFalse(interpret("os.name == 'buuuu'"))
self.assertFalse(interpret("python_version < '1.0'"))
self.assertFalse(interpret("python_version > '5.0'"))
self.assertFalse(interpret("python_version >= '5.0'"))
self.assertFalse(interpret("python_version <= '1.0'"))
self.assertFalse(interpret("'%s' not in os.name" % os_name))
self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'"))
environment = default_environment()
environment['extra'] = 'test'
self.assertTrue(interpret("extra == 'test'", environment))
self.assertFalse(interpret("extra == 'doc'", environment))
def raises_nameError():
try:
interpret("python.version == '42'")
except NameError:
pass
else:
raise Exception("Expected NameError")
raises_nameError()
def raises_syntaxError():
try:
interpret("(x for x in (4,))")
except SyntaxError:
pass
else:
raise Exception("Expected SyntaxError")
raises_syntaxError()
statement = "python_version == '5'"
self.assertEqual(compile(statement).__doc__, statement)
| mit |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/ctypes/test/test_prototypes.py | 81 | 6738 | from ctypes import *
import unittest
# IMPORTANT INFO:
#
# Consider this call:
# func.restype = c_char_p
# func(c_char_p("123"))
# It returns
# "123"
#
# WHY IS THIS SO?
#
# argument tuple (c_char_p("123"), ) is destroyed after the function
# func is called, but NOT before the result is actually built.
#
# If the arglist would be destroyed BEFORE the result has been built,
# the c_char_p("123") object would already have a zero refcount,
# and the pointer passed to (and returned by) the function would
# probably point to deallocated space.
#
# In this case, there would have to be an additional reference to the argument...
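# An illustrative sketch of the refcount behaviour described above (not part
# of the original test module; `func` stands for any foreign function that
# takes a c_char_p argument):
#
#   import sys
#   arg = c_char_p("123")
#   print sys.getrefcount(arg)    # `arg` is kept alive by this binding...
#   func(arg)                     # ...and by the argument tuple during the call,
#                                 # so the returned pointer is built from live memory.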
import _ctypes_test
testdll = CDLL(_ctypes_test.__file__)
# Return machine address `a` as a (possibly long) non-negative integer.
# Starting with Python 2.5, id(anything) is always non-negative, and
# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
def positive_address(a):
if a >= 0:
return a
# View the bits in `a` as unsigned instead.
import struct
num_bits = struct.calcsize("P") * 8 # num bits in native machine address
a += 1L << num_bits
assert a >= 0
return a
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
class CharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_long
func.argtypes = None
def test_paramflags(self):
# function returns c_void_p result,
# and has a required parameter named 'input'
prototype = CFUNCTYPE(c_void_p, c_void_p)
func = prototype(("_testfunc_p_p", testdll),
((1, "input"),))
try:
func()
except TypeError, details:
self.assertEqual(str(details), "required argument 'input' missing")
else:
self.fail("TypeError not raised")
self.assertEqual(func(None), None)
self.assertEqual(func(input=None), None)
def test_int_pointer_arg(self):
func = testdll._testfunc_p_p
func.restype = c_long
self.assertEqual(0, func(0))
ci = c_int(0)
func.argtypes = POINTER(c_int),
self.assertEqual(positive_address(addressof(ci)),
positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_short),
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_double),
self.assertRaises(ArgumentError, func, byref(ci))
def test_POINTER_c_char_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = POINTER(c_char),
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_char_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_char_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_void_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_void_p,
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual("123", func(c_char_p("123")))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
func(byref(c_int()))
func(pointer(c_int()))
func((c_int * 3)())
try:
func.restype = c_wchar_p
except NameError:
pass
else:
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
def test_instance(self):
func = testdll._testfunc_p_p
func.restype = c_void_p
class X:
_as_parameter_ = None
func.argtypes = c_void_p,
self.assertEqual(None, func(X()))
func.argtypes = None
self.assertEqual(None, func(X()))
try:
c_wchar
except NameError:
pass
else:
class WCharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_int
func.argtypes = None
def test_POINTER_c_wchar_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = POINTER(c_wchar),
self.assertEqual(None, func(None))
self.assertEqual(u"123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual(u"123", func(c_wchar_p(u"123")))
self.assertEqual(u"123", func(c_wbuffer(u"123")))
ca = c_wchar("a")
self.assertEqual(u"a", func(pointer(ca))[0])
self.assertEqual(u"a", func(byref(ca))[0])
def test_c_wchar_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_wchar_p,
c_wchar_p.from_param(u"123")
self.assertEqual(None, func(None))
self.assertEqual("123", func(u"123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
# XXX Currently, these raise TypeErrors, although they shouldn't:
self.assertEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
func = testdll._testfunc_ai8
func.restype = POINTER(c_int)
func.argtypes = c_int * 8,
func((c_int * 8)(1, 2, 3, 4, 5, 6, 7, 8))
# This did crash before:
def func(): pass
CFUNCTYPE(None, c_int * 3)(func)
################################################################
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/ConfigParser.py | 186 | 27746 | """Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
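Example usage (an illustrative sketch, not part of the original
documentation; the file name and the section/option names are made up):
    import ConfigParser
    parser = ConfigParser.SafeConfigParser({'dir': '/tmp'})
    parser.read('example.ini')          # missing files are silently skipped
    if parser.has_option('paths', 'cache'):
        cache = parser.get('paths', 'cache')     # expands %(dir)s
    timeout = parser.getint('network', 'timeout')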
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
        already exists. Raise ValueError if name is DEFAULT or any of its
case-insensitive variants.
"""
if section.lower() == "default":
raise ValueError, 'Invalid section name: %s' % section
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
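        An illustrative call pattern (the file names are hypothetical):
            parser.read(['/etc/myapp.cfg',
                         os.path.expanduser('~/.myapp.cfg'),
                         'myapp.cfg'])
        would return the subset of those three paths that could be opened.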
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
# =), followed by any #
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
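    # Illustrative matches for the patterns above (examples are made up):
    #   SECTCRE:   "[server]"            -> header "server"
    #   OPTCRE:    "port: 8080"          -> option "port", vi ":", value "8080"
    #   OPTCRE:    "host = example.com"  -> option "host", vi "=", value "example.com"
    #   OPTCRE_NV also accepts a bare "debug" line with no separator or value.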
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
        Each section in the setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
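        An illustrative input accepted by this parser (contents are made up):
            [section]
            name: value
            other = value
                continued on an indented line
            ; comments start with ';' or '#'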
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
import UserDict as _UserDict
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
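        Illustrative lookups (names are made up): with dir=/tmp in DEFAULT
        and cache=%(dir)s/cache in [paths],
            get('paths', 'cache')                       -> '/tmp/cache'
            get('paths', 'cache', raw=True)             -> '%(dir)s/cache'
            get('paths', 'cache', vars={'dir': '/srv'}) -> '/srv/cache'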
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)
| gpl-2.0 |
huntxu/fuel-web | nailgun/manage.py | 2 | 11684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __main__
import argparse
import code
import os
import sys
def add_config_parameter(parser):
parser.add_argument(
'-c', '--config', dest='config_file', action='store', type=str,
help='custom config file', default=None
)
def load_run_parsers(subparsers):
run_parser = subparsers.add_parser(
'run', help='run application locally'
)
run_parser.add_argument(
'-p', '--port', dest='port', action='store', type=str,
help='application port', default='8000'
)
run_parser.add_argument(
'-a', '--address', dest='address', action='store', type=str,
help='application address', default='0.0.0.0'
)
run_parser.add_argument(
'--fake-tasks', action='store_true', help='fake tasks'
)
run_parser.add_argument(
'--fake-tasks-amqp', action='store_true',
help='fake tasks with real AMQP'
)
run_parser.add_argument(
'--keepalive',
action='store_true',
help='run keep alive thread'
)
add_config_parameter(run_parser)
run_parser.add_argument(
'--fake-tasks-tick-count', action='store', type=int,
help='Fake tasks tick count'
)
run_parser.add_argument(
'--fake-tasks-tick-interval', action='store', type=int,
help='Fake tasks tick interval in seconds'
)
run_parser.add_argument(
'--authentication-method', action='store', type=str,
help='Choose authentication type',
choices=['none', 'fake', 'keystone'],
)
def load_db_parsers(subparsers):
subparsers.add_parser(
'syncdb', help='sync application database'
)
subparsers.add_parser(
'dropdb', help='drop application database'
)
# fixtures
loaddata_parser = subparsers.add_parser(
'loaddata', help='load data from fixture'
)
loaddata_parser.add_argument(
'fixture', action='store', help='json fixture to load'
)
dumpdata_parser = subparsers.add_parser(
'dumpdata', help='dump models as fixture'
)
dumpdata_parser.add_argument(
        'model', action='store', help='model name to dump; underscored name '
'should be used, e.g. network_group for NetworkGroup model'
)
generate_parser = subparsers.add_parser(
'generate_nodes_fixture', help='generate new nodes fixture'
)
generate_parser.add_argument(
'-n', '--total-nodes', dest='total_nodes', action='store', type=int,
help='total nodes count to generate', required=True
)
generate_parser.add_argument(
'-e', '--error-nodes', dest='error_nodes', action='store', type=int,
help='error nodes count to generate'
)
generate_parser.add_argument(
'-o', '--offline-nodes', dest='offline_nodes', action='store', type=int,
help='offline nodes count to generate'
)
generate_parser.add_argument(
'-i', '--min-ifaces-num', dest='min_ifaces_num', action='store',
type=int, default=1,
help='minimal number of ethernet interfaces for node'
)
subparsers.add_parser(
'loaddefault',
help='load data from default fixtures (settings.FIXTURES_TO_UPLOAD) '
'and apply fake deployment tasks for all releases in database'
)
def load_alembic_parsers(migrate_parser):
alembic_parser = migrate_parser.add_subparsers(
dest="alembic_command",
help='alembic command'
)
for name in ['current', 'history', 'branches']:
parser = alembic_parser.add_parser(name)
for name in ['upgrade', 'downgrade']:
parser = alembic_parser.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser = alembic_parser.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser = alembic_parser.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
def load_db_migrate_parsers(subparsers):
migrate_parser = subparsers.add_parser(
'migrate', help='dealing with DB migration'
)
load_alembic_parsers(migrate_parser)
def load_dbshell_parsers(subparsers):
dbshell_parser = subparsers.add_parser(
'dbshell', help='open database shell'
)
add_config_parameter(dbshell_parser)
def load_test_parsers(subparsers):
subparsers.add_parser(
'test', help='run unit tests'
)
def load_shell_parsers(subparsers):
shell_parser = subparsers.add_parser(
'shell', help='open python REPL'
)
add_config_parameter(shell_parser)
def load_settings_parsers(subparsers):
subparsers.add_parser(
'dump_settings', help='dump current settings to YAML'
)
def load_extensions_parsers(subparsers):
extensions_parser = subparsers.add_parser(
'extensions', help='extensions related actions')
load_alembic_parsers(extensions_parser)
def action_dumpdata(params):
import logging
logging.disable(logging.WARNING)
from nailgun.db.sqlalchemy import fixman
fixman.dump_fixture(params.model)
sys.exit(0)
def action_generate_nodes_fixture(params):
try:
from oslo.serialization import jsonutils
except ImportError:
from oslo_serialization import jsonutils
from nailgun.logger import logger
from nailgun.utils import fake_generator
logger.info('Generating new nodes fixture...')
total_nodes_count = params.total_nodes
fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'nailgun/fixtures/')
file_path = os.path.join(
fixtures_dir,
'{0}_fake_nodes_environment.json'.format(total_nodes_count)
)
generator = fake_generator.FakeNodesGenerator()
res = generator.generate_fake_nodes(
total_nodes_count, error_nodes_count=params.error_nodes,
offline_nodes_count=params.offline_nodes,
min_ifaces_num=params.min_ifaces_num)
with open(file_path, 'w') as file_to_write:
jsonutils.dump(res, file_to_write, indent=4)
logger.info('Done. New fixture was stored in {0} file'.format(file_path))
def action_loaddata(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
with open(params.fixture, "r") as fileobj:
fixman.upload_fixture(fileobj)
logger.info("Done")
def action_loadfakedeploymenttasks(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Applying fake deployment tasks to all releases...")
fixman.load_fake_deployment_tasks()
logger.info("Done")
def action_loaddefault(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
fixman.upload_fixtures()
logger.info("Applying fake deployment tasks to all releases...")
fixman.load_fake_deployment_tasks()
logger.info("Done")
def action_syncdb(params):
from nailgun.db import syncdb
from nailgun.logger import logger
logger.info("Syncing database...")
syncdb()
logger.info("Done")
def action_dropdb(params):
from nailgun.db import dropdb
from nailgun.logger import logger
logger.info("Dropping database...")
dropdb()
logger.info("Done")
def action_migrate(params):
from nailgun.db.migration import action_migrate_alembic_core
action_migrate_alembic_core(params)
def action_extensions(params):
from nailgun.logger import logger
from nailgun.db.migration import action_migrate_alembic_extension
from nailgun.extensions import get_all_extensions
for extension in get_all_extensions():
if extension.alembic_migrations_path():
logger.info('Running command for extension {0}'.format(
extension.full_name()))
action_migrate_alembic_extension(params, extension=extension)
else:
logger.info(
'Extension {0} does not have migrations. '
'Skipping...'.format(extension.full_name()))
def action_test(params):
from nailgun.logger import logger
from nailgun.unit_test import TestRunner
logger.info("Running tests...")
TestRunner.run()
logger.info("Done")
def action_dbshell(params):
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
args = ['psql']
env = {}
if settings.DATABASE['passwd']:
env['PGPASSWORD'] = settings.DATABASE['passwd']
if settings.DATABASE['user']:
args += ["-U", settings.DATABASE['user']]
if settings.DATABASE['host']:
args.extend(["-h", settings.DATABASE['host']])
if settings.DATABASE['port']:
args.extend(["-p", str(settings.DATABASE['port'])])
args += [settings.DATABASE['name']]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvpe('psql', args, env)
def action_dump_settings(params):
from nailgun.settings import settings
sys.stdout.write(settings.dump())
def action_shell(params):
from nailgun.db import db
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
try:
from IPython import embed
embed()
except ImportError:
code.interact(local={'db': db, 'settings': settings})
def action_run(params):
from nailgun.settings import settings
settings.update({
'LISTEN_PORT': int(params.port),
'LISTEN_ADDRESS': params.address,
})
for attr in ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']:
param = getattr(params, attr.lower())
if param is not None:
settings.update({attr: param})
if params.authentication_method:
auth_method = params.authentication_method
settings.AUTH.update({'AUTHENTICATION_METHOD': auth_method})
if params.config_file:
settings.update_from_file(params.config_file)
from nailgun.app import appstart
appstart()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
dest="action", help='actions'
)
load_run_parsers(subparsers)
load_db_parsers(subparsers)
load_db_migrate_parsers(subparsers)
load_dbshell_parsers(subparsers)
load_test_parsers(subparsers)
load_shell_parsers(subparsers)
load_settings_parsers(subparsers)
load_extensions_parsers(subparsers)
params, other_params = parser.parse_known_args()
sys.argv.pop(1)
action = getattr(
__main__,
"action_{0}".format(params.action)
)
action(params) if action else parser.print_help()
| apache-2.0 |
petewarden/tensorflow | tensorflow/python/data/kernel_tests/as_numpy_iterator_test.py | 9 | 3745 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.numpy()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class AsNumpyIteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testBasic(self):
ds = dataset_ops.Dataset.range(3)
self.assertEqual([0, 1, 2], list(ds.as_numpy_iterator()))
@combinations.generate(test_base.eager_only_combinations())
def testImmutable(self):
ds = dataset_ops.Dataset.from_tensors([1, 2, 3])
arr = next(ds.as_numpy_iterator())
with self.assertRaisesRegex(ValueError,
'assignment destination is read-only'):
arr[0] = 0
@combinations.generate(test_base.eager_only_combinations())
def testNestedStructure(self):
point = collections.namedtuple('Point', ['x', 'y'])
ds = dataset_ops.Dataset.from_tensor_slices({
'a': ([1, 2], [3, 4]),
'b': [5, 6],
'c': point([7, 8], [9, 10])
})
self.assertEqual([{
'a': (1, 3),
'b': 5,
'c': point(7, 9)
}, {
'a': (2, 4),
'b': 6,
'c': point(8, 10)
}], list(ds.as_numpy_iterator()))
@combinations.generate(test_base.graph_only_combinations())
def testNonEager(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaises(RuntimeError):
ds.as_numpy_iterator()
def _testInvalidElement(self, element):
ds = dataset_ops.Dataset.from_tensors(element)
with self.assertRaisesRegex(TypeError,
'.*does not support datasets containing.*'):
ds.as_numpy_iterator()
@combinations.generate(test_base.eager_only_combinations())
def testSparseElement(self):
self._testInvalidElement(sparse_tensor.SparseTensorValue([[0]], [0], [1]))
@combinations.generate(test_base.eager_only_combinations())
def testRaggedElement(self):
lst = [[1, 2], [3], [4, 5, 6]]
rt = ragged_factory_ops.constant(lst)
ds = dataset_ops.Dataset.from_tensor_slices(rt)
for actual, expected in zip(ds.as_numpy_iterator(), lst):
self.assertTrue(np.array_equal(actual, expected))
@combinations.generate(test_base.eager_only_combinations())
def testDatasetElement(self):
self._testInvalidElement(dataset_ops.Dataset.range(3))
@combinations.generate(test_base.eager_only_combinations())
def testNestedNonTensorElement(self):
tuple_elem = (constant_op.constant([1, 2, 3]), dataset_ops.Dataset.range(3))
self._testInvalidElement(tuple_elem)
if __name__ == '__main__':
test.main()
| apache-2.0 |
gmnamra/python-image-utils | Tools/guimaker.py | 1 | 6773 | """
###############################################################################
An extended Frame that makes window menus and toolbars automatically.
Use GuiMakerFrameMenu for embedded components (makes frame-based menus).
Use GuiMakerWindowMenu for top-level windows (makes Tk8.0 window menus).
See the self-test code (and PyEdit) for an example layout tree format.
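A minimal sketch of the layout tree format (inferred from the self-test code
below; onOpen and onQuit are hypothetical callables supplied by a subclass):
    menuBar = [
        ('File', 0,                          # (label, underline, items)
            [('Open', 0, onOpen),            # (label, underline, callback)
             ('Quit', 0, onQuit)])]
    toolBar = [('Quit', onQuit, {'side': LEFT})]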
###############################################################################
"""
import sys
from Tkinter import * # widget classes
from tkMessageBox import showinfo               # standard dialogs (Python 2 module name)
class GuiMaker(Frame):
menuBar = [] # class defaults
toolBar = [] # change per instance in subclasses
helpButton = True # set these in start() if need self
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack(expand=YES, fill=BOTH) # make frame stretchable
self.start() # for subclass: set menu/toolBar
self.makeMenuBar() # done here: build menu bar
self.makeToolBar() # done here: build toolbar
self.makeWidgets() # for subclass: add middle part
def makeMenuBar(self):
"""
make menu bar at the top (Tk8.0 menus below)
expand=no, fill=x so same width on resize
"""
menubar = Frame(self, relief=RAISED, bd=2)
menubar.pack(side=TOP, fill=X)
for (name, key, items) in self.menuBar:
mbutton = Menubutton(menubar, text=name, underline=key)
mbutton.pack(side=LEFT)
pulldown = Menu(mbutton)
self.addMenuItems(pulldown, items)
mbutton.config(menu=pulldown)
if self.helpButton:
Button(menubar, text = 'Help',
cursor = 'gumby',
relief = FLAT,
command = self.help).pack(side=RIGHT)
def addMenuItems(self, menu, items):
for item in items: # scan nested items list
if item == 'separator': # string: add separator
menu.add_separator({})
elif type(item) == list: # list: disabled item list
for num in item:
menu.entryconfig(num, state=DISABLED)
elif type(item[2]) != list:
menu.add_command(label = item[0], # command:
underline = item[1], # add command
command = item[2]) # cmd=callable
else:
pullover = Menu(menu)
self.addMenuItems(pullover, item[2]) # sublist:
menu.add_cascade(label = item[0], # make submenu
underline = item[1], # add cascade
menu = pullover)
def makeToolBar(self):
"""
make button bar at bottom, if any
expand=no, fill=x so same width on resize
this could support images too: see Chapter 9,
would need prebuilt gifs or PIL for thumbnails
"""
if self.toolBar:
toolbar = Frame(self, cursor='hand2', relief=SUNKEN, bd=2)
toolbar.pack(side=BOTTOM, fill=X)
for (name, action, where) in self.toolBar:
Button(toolbar, text=name, command=action).pack(where)
def makeWidgets(self):
"""
make 'middle' part last, so menu/toolbar
is always on top/bottom and clipped last;
override this default, pack middle any side;
for grid: grid middle part in a packed frame
"""
name = Label(self,
width=40, height=10,
relief=SUNKEN, bg='white',
text = self.__class__.__name__,
cursor = 'crosshair')
name.pack(expand=YES, fill=BOTH, side=TOP)
def help(self):
"override me in subclass"
showinfo('Help', 'Sorry, no help for ' + self.__class__.__name__)
def start(self):
"override me in subclass: set menu/toolbar with self"
pass
###############################################################################
# Customize for Tk 8.0 main window menu bar, instead of a frame
###############################################################################
GuiMakerFrameMenu = GuiMaker # use this for embedded component menus
class GuiMakerWindowMenu(GuiMaker): # use this for top-level window menus
def makeMenuBar(self):
menubar = Menu(self.master)
self.master.config(menu=menubar)
for (name, key, items) in self.menuBar:
pulldown = Menu(menubar)
self.addMenuItems(pulldown, items)
menubar.add_cascade(label=name, underline=key, menu=pulldown)
if self.helpButton:
if sys.platform[:3] == 'win':
menubar.add_command(label='Help', command=self.help)
else:
pulldown = Menu(menubar) # Linux needs real pull down
pulldown.add_command(label='About', command=self.help)
menubar.add_cascade(label='Help', menu=pulldown)
###############################################################################
# Self-test when file run standalone: 'python guimaker.py'
###############################################################################
if __name__ == '__main__':
from guimixin import GuiMixin # mix in a help method
menuBar = [
('File', 0,
[('Open', 0, lambda:0), # lambda:0 is a no-op
('Quit', 0, sys.exit)]), # use sys, no self here
('Edit', 0,
[('Cut', 0, lambda:0),
('Paste', 0, lambda:0)]) ]
toolBar = [('Quit', sys.exit, {'side': LEFT})]
class TestAppFrameMenu(GuiMixin, GuiMakerFrameMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar
class TestAppWindowMenu(GuiMixin, GuiMakerWindowMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar
class TestAppWindowMenuBasic(GuiMakerWindowMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar # guimaker help, not guimixin
root = Tk()
TestAppFrameMenu(Toplevel())
TestAppWindowMenu(Toplevel())
TestAppWindowMenuBasic(root)
root.mainloop()
| mit |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/shell/demos/explorer_browser.py | 10 | 4758 | # A sample of using Vista's IExplorerBrowser interfaces...
# Currently doesn't quite work:
# * CPU sits at 100% while running.
import sys
import pythoncom
from win32com.shell import shell, shellcon
import win32gui, win32con, win32api
from win32com.server.util import wrap, unwrap
# event handler for the browser.
IExplorerBrowserEvents_Methods = """OnNavigationComplete OnNavigationFailed
OnNavigationPending OnViewCreated""".split()
class EventHandler:
_com_interfaces_ = [shell.IID_IExplorerBrowserEvents]
_public_methods_ = IExplorerBrowserEvents_Methods
def OnNavigationComplete(self, pidl):
print("OnNavComplete", pidl)
def OnNavigationFailed(self, pidl):
print("OnNavigationFailed", pidl)
def OnNavigationPending(self, pidl):
print("OnNavigationPending", pidl)
def OnViewCreated(self, view):
print("OnViewCreated", view)
# And if our demo view has been registered, it may well
# be that view!
try:
pyview = unwrap(view)
            print("and look - it's a Python-implemented view!", pyview)
except ValueError:
pass
class MainWindow:
def __init__(self):
message_map = {
win32con.WM_DESTROY: self.OnDestroy,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_SIZE: self.OnSize,
}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = "test_explorer_browser"
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
self.hwnd = win32gui.CreateWindow( classAtom, "Python IExplorerBrowser demo", style, \
0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
0, 0, hinst, None)
eb = pythoncom.CoCreateInstance(shellcon.CLSID_ExplorerBrowser, None, pythoncom.CLSCTX_ALL, shell.IID_IExplorerBrowser)
# as per MSDN docs, hook up events early
self.event_cookie = eb.Advise(wrap(EventHandler()))
eb.SetOptions(shellcon.EBO_SHOWFRAMES)
rect = win32gui.GetClientRect(self.hwnd)
# Set the flags such that the folders autoarrange and non web view is presented
flags = (shellcon.FVM_LIST, shellcon.FWF_AUTOARRANGE | shellcon.FWF_NOWEBVIEW)
eb.Initialize(self.hwnd, rect, (0, shellcon.FVM_DETAILS))
if len(sys.argv)==2:
# If an arg was specified, ask the desktop parse it.
# You can pass anything explorer accepts as its '/e' argument -
# eg, "::{guid}\::{guid}" etc.
# "::{20D04FE0-3AEA-1069-A2D8-08002B30309D}" is "My Computer"
pidl = shell.SHGetDesktopFolder().ParseDisplayName(0, None, sys.argv[1])[1]
else:
# And start browsing at the root of the namespace.
pidl = []
eb.BrowseToIDList(pidl, shellcon.SBSP_ABSOLUTE)
# and for some reason the "Folder" view in the navigator pane doesn't
        # magically synchronize itself - so let's do that ourselves.
# Get the tree control.
sp = eb.QueryInterface(pythoncom.IID_IServiceProvider)
try:
tree = sp.QueryService(shell.IID_INameSpaceTreeControl,
shell.IID_INameSpaceTreeControl)
except pythoncom.com_error as exc:
# this should really only fail if no "nav" frame exists...
print("Strange - failed to get the tree control even though " \
"we asked for a EBO_SHOWFRAMES")
print(exc)
else:
# get the IShellItem for the selection.
si = shell.SHCreateItemFromIDList(pidl, shell.IID_IShellItem)
# set it to selected.
tree.SetItemState(si, shellcon.NSTCIS_SELECTED, shellcon.NSTCIS_SELECTED)
#eb.FillFromObject(None, shellcon.EBF_NODROPTARGET);
#eb.SetEmptyText("No known folders yet...");
self.eb = eb
def OnCommand(self, hwnd, msg, wparam, lparam):
pass
def OnDestroy(self, hwnd, msg, wparam, lparam):
print("tearing down ExplorerBrowser...")
self.eb.Unadvise(self.event_cookie)
self.eb.Destroy()
self.eb = None
print("shutting down app...")
win32gui.PostQuitMessage(0)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self.eb.SetRect(None, (0, 0, x, y))
def main():
w=MainWindow()
win32gui.PumpMessages()
if __name__=='__main__':
main()
| apache-2.0 |
synopat/pyload | module/lib/thrift/transport/TTransport.py | 74 | 8384 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from struct import pack,unpack
from thrift.Thrift import TException
class TTransportException(TException):
"""Custom Transport Exception class"""
UNKNOWN = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TTransportBase:
"""Base class for Thrift transport layer."""
def isOpen(self):
pass
def open(self):
pass
def close(self):
pass
def read(self, sz):
pass
def readAll(self, sz):
buff = ''
have = 0
while (have < sz):
chunk = self.read(sz-have)
have += len(chunk)
buff += chunk
if len(chunk) == 0:
raise EOFError()
return buff
def write(self, buf):
pass
def flush(self):
pass
# This class should be thought of as an interface.
class CReadableTransport:
"""base class for transports that are readable from C"""
# TODO(dreiss): Think about changing this interface to allow us to use
# a (Python, not c) StringIO instead, because it allows
# you to write after reading.
# NOTE: This is a classic class, so properties will NOT work
# correctly for setting.
@property
def cstringio_buf(self):
"""A cStringIO buffer that contains the current chunk we are reading."""
pass
def cstringio_refill(self, partialread, reqlen):
"""Refills cstringio_buf.
Returns the currently used buffer (which can but need not be the same as
the old cstringio_buf). partialread is what the C code has read from the
buffer, and should be inserted into the buffer before any more reads. The
return value must be a new, not borrowed reference. Something along the
lines of self._buf should be fine.
If reqlen bytes can't be read, throw EOFError.
"""
pass
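# Editor's note (added; not part of the original Thrift source): for concrete
# implementations of the cstringio_refill() contract described above, see
# TBufferedTransport.cstringio_refill and TFramedTransport.cstringio_refill
# later in this module - each returns a fresh StringIO seeded with partialread
# plus newly read bytes, and lets readAll() raise EOFError when reqlen bytes
# cannot be supplied.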
class TServerTransportBase:
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
pass
def close(self):
pass
class TTransportFactoryBase:
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
class TBufferedTransportFactory:
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
class TBufferedTransport(TTransportBase,CReadableTransport):
"""Class that wraps another transport and buffers its I/O.
The implementation uses a (configurable) fixed-size read buffer
but buffers all writes until a flush is performed.
"""
DEFAULT_BUFFER = 4096
def __init__(self, trans, rbuf_size = DEFAULT_BUFFER):
self.__trans = trans
self.__wbuf = StringIO()
self.__rbuf = StringIO("")
self.__rbuf_size = rbuf_size
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
return self.__rbuf.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
out = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
self.__trans.write(out)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
retstring = partialread
if reqlen < self.__rbuf_size:
# try to make a read of as much as we can.
retstring += self.__trans.read(self.__rbuf_size)
# but make sure we do read reqlen bytes.
if len(retstring) < reqlen:
retstring += self.__trans.readAll(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
return self.__rbuf
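# Editor's note: the helper below is an added, hedged usage sketch; it is not
# part of the original Thrift source and is never called at import time. It
# wraps a TMemoryBuffer (defined later in this module) with TBufferedTransport
# purely to illustrate that writes stay in the write buffer until flush()
# pushes them to the wrapped transport.
def _example_buffered_transport_usage():
    inner = TMemoryBuffer()                    # stands in for the "wire"
    buffered = TBufferedTransport(inner)
    buffered.write('hello ')
    buffered.write('world')
    assert inner.getvalue() == ''              # nothing sent yet - still buffered
    buffered.flush()
    assert inner.getvalue() == 'hello world'   # flush() forwards the buffered bytes
    return inner.getvalue()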
class TMemoryBuffer(TTransportBase, CReadableTransport):
"""Wraps a cStringIO object as a TTransport.
  NOTE: Unlike the C++ version of this class, you cannot write to it
  and then immediately read from it. If you want to read from a
  TMemoryBuffer, you must pass a string to the constructor.
TODO(dreiss): Make this work like the C++ version.
"""
def __init__(self, value=None):
"""value -- a value to read from for stringio
If value is set, this will be a transport for reading,
otherwise, it is for writing"""
if value is not None:
self._buffer = StringIO(value)
else:
self._buffer = StringIO()
def isOpen(self):
return not self._buffer.closed
def open(self):
pass
def close(self):
self._buffer.close()
def read(self, sz):
return self._buffer.read(sz)
def write(self, buf):
self._buffer.write(buf)
def flush(self):
pass
def getvalue(self):
return self._buffer.getvalue()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._buffer
def cstringio_refill(self, partialread, reqlen):
# only one shot at reading...
raise EOFError()
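# Editor's note: added, hedged illustration of the read-vs-write behaviour
# documented in TMemoryBuffer's docstring; not part of the original source and
# never executed on import.
def _example_memory_buffer_usage():
    reader = TMemoryBuffer('abcdef')           # reading mode: payload passed to constructor
    first = reader.read(3)                     # -> 'abc'
    writer = TMemoryBuffer()                   # writing mode: construct empty...
    writer.write('payload')
    return first, writer.getvalue()            # ...and collect the bytes with getvalue()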
class TFramedTransportFactory:
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
class TFramedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans,):
self.__trans = trans
self.__rbuf = StringIO()
self.__wbuf = StringIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = StringIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = StringIO(prefix)
return self.__rbuf
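# Editor's note: added, hedged sketch (not part of the original source, never
# executed on import) showing the frame layout produced by
# TFramedTransport.flush(): a 4-byte big-endian length prefix followed by the
# payload, exactly as written by pack('!i', wsz) above.
def _example_framed_transport_usage():
    wire = TMemoryBuffer()
    framed = TFramedTransport(wire)
    framed.write('abc')
    framed.flush()
    raw = wire.getvalue()
    assert raw == pack('!i', 3) + 'abc'        # '\x00\x00\x00\x03' + 'abc'
    # Reading it back: wrap a transport that holds the framed bytes.
    reading = TFramedTransport(TMemoryBuffer(raw))
    return reading.read(3)                     # -> 'abc'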
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return True
def close(self):
self.fileobj.close()
def read(self, sz):
return self.fileobj.read(sz)
def write(self, buf):
self.fileobj.write(buf)
def flush(self):
self.fileobj.flush()
| gpl-3.0 |
wwj718/edx-platform | common/djangoapps/track/views/tests/test_views.py | 81 | 10225 | # pylint: disable=missing-docstring,maybe-no-member
from mock import patch, sentinel
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from track import views
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase, FROZEN_TIME
from openedx.core.lib.tests.assertions.events import assert_event_matches
class TestTrackViews(EventTrackingTestCase):
def setUp(self):
super(TestTrackViews, self).setUp()
self.request_factory = RequestFactory()
patcher = patch('track.views.tracker', autospec=True)
self.mock_tracker = patcher.start()
self.addCleanup(patcher.stop)
self.path_with_course = '/courses/foo/bar/baz/xmod/'
self.url_with_course = 'http://www.edx.org' + self.path_with_course
self.event = {
sentinel.key: sentinel.value
}
def test_user_track(self):
request = self.request_factory.get('/event', {
'page': self.url_with_course,
'event_type': sentinel.event_type,
'event': '{}'
})
views.user_track(request)
actual_event = self.get_event()
expected_event = {
'context': {
'course_id': 'foo/bar/baz',
'org_id': 'foo',
'event_source': 'browser',
'page': self.url_with_course,
'username': 'anonymous'
},
'data': {},
'timestamp': FROZEN_TIME,
'name': str(sentinel.event_type)
}
assert_event_matches(expected_event, actual_event)
def test_user_track_with_missing_values(self):
request = self.request_factory.get('/event')
views.user_track(request)
actual_event = self.get_event()
expected_event = {
'context': {
'course_id': '',
'org_id': '',
'event_source': 'browser',
'page': '',
'username': 'anonymous'
},
'data': {},
'timestamp': FROZEN_TIME,
'name': 'unknown'
}
assert_event_matches(expected_event, actual_event)
views.user_track(request)
def test_user_track_with_empty_event(self):
request = self.request_factory.get('/event', {
'page': self.url_with_course,
'event_type': sentinel.event_type,
'event': ''
})
views.user_track(request)
actual_event = self.get_event()
expected_event = {
'context': {
'course_id': 'foo/bar/baz',
'org_id': 'foo',
'event_source': 'browser',
'page': self.url_with_course,
'username': 'anonymous'
},
'data': {},
'timestamp': FROZEN_TIME,
'name': str(sentinel.event_type)
}
assert_event_matches(expected_event, actual_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=[{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'}],
)
def test_user_track_with_middleware_and_processors(self):
self.recreate_tracker()
middleware = TrackMiddleware()
payload = '{"foo": "bar"}'
user_id = 1
request = self.request_factory.get('/event', {
'page': self.url_with_course,
'event_type': sentinel.event_type,
'event': payload
})
request.user = User.objects.create(pk=user_id, username=str(sentinel.username))
request.META['REMOTE_ADDR'] = '10.0.0.1'
request.META['HTTP_REFERER'] = str(sentinel.referer)
request.META['HTTP_ACCEPT_LANGUAGE'] = str(sentinel.accept_language)
request.META['HTTP_USER_AGENT'] = str(sentinel.user_agent)
request.META['SERVER_NAME'] = 'testserver2'
middleware.process_request(request)
try:
views.user_track(request)
expected_event = {
'accept_language': str(sentinel.accept_language),
'referer': str(sentinel.referer),
'username': str(sentinel.username),
'session': '',
'ip': '10.0.0.1',
'event_source': 'browser',
'event_type': str(sentinel.event_type),
'name': str(sentinel.event_type),
'event': payload,
'agent': str(sentinel.user_agent),
'page': self.url_with_course,
'time': FROZEN_TIME,
'host': 'testserver2',
'context': {
'course_id': 'foo/bar/baz',
'org_id': 'foo',
'user_id': user_id,
'path': u'/event'
},
}
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
def test_server_track(self):
request = self.request_factory.get(self.path_with_course)
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {},
}
self.assert_mock_tracker_call_matches(expected_event)
def assert_mock_tracker_call_matches(self, expected_event):
self.assertEqual(len(self.mock_tracker.send.mock_calls), 1)
actual_event = self.mock_tracker.send.mock_calls[0][1][0]
assert_event_matches(expected_event, actual_event)
def test_server_track_with_middleware(self):
middleware = TrackMiddleware()
request = self.request_factory.get(self.path_with_course)
middleware.process_request(request)
# The middleware emits an event, reset the mock to ignore it since we aren't testing that feature.
self.mock_tracker.reset_mock()
try:
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {
'user_id': '',
'course_id': u'foo/bar/baz',
'org_id': 'foo',
'path': u'/courses/foo/bar/baz/xmod/'
},
}
finally:
middleware.process_response(request, None)
self.assert_mock_tracker_call_matches(expected_event)
def test_server_track_with_middleware_and_google_analytics_cookie(self):
middleware = TrackMiddleware()
request = self.request_factory.get(self.path_with_course)
request.COOKIES['_ga'] = 'GA1.2.1033501218.1368477899'
middleware.process_request(request)
# The middleware emits an event, reset the mock to ignore it since we aren't testing that feature.
self.mock_tracker.reset_mock()
try:
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {
'user_id': '',
'course_id': u'foo/bar/baz',
'org_id': 'foo',
'path': u'/courses/foo/bar/baz/xmod/'
},
}
finally:
middleware.process_response(request, None)
self.assert_mock_tracker_call_matches(expected_event)
def test_server_track_with_no_request(self):
request = None
views.server_track(request, str(sentinel.event_type), '{}')
expected_event = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '',
'event_source': 'server',
'event_type': str(sentinel.event_type),
'event': '{}',
'agent': '',
'page': None,
'time': FROZEN_TIME,
'host': '',
'context': {},
}
self.assert_mock_tracker_call_matches(expected_event)
def test_task_track(self):
request_info = {
'accept_language': '',
'referer': '',
'username': 'anonymous',
'ip': '127.0.0.1',
'agent': 'agent',
'host': 'testserver',
}
task_info = {
sentinel.task_key: sentinel.task_value
}
expected_event_data = dict(task_info)
expected_event_data.update(self.event)
views.task_track(request_info, task_info, str(sentinel.event_type), self.event)
expected_event = {
'username': 'anonymous',
'ip': '127.0.0.1',
'event_source': 'task',
'event_type': str(sentinel.event_type),
'event': expected_event_data,
'agent': 'agent',
'page': None,
'time': FROZEN_TIME,
'host': 'testserver',
'context': {
'course_id': '',
'org_id': ''
},
}
self.assert_mock_tracker_call_matches(expected_event)
| agpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_4/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
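#(added note) concatenated_data[i,j] = log10( 4860 * F_line_j / F_4860 ), i.e. each
#line flux is scaled by the 4860A (H-beta) flux stored in column 57; entries whose
#log ratio is not positive are left at the array's initial value of zero.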
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
    add_sub_plot(i+1)   # subplot positions are 1-based; add_sub_plot expects 1..16
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
| gpl-2.0 |
matthiask/django-chet | chet/views.py | 1 | 1373 | from django.shortcuts import get_object_or_404, render
from django.views import generic
from chet.models import Album, Photo
def visible_albums(user):
if user.is_staff:
return Album.objects.active()
else:
return Album.objects.public()
def visible_photos(user):
if user.is_staff:
return Photo.objects.active()
else:
return Photo.objects.public()
class AlbumMixin(object):
allow_empty = True
date_field = 'date'
make_object_list = True
month_format = '%m'
paginate_by = 10
paginate_orphans = 3
def get_queryset(self):
return visible_albums(self.request.user)
class AlbumArchiveView(AlbumMixin, generic.ArchiveIndexView):
pass
def album_detail(request, year, slug):
album = get_object_or_404(
visible_albums(request.user),
date__year=year,
slug=slug,
)
return render(request, 'chet/album_detail.html', {
'album': album,
'object': album,
'photos': visible_photos(request.user).filter(album=album),
})
def photo_detail(request, year, slug, photo):
photo = get_object_or_404(
visible_photos(request.user),
album__date__year=year,
album__slug=slug,
pk=photo,
)
return render(request, 'chet/photo_detail.html', {
'photo': photo,
'object': photo,
})
| bsd-3-clause |
nerzhul/ansible | lib/ansible/modules/files/acl.py | 25 | 11472 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
name:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['path']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
recursive:
version_added: "2.0"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
- "Brian Coca (@bcoca)"
- "Jérémie Astori (@astorije)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
- As of Ansible 2.0, this module only supports Linux distributions.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl:
name: /etc/foo.conf
entity: joe
etype: user
permissions: r
state: present
# Removes the acl for Joe on a specific file
- acl:
name: /etc/foo.conf
entity: joe
etype: user
state: absent
# Sets default acl for joe on foo.d
- acl:
name: /etc/foo.d
entity: joe
etype: user
permissions: rw
default: yes
state: present
# Same as previous but using entry shorthand
- acl:
name: /etc/foo.d
entry: "default:user:joe:rw-"
state: present
# Obtain the acl for a specific file
- acl:
name: /etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
d = None
if entry.lower().startswith("d"):
d = True
a.pop(0)
if len(a) == 2:
a.append(None)
t, e, p = a
t = t.lower()
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
return [d, t, e, p]
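# (added illustrative note, not part of the original module) split_entry()
# normalises shorthand ACL text into [default?, etype, entity, permissions]:
#   split_entry('user:joe:rw-')         -> [None, 'user', 'joe', 'rw-']
#   split_entry('default:user:joe:rw-') -> [True, 'user', 'joe', 'rw-']
#   split_entry('g:admins:-')           -> [None, 'group', 'admins', '-']
#   split_entry('m::rw')                -> [None, 'mask', '', 'rw']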
def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False):
'''Builds and returns an entry string. Does not include the permissions bit if they are not provided.'''
if use_nfsv4_acls:
return ':'.join([etype, entity, permissions, 'allow'])
if permissions:
return etype + ':' + entity + ':' + permissions
else:
return etype + ':' + entity
def build_command(module, mode, path, follow, default, recursive, entry=''):
'''Builds and returns a getfacl/setfacl command.'''
if mode == 'set':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-m "%s"' % entry)
elif mode == 'rm':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-x "%s"' % entry)
else: # mode == 'get'
cmd = [module.get_bin_path('getfacl', True)]
# prevents absolute path warnings and removes headers
if get_platform().lower() == 'linux':
cmd.append('--omit-header')
cmd.append('--absolute-names')
if recursive:
cmd.append('--recursive')
if not follow:
if get_platform().lower() == 'linux':
cmd.append('--physical')
elif get_platform().lower() == 'freebsd':
cmd.append('-h')
if default:
if(mode == 'rm'):
cmd.insert(1, '-k')
else: # mode == 'set' or mode == 'get'
cmd.insert(1, '-d')
cmd.append(path)
return cmd
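# (added illustrative note, not part of the original module) build_command()
# returns the invocation as a list that run_acl() later joins with spaces; on
# Linux, with follow=True, default=False and recursive=False, a 'set' call
# roughly yields
#   ['/usr/bin/setfacl', '-m "user:joe:rw"', '/etc/foo.conf']
# and a 'get' call yields
#   ['/usr/bin/getfacl', '--omit-header', '--absolute-names', '/etc/foo.conf']
# (the binary paths come from module.get_bin_path and vary per system).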
def acl_changed(module, cmd):
'''Returns true if the provided command affects the existing ACLs, false otherwise.'''
# FreeBSD do not have a --test flag, so by default, it is safer to always say "true"
if get_platform().lower() == 'freebsd':
return True
cmd = cmd[:] # lists are mutables so cmd would be overwritten without this
cmd.insert(1, '--test')
lines = run_acl(module, cmd)
for line in lines:
if not line.endswith('*,*'):
return True
return False
def run_acl(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception:
e = get_exception()
module.fail_json(msg=e.strerror)
lines = []
for l in out.splitlines():
if not l.startswith('#'):
lines.append(l.strip())
if lines and not lines[-1].split():
# trim last line only when it is empty
return lines[:-1]
else:
return lines
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['path'], type='path'),
entry=dict(required=False, type='str'),
entity=dict(required=False, type='str', default=''),
etype=dict(
required=False,
choices=['other', 'user', 'group', 'mask'],
type='str'
),
permissions=dict(required=False, type='str'),
state=dict(
required=False,
default='query',
choices=['query', 'present', 'absent'],
type='str'
),
follow=dict(required=False, type='bool', default=True),
default=dict(required=False, type='bool', default=False),
recursive=dict(required=False, type='bool', default=False),
use_nfsv4_acls=dict(required=False, type='bool', default=False)
),
supports_check_mode=True,
)
if get_platform().lower() not in ['linux', 'freebsd']:
module.fail_json(msg="The acl module is not available on this system.")
path = module.params.get('name')
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
recursive = module.params.get('recursive')
use_nfsv4_acls = module.params.get('use_nfsv4_acls')
if not os.path.exists(path):
module.fail_json(msg="Path not found or not accessible.")
if state == 'query' and recursive:
module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")
if not entry:
if state == 'absent' and permissions:
module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")
if state == 'absent' and not entity:
module.fail_json(msg="'entity' MUST be set when 'state=absent'.")
if state in ['present', 'absent'] and not etype:
module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")
if state == 'present' and not entry.count(":") in [2, 3]:
module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")
if state == 'absent' and not entry.count(":") in [1, 2]:
module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")
if state == 'query':
module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")
default_flag, etype, entity, permissions = split_entry(entry)
if default_flag != None:
default = default_flag
if get_platform().lower() == 'freebsd':
if recursive:
module.fail_json(msg="recursive is not supported on that platform.")
changed = False
msg = ""
if state == 'present':
entry = build_entry(etype, entity, permissions, use_nfsv4_acls)
command = build_command(
module, 'set', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command)
msg = "%s is present" % entry
elif state == 'absent':
        entry = build_entry(etype, entity, use_nfsv4_acls=use_nfsv4_acls)  # keyword arg: the third positional slot is 'permissions'
command = build_command(
module, 'rm', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command, False)
msg = "%s is absent" % entry
elif state == 'query':
msg = "current acl"
acl = run_acl(
module,
build_command(module, 'get', path, follow, default, recursive)
)
module.exit_json(changed=changed, msg=msg, acl=acl)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ybellavance/python-for-android | python3-alpha/extra_modules/atom/mock_service.py | 48 | 10344 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MockService provides CRUD ops. for mocking calls to AtomPub services.
MockService: Exposes the publicly used methods of AtomService to provide
a mock interface which can be used in unit tests.
"""
import atom.service
import pickle
__author__ = 'api.jscudder (Jeffrey Scudder)'
# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects.
recordings = []
# If set, the mock service HttpRequest are actually made through this object.
real_request_handler = None
def ConcealValueWithSha(source):
import sha
return sha.new(source[:-5]).hexdigest()
def DumpRecordings(conceal_func=ConcealValueWithSha):
if conceal_func:
for recording_pair in recordings:
recording_pair[0].ConcealSecrets(conceal_func)
return pickle.dumps(recordings)
def LoadRecordings(recordings_file_or_string):
if isinstance(recordings_file_or_string, str):
atom.mock_service.recordings = pickle.loads(recordings_file_or_string)
elif hasattr(recordings_file_or_string, 'read'):
atom.mock_service.recordings = pickle.loads(
recordings_file_or_string.read())
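# (added illustrative note, not part of the original module) The two helpers
# above give a simple persistence round-trip for recorded sessions, e.g.
#   saved = DumpRecordings()              # pickled list of (MockRequest, MockHttpResponse) pairs
#   LoadRecordings(saved)                 # or LoadRecordings(open('recordings.pickle'))
# DumpRecordings conceals Authorization headers via conceal_func before pickling.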
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Simulates an HTTP call to the server, makes an actual HTTP request if
real_request_handler is set.
This function operates in two different modes depending on if
real_request_handler is set or not. If real_request_handler is not set,
HttpRequest will look in this module's recordings list to find a response
which matches the parameters in the function call. If real_request_handler
is set, this function will call real_request_handler.HttpRequest, add the
response to the recordings list, and respond with the actual response.
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string.
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
full_uri = atom.service.BuildUri(uri, url_params, escape_params)
(server, port, ssl, uri) = atom.service.ProcessUrl(service, uri)
current_request = MockRequest(operation, full_uri, host=server, ssl=ssl,
data=data, extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, content_type=content_type)
# If the request handler is set, we should actually make the request using
# the request handler and record the response to replay later.
if real_request_handler:
response = real_request_handler.HttpRequest(service, operation, data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, content_type=content_type)
# TODO: need to copy the HTTP headers from the real response into the
# recorded_response.
recorded_response = MockHttpResponse(body=response.read(),
status=response.status, reason=response.reason)
# Insert a tuple which maps the request to the response object returned
# when making an HTTP call using the real_request_handler.
recordings.append((current_request, recorded_response))
return recorded_response
else:
# Look through available recordings to see if one matches the current
# request.
for request_response_pair in recordings:
if request_response_pair[0].IsMatch(current_request):
return request_response_pair[1]
return None
class MockRequest(object):
"""Represents a request made to an AtomPub server.
  These objects are used to check whether a client request matches a recorded
  HTTP request, which determines what the mock server's response will be.
"""
def __init__(self, operation, uri, host=None, ssl=False, port=None,
data=None, extra_headers=None, url_params=None, escape_params=True,
content_type='application/atom+xml'):
"""Constructor for a MockRequest
Args:
operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the
HTTP operation requested on the resource.
uri: str The URL describing the resource to be modified or feed to be
retrieved. This should include the protocol (http/https) and the host
        (aka domain). For example, these are some valid full_uris:
'http://example.com', 'https://www.google.com/accounts/ClientLogin'
host: str (optional) The server name which will be placed at the
beginning of the URL if the uri parameter does not begin with 'http'.
Examples include 'example.com', 'www.google.com', 'www.blogger.com'.
ssl: boolean (optional) If true, the request URL will begin with https
instead of http.
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string. (optional)
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, the constructor
will read the entire file into memory. If the data is a list of
parts to be sent, each part will be evaluated and stored.
extra_headers: dict (optional) HTTP headers included in the request.
url_params: dict (optional) Key value pairs which should be added to
the URL as URL parameters in the request. For example uri='/',
url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'.
escape_params: boolean (optional) Perform URL escaping on the keys and
values specified in url_params. Defaults to True.
content_type: str (optional) Provides the MIME type of the data being
sent.
"""
self.operation = operation
self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl)
self.data = data
self.extra_headers = extra_headers
self.url_params = url_params or {}
self.escape_params = escape_params
self.content_type = content_type
def ConcealSecrets(self, conceal_func):
"""Conceal secret data in this request."""
if 'Authorization' in self.extra_headers:
self.extra_headers['Authorization'] = conceal_func(
self.extra_headers['Authorization'])
def IsMatch(self, other_request):
"""Check to see if the other_request is equivalent to this request.
Used to determine if a recording matches an incoming request so that a
recorded response should be sent to the client.
The matching is not exact, only the operation and URL are examined
currently.
Args:
other_request: MockRequest The request which we want to check this
(self) MockRequest against to see if they are equivalent.
"""
# More accurate matching logic will likely be required.
return (self.operation == other_request.operation and self.uri ==
other_request.uri)
def _ConstructFullUrlBase(uri, host=None, ssl=False):
"""Puts URL components into the form http(s)://full.host.strinf/uri/path
Used to construct a roughly canonical URL so that URLs which begin with
'http://example.com/' can be compared to a uri of '/' when the host is
set to 'example.com'
If the uri contains 'http://host' already, the host and ssl parameters
are ignored.
Args:
uri: str The path component of the URL, examples include '/'
host: str (optional) The host name which should prepend the URL. Example:
'example.com'
ssl: boolean (optional) If true, the returned URL will begin with https
instead of http.
Returns:
String which has the form http(s)://example.com/uri/string/contents
"""
if uri.startswith('http'):
return uri
if ssl:
return 'https://%s%s' % (host, uri)
else:
return 'http://%s%s' % (host, uri)
class MockHttpResponse(object):
"""Returned from MockService crud methods as the server's response."""
def __init__(self, body=None, status=None, reason=None, headers=None):
"""Construct a mock HTTPResponse and set members.
Args:
body: str (optional) The HTTP body of the server's response.
status: int (optional)
reason: str (optional)
headers: dict (optional)
"""
self.body = body
self.status = status
self.reason = reason
self.headers = headers or {}
def read(self):
return self.body
def getheader(self, header_name):
return self.headers[header_name]
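# Editor's note: added, hedged usage sketch; not part of the original module
# and never called at import time. It assembles a replayed session by hand:
# a recorded (MockRequest, MockHttpResponse) pair is appended to the module
# level recordings list and a later, equivalent request is matched against it
# with MockRequest.IsMatch. The URL and body are placeholders only.
def _example_replay_recording():
    canned = MockHttpResponse(body='<feed/>', status=200, reason='OK')
    recorded = MockRequest('GET', 'http://example.com/feeds/snippets')
    recordings.append((recorded, canned))
    incoming = MockRequest('GET', 'http://example.com/feeds/snippets')
    if recorded.IsMatch(incoming):
        return canned.read()               # -> '<feed/>'
    return None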
| apache-2.0 |
niteoweb/libcloud | libcloud/container/drivers/dummy.py | 24 | 1589 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.base import ContainerDriver
class DummyContainerDriver(ContainerDriver):
"""
Dummy Container driver.
>>> from libcloud.container.drivers.dummy import DummyContainerDriver
>>> driver = DummyContainerDriver('key', 'secret')
>>> driver.name
'Dummy Container Provider'
"""
name = 'Dummy Container Provider'
website = 'http://example.com'
supports_clusters = False
def __init__(self, api_key, api_secret):
"""
:param api_key: API key or username to used (required)
:type api_key: ``str``
:param api_secret: Secret password to be used (required)
:type api_secret: ``str``
:rtype: ``None``
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 |
tedelhourani/ansible | lib/ansible/modules/cloud/amazon/ec2_vol.py | 10 | 20561 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance.
If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made.
This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
"Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
kms_key_id:
description:
- Specify the id of the KMS key to use.
default: null
version_added: "2.3"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
tags:
description:
- tag:value pairs to add to the volume after creation
required: false
default: {}
version_added: "2.3"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
volume_size: 5
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
sample: "/def/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto
import boto.ec2
import boto.exception
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
"""
Check if Boto library supports kms_key_ids (added in 2.39.0)
Returns:
True if version is equal to or higher then the version needed, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
tags = module.params.get('tags')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
if kms_key_id is not None:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
tags["Name"] = name
if tags:
ec2.create_tags([volume.id], tags)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume, changed
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
volume_info = {}
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
iops = dict(),
encrypted = dict(type='bool', default=False),
kms_key_id = dict(),
device_name = dict(),
delete_on_termination = dict(type='bool', default=False),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present'),
tags = dict(type='dict', default={})
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
tags = module.params.get('tags')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
if kms_key_id is not None and not boto_supports_kms_key_id():
module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")
    # Here we need to get the zone info for the instance. This covers the situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and id:
module.fail_json(msg="Cannot specify volume_size together with id")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
# Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
# deleteOnTermination is not correctly reflected on attachment
if module.params.get('delete_on_termination'):
for attempt in range(0, 8):
if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
break
time.sleep(5)
volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
if __name__ == '__main__':
main()
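# Illustrative playbook usage sketch (added for clarity, not part of the
# original module; the instance id and sizes below are hypothetical):
#
#     - ec2_vol:
#         instance: i-0123456789abcdef0
#         volume_size: 10
#         device_name: /dev/sdf
#         delete_on_termination: yes
#         state: present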
| gpl-3.0 |
HaydenFaulkner/phd | keras_code/cnns/model_defs/faulkner/c3d.py | 1 | 3008 | '''
C3D CNN architecture
Webpage for original:
http://vlg.cs.dartmouth.edu/c3d/
Paper for original:
D. Tran, L. Bourdev, R. Fergus, L. Torresani, and M. Paluri
Learning Spatiotemporal Features with 3D Convolutional Networks
ICCV 2015
http://vlg.cs.dartmouth.edu/c3d/c3d_video.pdf
Designed for Sports-1M Dataset
'''
from keras.layers import Flatten, Dense, Dropout, Input, Conv3D, MaxPool3D, ZeroPadding3D
from keras.models import Model
def c3d(t=16, input_tensor=None, fc_size=4096, image_dims=112, image_channels=3, output_channels=487, dropout=None, layers=[1,2,3,4,5,6,7]):
assert(input_tensor is not None)
x = input_tensor
if 1 in layers:
x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same', activation='relu', name='conv1')(x)
if 5 not in layers and 4 not in layers and 3 not in layers and 2 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((1, 2, 2), strides=(1, 2, 2), padding='valid', name="pool1")(x)
if 2 in layers:
x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv2')(x)
if 5 not in layers and 4 not in layers and 3 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name="pool2")(x)
if 3 in layers:
x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv3a')(x)
x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv3b')(x)
if 5 not in layers and 4 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name="pool3")(x)
if 4 in layers:
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv4a')(x)
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv4b')(x)
if 5 not in layers:
x = ZeroPadding3D((0, 1, 1))(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name="pool4")(x)
if 5 in layers:
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv5a')(x)
x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same', name='conv5b')(x)
x = ZeroPadding3D((0, 1, 1), name='zeropad5')(x)
x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')(x)
x = Flatten(name='flatten_1')(x)
if 6 in layers:
x = Dense(fc_size, activation='relu', name='fc6')(x)
if dropout is not None:
x = Dropout(dropout, name='dropout_1')(x)
if 7 in layers:
x = Dense(fc_size, activation='relu', name='fc7')(x)
if dropout is not None:
x = Dropout(dropout, name='dropout_2')(x)
sm = Dense(output_channels, activation='softmax', name='pred')(x)
model = Model(inputs=input_tensor, outputs=sm)
return model
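# Illustrative usage sketch (added for clarity, not part of the original file;
# the clip shape assumes the Sports-1M defaults used above: 16 frames of
# 112x112 RGB with channels-last ordering):
if __name__ == '__main__':
    clip = Input(shape=(16, 112, 112, 3))  # (frames, height, width, channels)
    model = c3d(t=16, input_tensor=clip, dropout=0.5)
    model.summary()  # ends in the 487-way softmax layer named 'pred'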
| mit |
mbernasocchi/QGIS | tests/src/python/test_qgsziputils.py | 45 | 3763 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for zip functions.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Paul Blottiere'
__date__ = '06/7/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import os
from qgis.core import QgsZipUtils
from qgis.testing import unittest
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QTemporaryFile, QTemporaryDir
def tmpPath():
f = QTemporaryFile()
f.open()
f.close()
os.remove(f.fileName())
return f.fileName()
class TestQgsZip(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.zipDir = os.path.join(unitTestDataPath(), "zip")
def test_zip_ok(self):
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(tmpPath(), [f0, f1, f2])
self.assertTrue(rc)
def test_zip_file_yet_exist(self):
zip = QTemporaryFile()
zip.open()
zip.close()
os.remove(zip.fileName())
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(zip.fileName(), [f0, f1, f2])
self.assertTrue(rc)
rc = QgsZipUtils.zip(zip.fileName(), [f0, f1, f2])
self.assertFalse(rc)
def test_zip_file_empty(self):
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip("", [f0, f1, f2])
self.assertFalse(rc)
def test_zip_input_file_not_exist(self):
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'fake.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(tmpPath(), [f0, f1, f2])
self.assertFalse(rc)
def test_unzip_ok(self):
outDir = QTemporaryDir()
zip = os.path.join(self.zipDir, 'testzip.zip')
rc, files = QgsZipUtils.unzip(zip, outDir.path())
self.assertTrue(rc)
self.assertEqual(len(files), 11)
def test_unzip_file_not_exist(self):
outDir = QTemporaryDir()
zip = os.path.join(self.zipDir, 'fake.zip')
rc, files = QgsZipUtils.unzip(zip, outDir.path())
self.assertFalse(rc)
def test_unzip_file_empty(self):
outDir = QTemporaryDir()
rc, files = QgsZipUtils.unzip("", outDir.path())
self.assertFalse(rc)
def test_unzip_dir_not_exist(self):
zip = os.path.join(self.zipDir, 'testzip.zip')
rc, files = QgsZipUtils.unzip(zip, '/tmp/fake')
self.assertFalse(rc)
def test_unzip_dir_empty(self):
zip = os.path.join(self.zipDir, 'testzip.zip')
rc, files = QgsZipUtils.unzip(zip, '')
self.assertFalse(rc)
def test_zip_unzip_ok(self):
zip = tmpPath()
f0 = os.path.join(unitTestDataPath(), 'multipoint.shp')
f1 = os.path.join(unitTestDataPath(), 'lines.shp')
f2 = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsZipUtils.zip(zip, [f0, f1, f2])
self.assertTrue(rc)
outDir = QTemporaryDir()
rc, files = QgsZipUtils.unzip(zip, outDir.path())
self.assertTrue(rc)
self.assertEqual(len(files), 3)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
krez13/scikit-learn | sklearn/externals/joblib/_memory_helpers.py | 303 | 3605 | try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
        argument, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
        return text
| bsd-3-clause |
prarthitm/edxplatform | openedx/core/lib/block_structure/factory.py | 7 | 3851 | """
Module for factory class for BlockStructure objects.
"""
from .block_structure import BlockStructureModulestoreData, BlockStructureBlockData
class BlockStructureFactory(object):
"""
Factory class for BlockStructure objects.
"""
@classmethod
def create_from_modulestore(cls, root_block_usage_key, modulestore):
"""
Creates and returns a block structure from the modulestore
starting at the given root_block_usage_key.
Arguments:
root_block_usage_key (UsageKey) - The usage_key for the root
of the block structure that is to be created.
modulestore (ModuleStoreRead) - The modulestore that
contains the data for the xBlocks within the block
structure starting at root_block_usage_key.
Returns:
BlockStructureModulestoreData - The created block structure
with instantiated xBlocks from the given modulestore
starting at root_block_usage_key.
Raises:
xmodule.modulestore.exceptions.ItemNotFoundError if a block for
root_block_usage_key is not found in the modulestore.
"""
block_structure = BlockStructureModulestoreData(root_block_usage_key)
blocks_visited = set()
def build_block_structure(xblock):
"""
Recursively update the block structure with the given xBlock
and its descendants.
"""
# Check if the xblock was already visited (can happen in
# DAGs).
if xblock.location in blocks_visited:
return
# Add the xBlock.
blocks_visited.add(xblock.location)
block_structure._add_xblock(xblock.location, xblock) # pylint: disable=protected-access
# Add relations with its children and recurse.
for child in xblock.get_children():
block_structure._add_relation(xblock.location, child.location) # pylint: disable=protected-access
build_block_structure(child)
root_xblock = modulestore.get_item(root_block_usage_key, depth=None, lazy=False)
build_block_structure(root_xblock)
return block_structure
@classmethod
def create_from_cache(cls, root_block_usage_key, block_structure_cache):
"""
Deserializes and returns the block structure starting at
root_block_usage_key from the given cache, if it's found in the cache.
        The given root_block_usage_key must equal the root_block_usage_key
previously passed to serialize_to_cache.
Arguments:
root_block_usage_key (UsageKey) - The usage_key for the root
of the block structure that is to be deserialized from
the given cache.
block_structure_cache (BlockStructureCache) - The
cache from which the block structure is to be
deserialized.
Returns:
BlockStructure - The deserialized block structure starting
at root_block_usage_key, if found in the cache.
NoneType - If the root_block_usage_key is not found in the cache.
"""
return block_structure_cache.get(root_block_usage_key)
@classmethod
def create_new(cls, root_block_usage_key, block_relations, transformer_data, block_data_map):
"""
Returns a new block structure for given the arguments.
"""
block_structure = BlockStructureBlockData(root_block_usage_key)
block_structure._block_relations = block_relations # pylint: disable=protected-access
block_structure.transformer_data = transformer_data
block_structure._block_data_map = block_data_map # pylint: disable=protected-access
return block_structure
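# Illustrative usage sketch (added for clarity, not part of the original
# module; `store` and `course_key` are hypothetical stand-ins for a real
# modulestore instance and course key):
#
#     root_usage_key = store.make_course_usage_key(course_key)
#     block_structure = BlockStructureFactory.create_from_modulestore(
#         root_usage_key, store)
#     # block_structure now holds every xBlock reachable from the course root.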
| agpl-3.0 |
tal-nino/ansible | lib/ansible/plugins/lookup/ini.py | 82 | 3304 | # (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import StringIO
import os
import codecs
import ConfigParser
import re
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def read_properties(self, filename, key, dflt, is_regexp):
config = StringIO.StringIO()
config.write('[java_properties]\n' + open(filename).read())
config.seek(0, os.SEEK_SET)
self.cp.readfp(config)
return self.get_value(key, 'java_properties', dflt, is_regexp)
def read_ini(self, filename, key, section, dflt, is_regexp):
self.cp.readfp(open(filename))
return self.get_value(key, section, dflt, is_regexp)
def get_value(self, key, section, dflt, is_regexp):
# Retrieve all values from a section using a regexp
if is_regexp:
return [v for k, v in self.cp.items(section) if re.match(key, k)]
value = None
# Retrieve a single value
try:
value = self.cp.get(section, key)
except ConfigParser.NoOptionError as e:
return dflt
return value
def run(self, terms, variables=None, **kwargs):
basedir = self.get_basedir(variables)
self.basedir = basedir
self.cp = ConfigParser.ConfigParser()
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'file' : 'ansible.ini',
're' : False,
'default' : None,
'section' : "global",
'type' : "ini",
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise errors.AnsibleError(e)
path = self._loader.path_dwim_relative(basedir, 'files', paramvals['file'])
if paramvals['type'] == "properties":
var = self.read_properties(path, key, paramvals['default'], paramvals['re'])
else:
var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])
if var is not None:
if type(var) is list:
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
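# Illustrative template usage sketch (added for clarity, not part of the
# original plugin; the file, section and key names are hypothetical):
#
#     # value of "mysql_port" from the [global] section of files/ansible.ini
#     {{ lookup('ini', 'mysql_port section=global file=ansible.ini') }}
#
#     # same lookup against a java-style .properties file
#     {{ lookup('ini', 'user.name type=properties file=user.properties') }}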
| gpl-3.0 |
davidsoncolin/IMS | UI/QActorWidget.py | 1 | 2542 | #!/usr/bin/env python
from PySide import QtCore, QtGui
class QActorWidget(QtGui.QWidget):
def __init__(self, cb, parent=None):
self.cb = cb
self.parent = parent
QtGui.QWidget.__init__(self, parent)
self.groupLayout = QtGui.QVBoxLayout(self)
self.groupTabs = QtGui.QTabWidget()
self.groupLayout.addWidget(self.groupTabs)
self.groupLayout.addStretch(1)
self.setLayout(self.groupLayout)
self.actors = {}
self.actorNames = []
self.notRecordingPixmap = QtGui.QPixmap("img/NotRecording.png").scaledToHeight(16)
self.recordingPixmap = QtGui.QPixmap("img/Recording.png").scaledToHeight(16)
def addActor(self, name):
if self.actors.has_key(name):
return self.actors[name]
self.actors[name] = actor = {}
self.actorNames.append(name)
actorGroup = QtGui.QWidget()
actorGroupLayout = QtGui.QVBoxLayout(actorGroup)
actorVisible = QtGui.QCheckBox('Visible', actorGroup)
actorVisible.setCheckState(QtCore.Qt.Checked)
actorGroup.setLayout(actorGroupLayout)
actorLabel = QtGui.QLabel()
actor['group'] = actorGroup
actor['layout'] = actorGroupLayout
actor['data'] = actorLabel
actor['visible'] = actorVisible
actorVisible.cb = lambda x : self.cb(name, x)
actorVisible.stateChanged.connect(actorVisible.cb)
self.groupTabs.addTab(actorGroup,name)
actorGroupLayout.addWidget(actorVisible)
actorGroupLayout.addWidget(actorLabel)
actorLabel.setPixmap(self.recordingPixmap)
return actor
def setActorDofs(self, name, dofNames, sharedDofs, cb):
actor = self.actors[name]
layout = actor['layout']
import QActorDofsWidget
actor['dofsWidget'] = dofsWidget = QActorDofsWidget.QActorDofsWidget(name, cb, self)
layout.addWidget(dofsWidget)
dofsWidget.setDofs(dofNames,sharedDofs)
def syncActorDofs(self, name, dofValues):
self.actors[name]['dofsWidget'].syncSliders(dofValues)
def setActorData(self, name, value):
self.actors[name]['data'].setPixmap(self.notRecordingPixmap if value else self.recordingPixmap)
def removeActor(self, name):
if self.actors.has_key(name):
self.actorNames.remove(name)
self.actors.pop(name)['group'].deleteLater() # mark for deletion!
if __name__ == '__main__':
import sys
global app, win
app = QtGui.QApplication(sys.argv)
def test(actor, value):
print 'cb',actor,value
win = QActorWidget(test)
win.addActor('charles')
win.addActor('colin')
win.addActor('fred')
win.setActorData('fred', True)
win.removeActor('colin')
win.show()
app.connect(app, QtCore.SIGNAL('lastWindowClosed()') , app.quit)
sys.exit(app.exec_())
| mit |
zemanel/ansible | lib/ansible/inventory/ini.py | 25 | 7628 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors
from ansible import utils
import shlex
import re
import ast
class InventoryParser(object):
"""
Host inventory for ansible.
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
with open(filename) as fh:
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._add_allgroup_children()
self._parse_group_variables()
return self.groups
@staticmethod
def _parse_value(v):
if "#" not in v:
try:
return ast.literal_eval(v)
# Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return v
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
def _add_allgroup_children(self):
for group in self.groups.values():
if group.depth == 0 and group.name != 'all':
self.groups['all'].add_child_group(group)
def _parse_base_groups(self):
# FIXME: refactor
ungrouped = Group(name='ungrouped')
all = Group(name='all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
for line in self.lines:
line = utils.before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
active_group_name = active_group_name.rsplit(":", 1)[0]
if active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
active_group_name = None
elif active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
elif line.startswith(";") or line == '':
pass
elif active_group_name:
tokens = shlex.split(line)
if len(tokens) == 0:
continue
hostname = tokens[0]
port = C.DEFAULT_REMOTE_PORT
# Three cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
# 1. A hostname that contains just a port
if hostname.count(":") > 1:
                    # Possibly an IPv6 address, or maybe a host line with multiple ranges
# IPv6 with Port XXX:XXX::XXX.port
# FQDN foo.example.com
if hostname.count(".") == 1:
(hostname, port) = hostname.rsplit(".", 1)
elif ("[" in hostname and
"]" in hostname and
":" in hostname and
(hostname.rindex("]") < hostname.rindex(":")) or
("]" not in hostname and ":" in hostname)):
(hostname, port) = hostname.rsplit(":", 1)
hostnames = []
if detect_range(hostname):
hostnames = expand_hostname_range(hostname)
else:
hostnames = [hostname]
for hn in hostnames:
host = None
if hn in self.hosts:
host = self.hosts[hn]
else:
host = Host(name=hn, port=port)
self.hosts[hn] = host
if len(tokens) > 1:
for t in tokens[1:]:
if t.startswith('#'):
break
try:
(k,v) = t.split("=", 1)
except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
# atlanta
# raleigh
def _parse_group_children(self):
group = None
for line in self.lines:
line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
line = line.replace("[","").replace(":children]","")
group = self.groups.get(line, None)
if group is None:
group = self.groups[line] = Group(name=line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
raise errors.AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
# [webservers:vars]
# http_port=1234
# maxRequestsPerChild=200
def _parse_group_variables(self):
group = None
for line in self.lines:
line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
raise errors.AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif line == '':
pass
elif group:
if "=" not in line:
raise errors.AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
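# Illustrative inventory sketch (added for clarity, not part of the original
# module; hosts, ports and variables are hypothetical) showing the constructs
# parsed above -- host ranges, host variables, group children and group vars:
#
#     [webservers]
#     web[01:03].example.com ansible_ssh_port=5555 favcolor=red
#
#     [southeast:children]
#     webservers
#
#     [southeast:vars]
#     http_port=8080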
| gpl-3.0 |
fbossy/SickRage | lib/hachoir_parser/common/win32.py | 74 | 6210 | from hachoir_core.field import (FieldSet,
UInt16, UInt32, Enum, String, Bytes, Bits, TimestampUUID60)
from hachoir_parser.video.fourcc import video_fourcc_name
from hachoir_core.bits import str2hex
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.network.common import MAC48_Address
# Dictionary: Windows codepage => Python charset name
CODEPAGE_CHARSET = {
874: "CP874",
# 932: Japanese Shift-JIS
# 936: Simplified Chinese GBK
# 949: Korean
# 950: Traditional Chinese Big5
1250: "WINDOWS-1250",
1251: "WINDOWS-1251",
1252: "WINDOWS-1252",
1253: "WINDOWS-1253",
1254: "WINDOWS-1254",
1255: "WINDOWS-1255",
1256: "WINDOWS-1256",
1257: "WINDOWS-1257",
1258: "WINDOWS-1258",
65001: "UTF-8",
}
class PascalStringWin16(FieldSet):
def __init__(self, parent, name, description=None, strip=None, charset="UTF-16-LE"):
FieldSet.__init__(self, parent, name, description)
length = self["length"].value
self._size = 16 + length * 16
self.strip = strip
self.charset = charset
def createFields(self):
yield UInt16(self, "length", "Length in widechar characters")
size = self["length"].value
if size:
yield String(self, "text", size*2, charset=self.charset, strip=self.strip)
def createValue(self):
if "text" in self:
return self["text"].value
else:
return None
class PascalStringWin32(FieldSet):
def __init__(self, parent, name, description=None, strip=None, charset="UTF-16-LE"):
FieldSet.__init__(self, parent, name, description)
length = self["length"].value
self._size = 32 + length * 16
self.strip = strip
self.charset = charset
def createFields(self):
yield UInt32(self, "length", "Length in widechar characters")
size = self["length"].value
if size:
yield String(self, "text", size*2, charset=self.charset, strip=self.strip)
def createValue(self):
if "text" in self:
return self["text"].value
else:
return None
class GUID(FieldSet):
"""
Windows 128 bits Globally Unique Identifier (GUID)
See RFC 4122
"""
static_size = 128
NULL = "00000000-0000-0000-0000-000000000000"
FIELD_NAMES = {
3: ("sha1_high", "sha1_low"),
4: ("random_high", "random_low"),
5: ("md5_high", "md5_low"),
}
VERSION_NAME = {
1: "Timestamp & MAC-48",
2: "DCE Security version",
3: "Name SHA-1 hash",
4: "Randomly generated",
5: "Name MD5 hash",
}
VARIANT_NAME = {
0: "NCS",
2: "Leach-Salz",
# 5: Microsoft Corporation?
6: "Microsoft Corporation",
7: "Reserved Future",
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self.version = self.stream.readBits(self.absolute_address + 32 + 16 + 12, 4, self.endian)
def createFields(self):
if self.version == 1:
yield TimestampUUID60(self, "time")
yield Enum(Bits(self, "version", 4), self.VERSION_NAME)
yield Enum(Bits(self, "variant", 3), self.VARIANT_NAME)
yield textHandler(Bits(self, "clock", 13), hexadecimal)
# yield textHandler(Bits(self, "clock", 16), hexadecimal)
if self.version == 1:
yield MAC48_Address(self, "mac", "IEEE 802 MAC address")
else:
yield Bytes(self, "node", 6)
else:
namea, nameb = self.FIELD_NAMES.get(
self.version, ("data_a", "data_b"))
yield textHandler(Bits(self, namea, 60), hexadecimal)
yield Enum(Bits(self, "version", 4), self.VERSION_NAME)
yield Enum(Bits(self, "variant", 3), self.VARIANT_NAME)
yield textHandler(Bits(self, nameb, 61), hexadecimal)
def createValue(self):
addr = self.absolute_address
a = self.stream.readBits (addr, 32, self.endian)
b = self.stream.readBits (addr + 32, 16, self.endian)
c = self.stream.readBits (addr + 48, 16, self.endian)
d = self.stream.readBytes(addr + 64, 2)
e = self.stream.readBytes(addr + 80, 6)
return "%08X-%04X-%04X-%s-%s" % (a, b, c, str2hex(d), str2hex(e))
def createDisplay(self):
value = self.value
if value == self.NULL:
name = "Null GUID: "
else:
name = "GUID v%u (%s): " % (self.version, self["version"].display)
return name + value
def createRawDisplay(self):
value = self.stream.readBytes(self.absolute_address, 16)
return str2hex(value, format=r"\x%02x")
class BitmapInfoHeader(FieldSet):
""" Win32 BITMAPINFOHEADER structure from GDI """
static_size = 40*8
COMPRESSION_NAME = {
0: u"Uncompressed (RGB)",
1: u"RLE (8 bits)",
2: u"RLE (4 bits)",
3: u"Bitfields",
4: u"JPEG",
5: u"PNG"
}
def __init__(self, parent, name, use_fourcc=False):
FieldSet.__init__(self, parent, name)
self._use_fourcc = use_fourcc
def createFields(self):
yield UInt32(self, "hdr_size", "Header size (in bytes) (=40)")
yield UInt32(self, "width", "Width")
yield UInt32(self, "height", "Height")
yield UInt16(self, "nb_planes", "Color planes")
yield UInt16(self, "bpp", "Bits/pixel")
if self._use_fourcc:
yield Enum(String(self, "codec", 4, charset="ASCII"), video_fourcc_name)
else:
yield Enum(UInt32(self, "codec", "Compression"), self.COMPRESSION_NAME)
yield UInt32(self, "size", "Image size (in bytes)")
yield UInt32(self, "xres", "X pixels per meter")
yield UInt32(self, "yres", "Y pixels per meter")
yield UInt32(self, "color_used", "Number of used colors")
yield UInt32(self, "color_important", "Number of important colors")
def createDescription(self):
return "Bitmap info header: %ux%u pixels, %u bits/pixel" % \
(self["width"].value, self["height"].value, self["bpp"].value)
| gpl-3.0 |
p4datasystems/CarnotKEdist | dist/Lib/types.py | 30 | 2149 | """Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.
NoneType = type(None)
TypeType = type
ObjectType = object
IntType = int
LongType = long
FloatType = float
BooleanType = bool
try:
ComplexType = complex
except NameError:
pass
StringType = str
# StringTypes is already outdated. Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)". But
# we keep it around for compatibility with Python 2.2.
try:
UnicodeType = unicode
StringTypes = (StringType, UnicodeType)
except NameError:
StringTypes = (StringType,)
# XXX: no buffer in jython
#BufferType = buffer
TupleType = tuple
ListType = list
DictType = DictionaryType = dict
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.func_code)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m) # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
# XXX: Jython sys is not a real module
#ModuleType = type(sys)
ModuleType = type(sys.modules[__name__])
FileType = file
XRangeType = xrange
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
del tb
SliceType = slice
EllipsisType = type(Ellipsis)
DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.func_code)
MemberDescriptorType = type(FunctionType.func_globals)
del sys, _f, _g, _C, _x # Not for export
| apache-2.0 |
rahuldhote/odoo | openerp/cli/start.py | 240 | 2748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import os
import sys
from . import Command
from .server import main
from openerp.modules.module import get_module_root, MANIFEST
from openerp.service.db import _create_empty_database, DatabaseExists
class Start(Command):
"""Quick start the Odoo server for your project"""
def get_module_list(self, path):
mods = glob.glob(os.path.join(path, '*/%s' % MANIFEST))
return [mod.split(os.path.sep)[-2] for mod in mods]
def run(self, cmdargs):
parser = argparse.ArgumentParser(
prog="%s start" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__
)
parser.add_argument('--path', default=".",
help="Directory where your project's modules are stored (will autodetect from current dir)")
parser.add_argument("-d", "--database", dest="db_name", default=None,
help="Specify the database name (default to project's directory name")
args, unknown = parser.parse_known_args(args=cmdargs)
project_path = os.path.abspath(os.path.expanduser(os.path.expandvars(args.path)))
module_root = get_module_root(project_path)
db_name = None
if module_root:
# started in a module so we choose this module name for database
db_name = project_path.split(os.path.sep)[-1]
# go to the parent's directory of the module root
project_path = os.path.abspath(os.path.join(project_path, os.pardir))
# check if one of the subfolders has at least one module
mods = self.get_module_list(project_path)
if mods and '--addons-path' not in cmdargs:
cmdargs.append('--addons-path=%s' % project_path)
if not args.db_name:
args.db_name = db_name or project_path.split(os.path.sep)[-1]
cmdargs.extend(('-d', args.db_name))
# TODO: forbid some database names ? eg template1, ...
try:
_create_empty_database(args.db_name)
except DatabaseExists, e:
pass
except Exception, e:
die("Could not create database `%s`. (%s)" % (args.db_name, e))
if '--db-filter' not in cmdargs:
cmdargs.append('--db-filter=^%s$' % args.db_name)
# Remove --path /-p options from the command arguments
def to_remove(i, l):
return l[i] == '-p' or l[i].startswith('--path') or \
(i > 0 and l[i-1] in ['-p', '--path'])
cmdargs = [v for i, v in enumerate(cmdargs)
if not to_remove(i, cmdargs)]
main(cmdargs)
def die(message, code=1):
print >>sys.stderr, message
sys.exit(code)
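# Illustrative invocation sketch (added for clarity, not part of the original
# file; the path and database name are hypothetical):
#
#     # autodetect modules under the given path and create/use a database
#     # named after the project directory:
#     odoo.py start --path ~/projects/my_modules
#
#     # same, but with an explicit database name:
#     odoo.py start -p ~/projects/my_modules -d testdb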
| agpl-3.0 |
onitake/ansible | test/units/mock/yaml_helper.py | 209 | 5267 | import io
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
class YamlTestUtils(object):
"""Mixin class to combine with a unittest.TestCase subclass."""
def _loader(self, stream):
"""Vault related tests will want to override this.
Vault cases should setup a AnsibleLoader that has the vault password."""
return AnsibleLoader(stream)
def _dump_stream(self, obj, stream, dumper=None):
"""Dump to a py2-unicode or py3-string stream."""
if PY3:
return yaml.dump(obj, stream, Dumper=dumper)
else:
return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
def _dump_string(self, obj, dumper=None):
"""Dump to a py2-unicode or py3-string"""
if PY3:
return yaml.dump(obj, Dumper=dumper)
else:
return yaml.dump(obj, Dumper=dumper, encoding=None)
def _dump_load_cycle(self, obj):
        # Each pass through a dump or load revs the 'generation'
# obj to yaml string
string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
# wrap a stream/file like StringIO around that yaml
stream_from_object_dump = io.StringIO(string_from_object_dump)
loader = self._loader(stream_from_object_dump)
# load the yaml stream to create a new instance of the object (gen 2)
obj_2 = loader.get_data()
        # dump the gen 2 objects directly to strings
string_from_object_dump_2 = self._dump_string(obj_2,
dumper=AnsibleDumper)
# The gen 1 and gen 2 yaml strings
self.assertEquals(string_from_object_dump, string_from_object_dump_2)
# the gen 1 (orig) and gen 2 py object
self.assertEquals(obj, obj_2)
# again! gen 3... load strings into py objects
stream_3 = io.StringIO(string_from_object_dump_2)
loader_3 = self._loader(stream_3)
obj_3 = loader_3.get_data()
string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
self.assertEquals(obj, obj_3)
# should be transitive, but...
self.assertEquals(obj_2, obj_3)
self.assertEquals(string_from_object_dump, string_from_object_dump_3)
def _old_dump_load_cycle(self, obj):
'''Dump the passed in object to yaml, load it back up, dump again, compare.'''
stream = io.StringIO()
yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
self._dump_stream(obj, stream, dumper=AnsibleDumper)
yaml_string_from_stream = stream.getvalue()
# reset stream
stream.seek(0)
loader = self._loader(stream)
# loader = AnsibleLoader(stream, vault_password=self.vault_password)
obj_from_stream = loader.get_data()
stream_from_string = io.StringIO(yaml_string)
loader2 = self._loader(stream_from_string)
# loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
obj_from_string = loader2.get_data()
stream_obj_from_stream = io.StringIO()
stream_obj_from_string = io.StringIO()
if PY3:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
else:
yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
stream_obj_from_stream.seek(0)
stream_obj_from_string.seek(0)
if PY3:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
else:
yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
assert yaml_string == yaml_string_obj_from_stream
assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
yaml_string_stream_obj_from_string)
assert obj == obj_from_stream
assert obj == obj_from_string
assert obj == yaml_string_obj_from_stream
assert obj == yaml_string_obj_from_string
assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
return {'obj': obj,
'yaml_string': yaml_string,
'yaml_string_from_stream': yaml_string_from_stream,
'obj_from_stream': obj_from_stream,
'obj_from_string': obj_from_string,
'yaml_string_obj_from_string': yaml_string_obj_from_string}
| gpl-3.0 |
jclakkis/discus-inferno | flaskenv/lib/python2.7/site-packages/psycopg2/tests/test_bug_gc.py | 62 | 1719 | #!/usr/bin/env python
# bug_gc.py - test for refcounting/GC bug
#
# Copyright (C) 2010-2011 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions
import unittest
import gc
from testutils import ConnectingTestCase, skip_if_no_uuid
class StolenReferenceTestCase(ConnectingTestCase):
@skip_if_no_uuid
def test_stolen_reference_bug(self):
def fish(val, cur):
gc.collect()
return 42
UUID = psycopg2.extensions.new_type((2950,), "UUID", fish)
psycopg2.extensions.register_type(UUID, self.conn)
curs = self.conn.cursor()
curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid")
curs.fetchone()
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit |
wolfv/uberwriter | uberwriter/plugins/bibtex/fuzzywuzzy/process.py | 11 | 8790 | #!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import itertools
from . import fuzz
from . import utils
def extract(query, choices, processor=None, scorer=None, limit=5):
"""Select the best match in a list or dictionary of choices.
Find best matches in a list or dictionary of choices, return a
    list of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.
Arguments:
query: An object representing the thing we want to find.
choices: An iterable or dictionary-like object containing choices
to be matched against the query. Dictionary arguments of
{key: value} pairs will attempt to match the query against
each value.
processor: Optional function of the form f(a) -> b, where a is an
individual choice and b is the choice to be used in matching.
This can be used to match against, say, the first element of
a list:
lambda x: x[0]
Defaults to fuzzywuzzy.utils.full_process().
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.WRatio() is used and expects both query and
choice to be strings.
limit: Optional maximum for the number of elements returned. Defaults
to 5.
Returns:
List of tuples containing the match and its score.
If a list is used for choices, then the result will be 2-tuples.
If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.
For example, searching for 'bird' in the dictionary
{'bard': 'train', 'dog': 'man'}
may return
[('train', 22, 'bard'), ('man', 0, 'dog')]
"""
if choices is None:
return []
# Catch generators without lengths
try:
if len(choices) == 0:
return []
except TypeError:
pass
# default, turn whatever the choice is into a workable string
if not processor:
processor = utils.full_process
# default: wratio
if not scorer:
scorer = fuzz.WRatio
sl = []
try:
# See if choices is a dictionary-like object.
for key, choice in choices.items():
processed = processor(choice)
score = scorer(query, processed)
sl.append((choice, score, key))
except AttributeError:
# It's a list; just iterate over it.
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
sl.append((choice, score))
sl.sort(key=lambda i: i[1], reverse=True)
return sl[:limit]
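# Illustrative usage sketch for extract() (added for clarity, not part of the
# original module; the choices are hypothetical and the scores approximate):
#
#     >>> choices = ["Atlanta Falcons", "New York Jets",
#     ...            "New York Giants", "Dallas Cowboys"]
#     >>> extract("new york jets", choices, limit=2)
#     [('New York Jets', 100), ('New York Giants', 78)]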
def extractBests(query, choices, processor=None, scorer=None, score_cutoff=0, limit=5):
"""Get a list of the best matches to a collection of choices.
Convenience function for getting the choices with best scores.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for transforming choices before matching.
See extract().
scorer: Scoring function for extract().
score_cutoff: Optional argument for score threshold. No matches with
a score less than this number will be returned. Defaults to 0.
limit: Optional maximum for the number of elements returned. Defaults
to 5.
    Returns: A list of (match, score) tuples.
"""
best_list = extract(query, choices, processor, scorer, limit)
return list(itertools.takewhile(lambda x: x[1] >= score_cutoff, best_list))
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
"""Find the single best match above a score in a list of choices.
This is a convenience method which returns the single best choice.
See extract() for the full arguments list.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for transforming choices before matching.
See extract().
scorer: Scoring function for extract().
score_cutoff: Optional argument for score threshold. If the best
match is found, but it is not greater than this number, then
return None anyway ("not a good enough match"). Defaults to 0.
Returns:
A tuple containing a single match and its score, if a match
was found that was above score_cutoff. Otherwise, returns None.
"""
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0 and best_list[0][1] >= score_cutoff:
return best_list[0]
return None
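# Illustrative usage sketch for extractOne(), continuing the choices defined
# in the sketch above (added for clarity, not part of the original module;
# the score is approximate):
#
#     >>> extractOne("cowboys", choices)
#     ('Dallas Cowboys', 90)
#     >>> extractOne("cowboys", choices, score_cutoff=95)  # best match below cutoff
#     >>> # (returns None)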
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
"""This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
since we assume this item contains the most entity information and returns that. It breaks string
length ties on an alphabetical sort.
Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
sensitive.
Args:
contains_dupes: A list of strings that we would like to dedupe.
threshold: the numerical value (0,100) point at which we expect to find duplicates.
Defaults to 70 out of 100
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.token_set_ratio() is used and expects both query and
choice to be strings.
Returns:
A deduplicated list. For example:
In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
In: fuzzy_dedupe(contains_dupes)
Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
"""
extractor = []
# iterate over items in *contains_dupes*
for item in contains_dupes:
# return all duplicate matches found
matches = extract(item, contains_dupes, limit=None, scorer=scorer)
# filter matches based on the threshold
filtered = [x for x in matches if x[1] > threshold]
# if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
if len(filtered) == 1:
extractor.append(filtered[0][0])
else:
# alpha sort
filtered = sorted(filtered, key=lambda x: x[0])
# length sort
filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
# take first item as our 'canonical example'
extractor.append(filter_sort[0][0])
# uniquify *extractor* list
keys = {}
for e in extractor:
keys[e] = 1
extractor = keys.keys()
# check that extractor differs from contain_dupes (e.g. duplicates were found)
# if not, then return the original list
if len(extractor) == len(contains_dupes):
return contains_dupes
else:
return extractor
| gpl-3.0 |
thopiekar/Cura | cura/Machines/Models/MultiBuildPlateModel.py | 2 | 2526 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import QTimer, pyqtSignal, pyqtProperty
from UM.Application import Application
from UM.Scene.Camera import Camera
from UM.Scene.Selection import Selection
from UM.Qt.ListModel import ListModel
#
# This is the model for multi build plate feature.
# This has nothing to do with the build plate types you can choose on the sidebar for a machine.
#
class MultiBuildPlateModel(ListModel):
maxBuildPlateChanged = pyqtSignal()
activeBuildPlateChanged = pyqtSignal()
selectionChanged = pyqtSignal()
def __init__(self, parent = None):
super().__init__(parent)
self._update_timer = QTimer()
self._update_timer.setInterval(100)
self._update_timer.setSingleShot(True)
self._update_timer.timeout.connect(self._updateSelectedObjectBuildPlateNumbers)
self._application = Application.getInstance()
self._application.getController().getScene().sceneChanged.connect(self._updateSelectedObjectBuildPlateNumbersDelayed)
Selection.selectionChanged.connect(self._updateSelectedObjectBuildPlateNumbers)
self._max_build_plate = 1 # default
self._active_build_plate = -1
def setMaxBuildPlate(self, max_build_plate):
if self._max_build_plate != max_build_plate:
self._max_build_plate = max_build_plate
self.maxBuildPlateChanged.emit()
## Return the highest build plate number
@pyqtProperty(int, notify = maxBuildPlateChanged)
def maxBuildPlate(self):
return self._max_build_plate
def setActiveBuildPlate(self, nr):
if self._active_build_plate != nr:
self._active_build_plate = nr
self.activeBuildPlateChanged.emit()
@pyqtProperty(int, notify = activeBuildPlateChanged)
def activeBuildPlate(self):
return self._active_build_plate
def _updateSelectedObjectBuildPlateNumbersDelayed(self, *args):
if not isinstance(args[0], Camera):
self._update_timer.start()
def _updateSelectedObjectBuildPlateNumbers(self, *args):
result = set()
for node in Selection.getAllSelectedObjects():
result.add(node.callDecoration("getBuildPlateNumber"))
self._selection_build_plates = list(result)
self.selectionChanged.emit()
@pyqtProperty("QVariantList", notify = selectionChanged)
def selectionBuildPlates(self):
return self._selection_build_plates
| lgpl-3.0 |
adhoc-dev/odoo-addons | product_pack/models/pack.py | 5 | 3149 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import fields, models, api
class product_pack(models.Model):
_name = 'product.pack.line'
_rec_name = 'product_id'
parent_product_id = fields.Many2one(
'product.product',
'Parent Product',
ondelete='cascade',
required=True
)
quantity = fields.Float(
'Quantity',
required=True,
default=1.0,
)
product_id = fields.Many2one(
'product.product',
'Product',
ondelete='cascade',
required=True,
)
@api.multi
def get_sale_order_line_vals(self, line, order):
self.ensure_one()
# pack_price = 0.0
subproduct = self.product_id
quantity = self.quantity * line.product_uom_qty
taxes = order.fiscal_position.map_tax(
subproduct.taxes_id)
tax_id = [(6, 0, taxes.ids)]
if subproduct.uos_id:
uos_id = subproduct.uos_id.id
uos_qty = quantity * subproduct.uos_coeff
else:
uos_id = False
uos_qty = quantity
        # if the pack is fixed price or totalice price we don't want amounts on
# pack lines
if line.product_id.pack_price_type in [
'fixed_price', 'totalice_price']:
price = 0.0
discount = 0.0
else:
pricelist = order.pricelist_id.id
price = self.env['product.pricelist'].price_get(
subproduct.id, quantity,
order.partner_id.id, context={
'uom': subproduct.uom_id.id,
'date': order.date_order})[pricelist]
discount = line.discount
# Obtain product name in partner's language
if order.partner_id.lang:
subproduct = subproduct.with_context(
lang=order.partner_id.lang)
subproduct_name = subproduct.name
vals = {
'order_id': order.id,
'name': '%s%s' % (
'> ' * (line.pack_depth + 1), subproduct_name
),
# 'delay': subproduct.sale_delay or 0.0,
'product_id': subproduct.id,
# 'procurement_ids': (
# [(4, x.id) for x in line.procurement_ids]
# ),
'price_unit': price,
'tax_id': tax_id,
'address_allotment_id': False,
'product_uom_qty': quantity,
'product_uom': subproduct.uom_id.id,
'product_uos_qty': uos_qty,
'product_uos': uos_id,
'product_packaging': False,
'discount': discount,
'number_packages': False,
'th_weight': False,
'state': 'draft',
'pack_parent_line_id': line.id,
'pack_depth': line.pack_depth + 1,
}
return vals
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
guiquanz/Dato-Core | src/unity/python/graphlab_util/lambda_closure_capture.py | 13 | 9429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import sys
import parser
import symbol
import token
import ast
import inspect
import graphlab.meta as meta
class expression_validator(ast.NodeVisitor):
"""
This tree walk attempts to validate an expression: that the expression
should *not* contain certain names.
This is used for the case
x = 10
lambda x: fn(x+15, x)
Really, the "x+15" expression is invalid since the expression uses an
lambda argument. However, it does evaluate correctly in the scope
since "x" also exists in the function scope.
We thus need to validate the expression before attempting to evaluate it
so that the expression must not contain a lambda argument.
    This validator here is a lot stricter than it should be since it will also
prevent all cases where something with the same name as the lambda argument
is created in an inner scope. For instance:
lambda x: fn((lambda x: x + 15)(5), x)
    lambda x: fn([x for x in [1,2,3]], x)
"""
def __init__(self, blocked_symbols):
self.blocked_symbols = blocked_symbols
def visit_Name(self, node):
if node.id in self.blocked_symbols:
raise RuntimeError("Blocked symbols encountered")
class attribute_reader(ast.NodeVisitor):
"""
Things like gl.extensions._demo_add
get parsed as
Attribute(value=Attribute(value=Name(id='gl', ctx=Load()),
attr='extensions', ctx=Load()), attr='_demo_add', ctx=Load())
This causes problems for
lambda x: gl.extensions._demo_add(x, 5)
    We need to break the attribute back down into the original dotted string
"""
def default(self, node):
raise NotImplementedError("Cannot process token at " +
str(node.lineno) + ":" + str(node.col_offset))
def visit_Name(self, node):
return node.id
def visit_Attribute(self, node):
s = self.visit(node.value)
return s + "." + node.attr
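# Hedged example (added): attribute_reader flattens a chained Attribute node
# back into its dotted source string.
#
#     import ast
#     node = ast.parse("gl.extensions._demo_add", mode="eval").body
#     attribute_reader().visit(node)   # -> "gl.extensions._demo_add"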
class Parameter(object):
def __init__(self, name):
self.name = name
def __str__(self):
return 'λ' + self.name
def __repr__(self):
return str(self)
class lambda_closure_visitor(ast.NodeVisitor):
"""
This implements a *very* limited decompiler. It only handles cases of
lambda x: fn(a, b, x, ...)
    where a, b, etc. are variables captured from the surrounding scope, and
    there may be some occurrences of x.
    No additional statements or expressions are permitted.
"""
FUNCTION = 0 # I am translating the wrapping lambda function
INNER_CALL = 1 # I am translating the function call inside
PARAMETER = 2 # I am just translating a function parameter
def __init__(self):
# The fn
self.closure_fn_name = ""
# A list of captured positional arguments
# lambda parameters are denoted by being of type Parameter
self.positional_args = []
# A dictionary of captured named arguments
# lambda parameters are denoted by being of type Parameter
self.named_args = {}
# List of all the input argument names
self.input_arg_names = []
self.caller_globals = []
self.state = self.FUNCTION
def default(self, node):
raise NotImplementedError("Cannot process token at " +
str(node.lineno) + ":" + str(node.col_offset))
def __repr__(self):
return str(self)
def __str__(self):
ret = self.closure_fn_name + "("
comma = False
for i in self.positional_args:
if comma:
ret = ret + ','
ret = ret + str(i)
comma = True
for i in self.named_args:
if comma:
ret = ret + ','
ret = ret + i + ":" + str(self.named_args[i])
comma = True
ret = ret + ")"
return ret
def translate_ast(self, ast_node):
#print(ast.dump(ast_node))
t = self.visit(ast_node)
def visit_Module(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected module in position " +
str(node.lineno) + ":" + str(node.col_offset))
for line in node.body:
self.visit(line)
def visit_Call(self, node):
if (self.state != self.INNER_CALL):
raise NotImplementedError("Unexpected call in position " +
str(node.lineno) + ":" + str(node.col_offset))
self.state = self.INNER_CALL
# this is the main closure function call
if self.closure_fn_name != "":
raise NotImplementedError("Cannot translate function call " +
str(node.lineno) + ":" + str(node.col_offset))
elif type(node.func) is ast.Name:
self.closure_fn_name = node.func.id
elif type(node.func) is ast.Attribute:
self.closure_fn_name = attribute_reader().visit(node.func)
else:
raise NotImplementedError("Unexpected type of function call.")
self.state = self.PARAMETER
for i in range(len(node.args)):
arg = node.args[i]
if type(arg) is ast.Name and arg.id in self.input_arg_names:
self.positional_args += [Parameter(arg.id)]
else:
try:
expression_validator(self.input_arg_names).visit(arg)
# try to evaluate the ast
result = eval(compile(ast.Expression(arg), '<string>', 'eval'), self.caller_globals)
except:
raise NotImplementedError("Only simple expressions not using the function arguments are permitted")
self.positional_args += [result]
# keyword arguments next
keywordargs = {i.arg:i.value for i in node.keywords}
for i in keywordargs:
arg = keywordargs[i]
if type(arg) is ast.Name and arg.id in self.input_arg_names:
self.named_args[i] = Parameter(arg.id)
else:
try:
expression_validator(self.input_arg_names).visit(arg)
# try to evaluate the ast
result = eval(compile(ast.Expression(arg), '<string>', 'eval'), self.caller_globals)
except:
raise NotImplementedError("Only simple expressions not using the function arguments are permitted")
self.named_args[i] = result
def visit_arguments(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected function")
self.input_arg_names = [arg.id for arg in node.args]
def visit_Name(self, node):
raise NotImplementedError("Unexpected name")
def visit_Return(self, node):
if (self.state != self.INNER_CALL):
raise NotImplementedError("Unexpected return")
return self.visit(node.value)
def visit_Lambda(self, node):
return self.visit_FunctionDef(node)
def visit_FunctionDef(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected function")
self.visit(node.args)
self.state = self.INNER_CALL
if type(node.body) is list:
next_node = node.body[0]
else:
next_node = node.body
if type(next_node) is ast.Call:
self.visit(next_node)
elif type(next_node) is ast.Return and type(next_node.value) is ast.Call:
self.visit(next_node.value)
else:
raise NotImplementedError("Function must comprise of just a function call ")
def visit_ClassDef(self, node):
raise NotImplementedError("Classes are not implemented")
def _isalambda(v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
def translate(fn):
visitor = lambda_closure_visitor()
visitor.caller_globals = fn.func_globals.copy()
# now. annoyingly enough certain captures are not here. We need to
# look in func_closures for it
if fn.func_closure:
closure = dict(zip(fn.func_code.co_freevars, (c.cell_contents for c in fn.func_closure)))
# inject closure into "caller_globals"
for i in closure:
visitor.caller_globals[i] = closure[i]
ast_node = None
try:
if not _isalambda(fn):
ast_node = ast.parse(inspect.getsource(fn))
except:
pass
try:
if ast_node == None:
ast_node = meta.decompiler.decompile_func(fn)
except:
pass
if ast_node is None:
raise RuntimeError("Cannot process provided function")
visitor.translate_ast(ast_node)
return visitor
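# Hedged usage sketch (assumes Python 2, like the rest of this module; the
# names below are illustrative only): translate() records which captured
# values and which lambda parameters feed the inner call.
#
#     def fn(a, b):
#         return a + b
#     offset = 10
#     info = translate(lambda x: fn(offset, x))
#     # info.closure_fn_name == 'fn'
#     # info.positional_args == [10, Parameter('x')]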
# if __name__ == "__main__":
# if len(sys.argv) <= 1:
# print("Usage:\n\t./Lua_Translator.py <FILENAME>\n")
# exit(-1)
# f = open(sys.argv[1] , 'r')
# l = f.readlines()
# f.close()
# s = ""
#
# for x in l:
# s = s + x
#
# ast_node = ast.parse(s)
#
# f = open(sys.argv[1].rpartition(".")[0] + "_trans.lua", 'w')
# test = translator_NodeVisitor(f)
# test.translate_ast(ast_node)
# f.close()
| agpl-3.0 |
visualputty/Landing-Lights | django/utils/datastructures.py | 239 | 15522 | from types import GeneratorType
from django.utils.copycompat import copy, deepcopy
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
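# Illustrative example (added, not part of Django's original source): lookups
# fall through the wrapped dicts in order, so the first dict containing the
# key wins.
#
#     md = MergeDict({'a': 1}, {'a': 2, 'b': 3})
#     md['a']                  # -> 1 (from the first dict)
#     md['b']                  # -> 3
#     md.get('c', 'missing')   # -> 'missing'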
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
            # second time to set up keyOrder correctly
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
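# Illustrative example (added): SortedDict keeps keys in insertion order,
# unlike the plain dict of the Python versions this code targets.
#
#     sd = SortedDict()
#     sd['b'] = 1
#     sd['a'] = 2
#     sd.keys()            # -> ['b', 'a']
#     sd.insert(0, 'c', 3)
#     sd.keys()            # -> ['c', 'b', 'a']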
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
import django.utils.copycompat as copy
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
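# Illustrative example (added): update() extends the per-key lists instead of
# replacing them, which is the main difference from dict.update().
#
#     mvd = MultiValueDict({'name': ['Adrian']})
#     mvd.update({'name': 'Simon'})
#     mvd.getlist('name')   # -> ['Adrian', 'Simon']
#     mvd['name']           # -> 'Simon' (item access returns the last value)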
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
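# Illustrative example (added): values fetched through the prefix are passed
# through the wrapper function, mirroring how the SQL construction code
# quotes identifiers.
#
#     dw = DictWrapper({'table': 'auth_user'}, lambda v: '"%s"' % v, 'qn_')
#     dw['table']      # -> 'auth_user'
#     dw['qn_table']   # -> '"auth_user"'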
| bsd-3-clause |
opendatadurban/scoda | scoda/templates/static/node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | 1970 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
self.assertFlavor('solaris', 'sunos5' , {});
self.assertFlavor('solaris', 'sunos' , {});
self.assertFlavor('linux' , 'linux2' , {});
self.assertFlavor('linux' , 'linux3' , {});
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
nachandr/cfme_tests | cfme/tests/containers/test_static_custom_attributes.py | 3 | 10129 | import re
from copy import deepcopy
from os import path
from random import choice
from string import ascii_letters
from string import digits
import pytest
from manageiq_client.api import APIException
from cfme import test_requirements
from cfme.containers.provider import ContainersProvider
from cfme.containers.provider.openshift import CustomAttribute
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(2),
pytest.mark.provider([ContainersProvider], scope='function'),
test_requirements.containers
]
def get_random_string(length):
valid_chars = digits + ascii_letters + ' !@#$%^&*()'
out = ''.join([choice(valid_chars) for _ in range(length)])
return re.sub(r'\s+', ' ', out)
ATTRIBUTES_DATASET = [
CustomAttribute('exp date', '2017-01-02', 'Date'),
CustomAttribute('sales force acount', 'ADF231VRWQ1', None),
CustomAttribute('expected num of nodes', '2', None)
]
VALUE_UPDATES = ['2018-07-12', 'ADF231VRWQ1', '1']
# TODO These should be factored into a single CRUD test
@pytest.fixture(scope='function')
def add_delete_custom_attributes(provider):
provider.add_custom_attributes(*ATTRIBUTES_DATASET)
view = navigate_to(provider, 'Details', force=True)
assert view.entities.summary('Custom Attributes').is_displayed
yield
try:
provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
except APIException:
logger.info("No custom attributes to delete")
def test_add_static_custom_attributes(add_delete_custom_attributes, provider):
"""Tests adding of static custom attributes to provider
Steps:
* Add static custom attributes (API)
* Go to provider summary page
Expected results:
        * The attributes were successfully added
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
view = navigate_to(provider, 'Details', force=True)
custom_attr_ui = view.entities.summary('Custom Attributes')
for attr in ATTRIBUTES_DATASET:
assert attr.name in custom_attr_ui.fields
assert custom_attr_ui.get_text_of(attr.name) == attr.value
def test_edit_static_custom_attributes(provider):
"""Tests editing of static custom attributes from provider
Prerequisite:
* test_add_static_custom_attributes passed.
Steps:
* Edit (update) the static custom attributes (API)
* Go to provider summary page
Expected results:
        * The attributes were successfully updated to the new values
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
provider.add_custom_attributes(*ATTRIBUTES_DATASET)
edited_attribs = deepcopy(ATTRIBUTES_DATASET)
for ii, value in enumerate(VALUE_UPDATES):
edited_attribs[ii].value = value
provider.edit_custom_attributes(*edited_attribs)
view = navigate_to(provider, 'Details', force=True)
custom_attr_ui = view.entities.summary('Custom Attributes')
for attr in edited_attribs:
assert attr.name in custom_attr_ui.fields
assert custom_attr_ui.get_text_of(attr.name) == attr.value
provider.delete_custom_attributes(*edited_attribs)
def test_delete_static_custom_attributes(add_delete_custom_attributes, request, provider):
"""Tests deleting of static custom attributes from provider
Steps:
* Delete the static custom attributes that recently added (API)
* Go to provider summary page
Expected results:
        * The attributes were successfully deleted
(you should not see a custom attributes table)
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
view = navigate_to(provider, 'Details', force=True)
if view.entities.summary('Custom Attributes').is_displayed:
for attr in ATTRIBUTES_DATASET:
assert attr.name not in view.entities.summary('Custom Attributes').fields
else:
logger.info("No custom attributes table to check")
assert True
ca = CustomAttribute('test_value', 'This is a test', None)
request.addfinalizer(lambda: provider.delete_custom_attributes(ca))
provider.add_custom_attributes(ca)
provider.add_custom_attributes(*ATTRIBUTES_DATASET)
provider.browser.refresh()
for attr in ATTRIBUTES_DATASET:
assert attr.name in view.entities.summary('Custom Attributes').fields
assert view.entities.summary('Custom Attributes').get_text_of(attr.name) == attr.value
provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
provider.browser.refresh()
if view.entities.summary('Custom Attributes').is_displayed:
for attr in ATTRIBUTES_DATASET:
assert attr.name not in view.entities.summary('Custom Attributes').fields
else:
logger.info("Custom Attributes Table does not exist. Expecting it to exist")
assert False
def test_add_attribute_with_empty_name(provider):
"""Tests adding of static custom attributes with empty field
Steps:
* add the static custom attribute with name "" (API)
* Go to provider summary page
Expected results:
* You should get an error
* You should not see this attribute in the custom attributes table
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
with pytest.raises(APIException):
provider.add_custom_attributes(
CustomAttribute('', "17")
)
        pytest.fail('You have added a custom attribute with an empty name '
                    'and didn\'t get an error!')
view = navigate_to(provider, 'Details', force=True)
if view.entities.summary('Custom Attributes').is_displayed:
assert "" not in view.entities.summary('Custom Attributes').fields
def test_add_date_attr_with_wrong_value(provider):
"""Trying to add attribute of type date with non-date value
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = CustomAttribute('nondate', "koko", 'Date')
with pytest.raises(APIException):
provider.add_custom_attributes(ca)
        pytest.fail('You have added a custom attribute of type '
'{} with value of {} and didn\'t get an error!'
.format(ca.field_type, ca.value))
view = navigate_to(provider, 'Details', force=True)
if view.entities.summary('Custom Attributes').is_displayed:
assert 'nondate' not in view.entities.summary('Custom Attributes').fields
def test_edit_non_exist_attribute(provider):
"""Trying to edit non-exist attribute
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = choice(ATTRIBUTES_DATASET)
# Note: we need to implement it inside the test instead of using
# the API (provider.edit_custom_attributes) in order to
# specify the href and yield the exception
payload = {
"action": "edit",
"resources": [{
"href": '{}/custom_attributes/9876543210000000'
.format(provider.href()),
"value": ca.value
}]}
with pytest.raises(APIException):
provider.appliance.rest_api.post(
path.join(provider.href(), 'custom_attributes'), **payload)
        pytest.fail('You tried to edit a nonexistent custom attribute '
'({}) and didn\'t get an error!'
.format(ca.value))
def test_delete_non_exist_attribute(provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = choice(ATTRIBUTES_DATASET)
with pytest.raises(APIException):
provider.delete_custom_attributes(ca)
        pytest.fail('You tried to delete a nonexistent custom attribute '
'({}) and didn\'t get an error!'
.format(ca.value))
def test_add_already_exist_attribute(provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = choice(ATTRIBUTES_DATASET)
provider.add_custom_attributes(ca)
try:
provider.add_custom_attributes(ca)
except APIException:
pytest.fail('You tried to add a custom attribute that already exists'
'({}) and didn\'t get an error!'
.format(ca.value))
finally:
provider.delete_custom_attributes(ca)
def test_very_long_name_with_special_characters(request, provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = CustomAttribute(get_random_string(1000), 'very_long_name', None)
request.addfinalizer(lambda: provider.delete_custom_attributes(ca))
provider.add_custom_attributes(ca)
view = navigate_to(provider, 'Details', force=True)
assert ca.name in view.entities.summary('Custom Attributes').fields
# BZ 540647 was closed as no fix. Code was added that strips underscores from attribute names.
def test_very_long_value_with_special_characters(request, provider):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
ca = CustomAttribute('very long value', get_random_string(1000), None)
request.addfinalizer(lambda: provider.delete_custom_attributes(ca))
provider.add_custom_attributes(ca)
view = navigate_to(provider, 'Details', force=True)
assert ca.value == view.entities.summary('Custom Attributes').get_text_of(ca.name)
| gpl-2.0 |
MaxTyutyunnikov/lino | lino/modlib/vocbook/base.py | 1 | 33978 | # -*- coding: UTF-8 -*-
## Copyright 2011-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
Work in progress.
I currently use the :mod:`lino.modlib.vocbook`
module to generate teaching materials for my pupils,
the catering cooks at
`Vigala professional school <http://web.vigalattk.ee/>`_.
"""
import logging
#~ logging.basicConfig(filename='example.log',level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
import os
import codecs
import locale
import re
from lino.utils import AttrDict
from lino.utils import iif, curry
from lino.utils import memo
from lino.utils.html2xhtml import html2xhtml
#~ from lino.utils.xmlgen.html import html2rst as html2rst_
from lino.utils.xmlgen.html import html2rst
from lino.utils.xmlgen.html import E
from lino.utils.restify import restify
from atelier.rstgen import write_header
from atelier import rstgen
from lino.utils import htmlgen
USE_XHTML2ODT = False
#~ def html2rst(x):
#~ if isinstance(x,basestring): return x
#~ return html2rst_(x)
def e2s(g):
def fmt(e):
if isinstance(e,basestring): return e
return E.tostring(e)
return ' '.join([fmt(e) for e in g])
if USE_XHTML2ODT:
from Cheetah.Template import Template as CheetahTemplate
import xhtml2odt
class MyODTFile(xhtml2odt.ODTFile):
def render(self,context):
self.open()
tpl = CheetahTemplate(self.xml['content'],namespaces=[context])
nc = unicode(tpl) #.encode('utf-8')
if nc.startswith('<?xml version'):
#~ nc = nc.replace('<?xml version="1.0" encoding="UTF-8"?>','')
nc = nc.split('\n',1)[1]
self.xml['content'] = nc
#~ odt = self.xhtml_to_odt(xhtml)
#~ self.insert_content(odt)
if True:
f = open("content.xml","wt")
f.write(self.xml['content'].encode('utf-8'))
f.close()
self.add_styles()
self.save(self.options.output)
pronunciationRE = re.compile("^(.*)\s*(\[.*\])\s*",re.DOTALL)
def makedirs_if_missing(dirname):
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
class LanguageMeta(type):
def __new__(meta, classname, bases, classDict):
# Each subclass gets her own list of word types:
classDict['words'] = []
classDict['wordTypes'] = []
cls = type.__new__(meta, classname, bases, classDict)
return cls
class Language(object):
__metaclass__ = LanguageMeta
@classmethod
def add_wordtype(cls,wt):
cls.wordTypes.append(wt)
@classmethod
def register_word(cls,w):
for ew in cls.words:
if ew == w:
return ew
cls.words.append(w)
return w
class WordType(object):
text = None
@classmethod
def is_of_this_type(cls,w):
return w.type == cls
class Word(object):
type = None
text = None
pronounciation = ''
gender = None
form = None
def __init__(self,text,
type=None,
pronounciation=None,
gender=None,
form=None):
if not text:
raise Exception("Cannot create empty word!")
self.text = text
if type: self.type = type
if form is not None: self.form = form
if pronounciation:
assert pronounciation.startswith('[')
assert pronounciation.endswith(']')
self.pronounciation = pronounciation[1:-1]
if gender:
assert gender in ('m','f','mf','pl')
self.gender = gender
self.translations = []
self.units = []
def get_pron_html(self,article=False):
if not self.pronounciation:
return ''
#~ if article and Nom.is_of_this_type(self):
return "[%s]" % self.pronounciation
def add_to_unit(self,unit):
self.units.append(unit)
#~ unit.add_word(self)
def __repr__(self):
return "%r(%r)" % (self.text,self.type.__name__)
def __eq__(self,other):
if self.__class__ != other.__class__: return False
if self.text != other.text: return False
if self.pronounciation != other.pronounciation: return False
if self.gender != other.gender: return False
if self.type != other.type: return False
if self.form != other.form: return False
return True
def add_translations(self,translations):
for t in translations:
if not t in self.translations:
self.translations.append(t)
def opposite_gender(self):
if self.gender == 'f' : return 'm'
if self.gender == 'm' : return 'f'
return None
def get_partner(self,gender):
if self.gender == 'mf' or self.gender == gender:
return self
if not self.partner:
raise Exception("%r has no partner " % self)
return self.partner
class Column:
label = ''
def __init__(self,label):
self.label = label
@classmethod
def render(cls,w,book):
s = html2rst(E.div(*tuple(cls.word2html(w,book))))
#~ s = html2rst(e) for e (cls.word2html(w,book)))
if "<" in s:
raise Exception("2013116 %r" % cls.word2html(w,book))
if s.startswith('('):
s = '\\' + s
return s
@classmethod
def word2html(cls,w,book):
raise NotImplementedError()
class FR(Column):
label = 'prantsuse k.'
@classmethod
def word2html(cls,w,book):
for e in book.from_language.present_word2html(w,book):
yield e
class PRON(Column):
label = u'hääldamine'
@classmethod
def word2html(cls,w,book):
yield w.get_pron_html()
class ET(Column):
label = u'eesti k.'
@classmethod
def word2html(cls,w,book):
if len(w.translations) == 1:
yield w.translations[0]
else:
yield "; ".join(["(%d) %s" % (n+1,w) for n,w in enumerate(w.translations)])
class M(Column):
label = u'meessoost'
gender = 'm'
@classmethod
def word2html(cls,w,book):
#~ return html2rst(w.html_fr_lesson()) + ' ' + w.pronounciation
w = w.get_partner(cls.gender)
#~ return '<b>%s</b>' % w.text + ' ' + w.get_pron_html()
yield E.b(w.text)
yield ' '
yield w.get_pron_html()
class F(M):
label = u'naissoost'
gender = 'f'
class GEON(FR):
label = u'Maa'
class GEOM(Column):
gender = 'm'
label = u'omadussõna (m)'
@classmethod
def word2html(cls,w,book):
if not w.adjectif:
yield ''
return
w = w.adjectif
w = w.get_partner(cls.gender)
#~ return '<b>%s</b>' % w.text + ' ' + w.get_pron_html()
yield E.b(w.text)
yield ' '
yield w.get_pron_html()
class GEOF(GEOM):
label = u'omadussõna (n)'
gender = 'f'
#~ def mycmp(a,b):
#~ return locale.strcoll(a,b)
def sort_by_fr(a,b):
return locale.strcoll(a.text.lower(),b.text.lower())
#~ return locale.strcoll(S(a.fr),S(b.fr))
class Section:
def __init__(self,book,parent,
title=None,intro=None,
number=None,ref=None,
from_language=None,
to_language=None):
if from_language is None:
from_language = parent.from_language
if to_language is None:
to_language = parent.to_language
#~ if number is None:
#~ raise Exception("Section %r has no number" % title)
self.to_language = to_language
self.from_language = from_language
self.parent = parent
if number is not None:
if not isinstance(number,int):
raise Exception("Section %r got invalid number %r" % (title,number))
elif parent is not None:
number = len(parent.children) + 1
self.number = number
self.book = book
self.ref = ref
self.title = title
self.intro = intro
self.body = []
self.words = []
self.children = []
self.current_lesson = None
if self.ref:
if self.ref in self.book.ref2sect:
raise Exception("Duplicate reference %r" % self.ref)
self.book.ref2sect[self.ref] = self
def add_section(self,*args,**kw):
sect = Section(self.book,self,*args,**kw)
self.children.append(sect)
return sect
def add_index(self,*args,**kw):
sect = Index(self.book,self,*args,**kw)
self.children.append(sect)
return sect
def add_dictionary(self,*args,**kw):
sect = Dictionary(self.book,self,*args,**kw)
self.children.append(sect)
return sect
def add_lesson(self,*args,**kw):
self.current_lesson = Unit(self.book,self,*args,**kw)
self.children.append(self.current_lesson)
def add_after(self,chunk):
#~ self.current_lesson.body.append(chunk)
self.current_lesson.after.append(chunk)
def parse_words(self,cl,lines):
self.current_lesson.parse_words(cl,lines)
def name_parts(self):
if self.parent is None:
return ['index' ]
elif self.children:
return [ self.get_ref(), 'index' ]
else:
return [ self.get_ref() ]
def get_ref(self):
if self.ref:
return self.ref
if self.number is not None:
#~ return str(self.number)
return '%02d' % self.number
def rst_ref_to(self,text=None):
parts = self.name_parts()
#~ ref = self.get_ref()
p = self.parent
while p is not None:
pref = p.get_ref()
#~ if p.number is not None:
if pref is not None:
#~ parts = ['%02d' % p.number] + parts
parts = [pref] + parts
p = p.parent
if not text:
text = self.get_ref_text()
if self.book.writing_format == 'rst':
if text:
return ':doc:`%s </%s>`' % (text,'/'.join(parts))
return ':doc:`/%s`' % '/'.join(parts)
return "*" + text + "*"
#~ return ':doc:`%s </%s>`' % (self.title,'/'.join(parts))
def get_full_number(self):
number = str(self.number)
p = self.parent
while p is not None:
if p.number is not None:
number = str(p.number) + "." + number
p = p.parent
return number
def get_ref_text(self):
return self.title
def html_lines(self,level=1):
if self.number is None:
title = self.title
else:
title = "%s %s" % (self.get_full_number(),self.title)
if True:
if self.parent is not None:
title = restify(self.memo2rst(title)).strip()
if title.startswith('<p>') and title.endswith('</p>'):
title = title[3:-4]
#~ logger.info("20120311 title is %r", title)
else:
raise Exception("20120311 title is %r" % title)
yield htmlgen.H(level,title)
else:
tag = "H%d" % level
title = title.replace("<p>","<"+tag+">")
title = title.replace("</p>","</"+tag+">")
yield title
#~ yield "<H%d>%s</H%d>" % (level,,level)
if self.intro:
yield restify(self.memo2rst(self.intro))
if self.children:
for s in self.children:
for ln in s.html_lines(level+1):
yield ln
for chunk in self.body:
yield restify(self.memo2rst(chunk))
def write_rst_files(self,root):
fn = os.path.join(root,*self.name_parts()) + ".rst"
logger.info("Generate %s",fn)
newroot = os.path.dirname(fn)
makedirs_if_missing(newroot)
fd = codecs.open(fn,'w','utf-8')
if self.number is None:
title = self.title
else:
title = "%d. %s" % (self.number,self.title)
#~ if self.number is None:
#~ write_header(fd,1,"%s" % self.title)
#~ else:
#~ write_header(fd,1,"%d. %s" % (self.number,self.title))
write_header(fd,1,self.memo2rst(title))
self.write_body(fd)
fd.close()
for s in self.children:
s.write_rst_files(newroot)
def write_body(self,fd):
if self.intro:
fd.write(self.memo2rst(self.intro) + '\n\n')
for chunk in self.body:
fd.write(self.memo2rst(chunk) + '\n\n')
if self.children:
fd.write("""\
.. toctree::
:maxdepth: 2
""")
for s in self.children:
fd.write(" " + ("/".join(s.name_parts())) + "\n")
fd.write('\n\n')
def memo2rst(self,s):
return self.book.memo2rst(s)
class Unit(Section):
columns = [FR,PRON,ET]
def __init__(self,book,parent,title=None,intro=None,columns=None,show_headers=None,**kw):
if columns is not None:
self.columns = columns
if show_headers is None:
show_headers = True
elif show_headers is None:
show_headers = False
self.show_headers = show_headers
#~ self.parent = parent
Section.__init__(self,book,parent,title=title,intro=intro,**kw)
if not self.title:
self.title = u"Leçon %d" % self.number
self.after = []
#~ if after:
#~ self.add_after(after)
self.words = []
#~ def add_word(self,w):
#~ self.words.append(w)
def tablerow(self,w):
return [col.render(w,self) for col in self.columns]
def parse_words(self,cl,lines):
#~ lesson = self.current_lesson
for ln in lines.splitlines():
ln = ln.strip()
if ln and not ln.startswith('#'):
a = ln.split(':')
if len(a) != 2:
raise Exception("%r.split(':') is not 2" % ln)
fr_list = a[0].split('|')
et_list = a[1].split('|')
translations = []
for et in et_list:
et = et.strip()
if et == '-':
pass
elif et.startswith('#'):
pass
else:
w = self.to_language.parse_word(et)
translations.append(et)
main = None
for fr in fr_list:
w = self.from_language.parse_word(fr,cl,parent=main)
w.add_to_unit(self)
#~ w.add_lesson(self.current_lesson)
w.add_translations(translations)
if main:
main.marry(w)
else:
main = w
self.words.append(main)
def html_lines(self,level=1):
for ln in Section.html_lines(self,level):
yield ln
words = [w for w in self.words if w.parent is None]
if words:
t = htmlgen.TABLE([col.label for col in self.columns],
show_headers=self.show_headers)
def row(w):
return [col.word2html(w,self) for col in self.columns]
rows = [row(w) for w in words]
for ln in t.html_lines(rows):
yield ln
for chunk in self.after:
yield restify(self.memo2rst(chunk))
def write_body(self,fd):
Section.write_body(self,fd)
words = [w for w in self.words if w.parent is None]
if words:
t = rstgen.Table([col.label for col in self.columns],
show_headers=self.show_headers)
t.write(fd,[self.tablerow(w) for w in words])
for chunk in self.after:
fd.write('\n\n' + chunk + '\n\n')
#~ def uca_collator():
#~ """
#~ """
#~ logger.info("20120308 build uca_collator")
#~ c = Collator(fn)
#~ logger.info("20120308 uca_collator() done")
#~ return c
try:
from lino.utils.pyuca import Collator
#~ fn = os.path.join(os.path.dirname(__file__),'pyuca_allkeys.txt')
fn = 'uca_allkeys.txt'
UCA_COLLATOR = Collator(fn)
except Exception:
UCA_COLLATOR = None
import warnings
warnings.warn("""\
If you want serious alphabetic sorting, you need to download \
http://www.unicode.org/Public/UCA/latest/allkeys.txt \
to your current working directory (`%s`) and rename it to `uca_allkeys.txt`. \
""" % os.getcwd())
def uca_sort(l):
#~ c = uca_collator()
if UCA_COLLATOR:
def k(w): return UCA_COLLATOR.sort_key(w.text)
else:
def k(w): return w.text.upper()
l.sort(key=k)
class Dictionary(Section):
columns = [FR,PRON,ET]
show_headers = True
def html_lines(self,level=1):
for ln in Section.html_lines(self,level):
yield ln
words = [w for w in self.from_language.words if w.parent is None]
if words:
uca_sort(words)
t = htmlgen.TABLE([col.label for col in self.columns],
show_headers=self.show_headers)
def row(w):
return [col.word2html(w,self) for col in self.columns]
rows = [row(w) for w in words]
for ln in t.html_lines(rows):
yield ln
class Index(Section):
def html_lines(self,level=1):
for ln in Section.html_lines(self,level):
yield ln
#~ self.from_language.words.sort(sort_by_fr)
uca_sort(self.from_language.words)
#~ self.from_language.words = uca_sorted(self.from_language.words)
def fmt(w):
return e2s(self.from_language.word2html(w)) \
+ " " + e2s(ET.word2html(w,self)) \
+ " " \
+ ", ".join([u.get_full_number() for u in w.units])
for w in self.from_language.words:
yield "<br>" + fmt(w)
def write_body(self,fd):
Section.write_body(self,fd)
self.from_language.words.sort(sort_by_fr)
uca_sort(self.from_language.words)
#~ self.from_language.words = uca_sorted(self.from_language.words)
def fmt(w):
for x in self.from_language.word2html(w):
yield x
yield " "
for x in ET.word2html(w,self):
yield x
yield " "
yield ", ".join([u.rst_ref_to() for u in w.units])
for w in self.from_language.words:
fd.write("| %s\n" % html2rst(E.div(*fmt(w))))
class MemoParser(memo.Parser):
def __init__(self,book,*args,**kw):
self.book = book
memo.Parser.__init__(self,*args,**kw)
self.register_command('ref',self.cmd_ref)
self.register_command('item',curry(self.cmd_item,'- '))
self.register_command('oitem',curry(self.cmd_item,'#. '))
self.register_command('ruleslist',self.cmd_ruleslist)
#~ self.register_command('url',self.cmd_url)
def cmd_ref(self,s):
sect = self.book.ref2sect[s]
return sect.rst_ref_to()
def cmd_item(self,prefix,ref,rulesmode=False):
indent = " " * len(prefix)
sect = self.book.ref2sect[ref]
r = prefix
if not rulesmode:
r += sect.rst_ref_to()
if sect.intro:
r += " -- "
if sect.intro:
intro = self.book.memo2rst(sect.intro.strip())
if "\n\n" in intro:
r += "\n"
for ln in intro.splitlines():
r += indent + ln + "\n"
r += "\n"
else:
intro = intro.replace('\n','\n'+indent)
r += intro
if rulesmode:
r += "\n" + indent + "-- " + sect.rst_ref_to(text=sect.get_full_number())
r += "\n"
return r
def cmd_ruleslist(self,s):
r = ''
for ref in s.split():
r += self.cmd_item('#. ',ref,rulesmode=True)
return r
#~ def cmd_url(self,s):
#~ if not s: return "XXX"
#~ url,text = s.split(None,1)
#~ # return '<a href="%s">%s</a>' % (url,text)
#~ return E.a(text,href=url)
class Book:
def __init__(self,from_language,to_language,
title=None,input_template=None,
memo_parser=None):
self.writing_format = None
self.input_template = input_template
self.ref2sect = dict()
self.memo_parser = memo_parser or MemoParser(self)
self.main = Section(self,None,title,
from_language=from_language,to_language=to_language)
def memo2rst(self,s):
return self.memo_parser.parse(s)
def add_section(self,*args,**kw): return self.main.add_section(*args,**kw)
def add_index(self,*args,**kw): return self.main.add_index(*args,**kw)
def add_dictionary(self,*args,**kw): return self.main.add_dictionary(*args,**kw)
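    # Hedged usage sketch (added; the French/Estonian language classes and the
    # word syntax are assumptions based on the rest of this module):
    #
    #     book = Book(French, Estonian, title=u"Prantsuse keel")
    #     unit = book.add_section(u"Toit", ref="toit")
    #     unit.add_lesson(u"Puuviljad")
    #     unit.parse_words(None, u"la pomme [pom] : õun")
    #     book.write_rst_files('docs')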
def old_as_odt(self):
from xhtml2odt import ODTFile
from lino.utils import AttrDict
from lino.utils.html2xhtml import html2xhtml
options = AttrDict(
url = "",
with_network = False,
verbose = True,
template = self.input_template,
top_header_level = 1,
img_width = "8cm",
img_height = "6cm",
)
#~ version=False # help="Show the version and exit")
#~ input=input", metavar="FILE",
#~ help="Read the html from this file")
#~ parser.add_option("-o", "--output", dest="output", metavar="FILE",
#~ help="Location of the output ODT file")
#~ parser.add_option("-t", "--template", dest="template", metavar="FILE",
#~ help="Location of the template ODT file")
#~ parser.add_option("-u", "--url", dest="url",
#~ help="Use this URL for relative links")
#~ parser.add_option("-v", "--verbose", dest="verbose",
#~ action="store_true", default=False,
#~ help="Show what's going on")
#~ parser.add_option("--html-id", dest="htmlid", metavar="ID",
#~ help="Only export from the element with this ID")
#~ parser.add_option("--replace", dest="replace_keyword",
#~ default="ODT-INSERT", metavar="KEYWORD",
#~ help="Keyword to replace in the ODT template "
#~ "(default is %default)")
#~ parser.add_option("--cut-start", dest="cut_start",
#~ default="ODT-CUT-START", metavar="KEYWORD",
#~ help="Keyword to start cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--cut-stop", dest="cut_stop",
#~ default="ODT-CUT-STOP", metavar="KEYWORD",
#~ help="Keyword to stop cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--top-header-level", dest="top_header_level",
#~ type="int", default="1", metavar="LEVEL",
#~ help="Level of highest header in the HTML "
#~ "(default is %default)")
#~ parser.add_option("--img-default-width", dest="img_width",
#~ metavar="WIDTH", default="8cm",
#~ help="Default image width (default is %default)")
#~ parser.add_option("--img-default-height", dest="img_height",
#~ metavar="HEIGHT", default="6cm",
#~ help="Default image height (default is %default)")
#~ parser.add_option("--dpi", dest="img_dpi", type="int",
#~ default=96, metavar="DPI", help="Screen resolution "
#~ "in Dots Per Inch (default is %default)")
#~ parser.add_option("--no-network", dest="with_network",
#~ action="store_false", default=True,
#~ help="Do not download remote images")
#~ options, args = parser.parse_args()
odtfile = ODTFile(options)
odtfile.open()
xhtml = ''.join([ln for ln in self.main.html_lines()])
xhtml = html2xhtml(xhtml)
#~ xhtml = "<DIV>%s</DIV>" % xhtml
xhtml = """\
<html xmlns="http://www.w3.org/1999/xhtml"><body>%s</body></html>""" % xhtml
#~ xhtml = "<p>%s</p>" % xhtml
if True:
f = open("before.xml","wt")
f.write(xhtml.encode('utf-8'))
f.close()
#~ logger.info("Gonna do it with %r",xhtml)
xhtml = odtfile.xhtml_to_odt(xhtml)
if True:
f = open("after.xml","wt")
f.write(xhtml)
#~ f.write(xhtml.encode('utf-8'))
f.close()
return xhtml
def html(self):
#~ s = htmlgen.DIV(self.main.html_lines)
s = ''.join([ln for ln in self.main.html_lines()])
s = "<div>%s</div>" % s
if True:
f = open("odt_content.xml","wt")
f.write(s.encode('utf-8'))
f.close()
#~ logger.info(s)
return s
def write_rst_files(self,root='.'):
self.writing_format = 'rst'
self.main.write_rst_files(root)
if False: # must convert to new structure
fn = os.path.join('dict','et_fr.rst')
logger.info("Generate %s",fn)
fd = codecs.open(fn,'w','utf-8')
write_header(fd,1,'eesti-prantsuse')
t = rstgen.Table(['Nr.',"ET","FR",u"hääldamine","Tasand"])
self.words.sort(sort_by_et)
words_et = [w for w in self.words if not w.hide_et]
t.write(fd,[
(i,w.et,html2rst(w.html_fr()),w.pronounciation,w.lesson.rst_ref_to())
for i,w in enumerate(words_et)])
fd.close()
def write_odt_file(self,target):
#~ from appy.pod.renderer import Renderer
from lino.utils import iif
#~ from lino.utils.appy_pod import setup_renderer
from lino.utils.appy_pod import Renderer
assert os.path.abspath(self.input_template) != os.path.abspath(target)
if os.path.exists(target):
os.remove(target)
#~ tpl = os.path.join(os.path.dirname(__filename__),'cfr.odt')
ses = settings.SITE.login("root") # not tested after 20130327
context = dict(
self=self,
iif=iif,
)
appy_params = dict()
logger.info(u"appy.pod render %s -> %s (params=%s)",self.input_template,target,appy_params)
renderer = Renderer(ses,self.input_template, context, target,**appy_params)
#~ setup_renderer(renderer)
#~ renderer.context.update(restify=debug_restify)
self.writing_format = 'odt'
renderer.run()
if USE_XHTML2ODT:
class Book2(Book):
def write_odt_file(self,target):
#~ from lino.utils import iif
#~ from lino.utils import AttrDict
#~ from lino.utils.html2xhtml import html2xhtml
assert os.path.abspath(self.input_template) != os.path.abspath(target)
if os.path.exists(target):
os.remove(target)
options = AttrDict(
url = "",
template = self.input_template,
output = target,
with_network = True,
verbose = True,
top_header_level = 1,
img_width = "8cm",
img_height = "6cm",
)
#~ version=False # help="Show the version and exit")
#~ input=input", metavar="FILE",
#~ help="Read the html from this file")
#~ parser.add_option("-o", "--output", dest="output", metavar="FILE",
#~ help="Location of the output ODT file")
#~ parser.add_option("-t", "--template", dest="template", metavar="FILE",
#~ help="Location of the template ODT file")
#~ parser.add_option("-u", "--url", dest="url",
#~ help="Use this URL for relative links")
#~ parser.add_option("-v", "--verbose", dest="verbose",
#~ action="store_true", default=False,
#~ help="Show what's going on")
#~ parser.add_option("--html-id", dest="htmlid", metavar="ID",
#~ help="Only export from the element with this ID")
#~ parser.add_option("--replace", dest="replace_keyword",
#~ default="ODT-INSERT", metavar="KEYWORD",
#~ help="Keyword to replace in the ODT template "
#~ "(default is %default)")
#~ parser.add_option("--cut-start", dest="cut_start",
#~ default="ODT-CUT-START", metavar="KEYWORD",
#~ help="Keyword to start cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--cut-stop", dest="cut_stop",
#~ default="ODT-CUT-STOP", metavar="KEYWORD",
#~ help="Keyword to stop cutting text from the ODT "
#~ "template (default is %default)")
#~ parser.add_option("--top-header-level", dest="top_header_level",
#~ type="int", default="1", metavar="LEVEL",
#~ help="Level of highest header in the HTML "
#~ "(default is %default)")
#~ parser.add_option("--img-default-width", dest="img_width",
#~ metavar="WIDTH", default="8cm",
#~ help="Default image width (default is %default)")
#~ parser.add_option("--img-default-height", dest="img_height",
#~ metavar="HEIGHT", default="6cm",
#~ help="Default image height (default is %default)")
#~ parser.add_option("--dpi", dest="img_dpi", type="int",
#~ default=96, metavar="DPI", help="Screen resolution "
#~ "in Dots Per Inch (default is %default)")
#~ parser.add_option("--no-network", dest="with_network",
#~ action="store_false", default=True,
#~ help="Do not download remote images")
#~ options, args = parser.parse_args()
self.odtfile = MyODTFile(options)
context = dict(iif=iif)
context.update(book=self)
self.odtfile.render(context)
def as_odt(self):
xhtml = ''.join([ln for ln in self.main.html_lines()])
xhtml = html2xhtml(xhtml)
#~ xhtml = "<div>%s</div>" % xhtml
#~ xhtml = "<p>%s</p>" % xhtml
#~ xhtml = '<html><body>%s</body></html>' % xhtml
xhtml = '<html xmlns="http://www.w3.org/1999/xhtml"><body>%s</body></html>' % xhtml
if not True:
f = open("before.xml","wt")
f.write(xhtml.encode('utf-8'))
f.close()
#~ logger.info("Gonna do it with %r",xhtml)
xhtml = self.odtfile.xhtml_to_odt(xhtml)
if True:
f = open("after.xml","wt")
f.write(xhtml)
#~ f.write(xhtml.encode('utf-8'))
f.close()
return xhtml.decode('utf-8')
| gpl-3.0 |
tempbottle/Nuitka | nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/rpcgen.py | 4 | 2827 | """SCons.Tool.rpcgen
Tool-specific initialization for RPCGEN tools.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpcgen.py 2014/07/05 09:42:21 garyo"
from SCons.Builder import Builder
import SCons.Util
cmd = "cd ${SOURCE.dir} && $RPCGEN -%s $RPCGENFLAGS %s -o ${TARGET.abspath} ${SOURCE.file}"
rpcgen_client = cmd % ('l', '$RPCGENCLIENTFLAGS')
rpcgen_header = cmd % ('h', '$RPCGENHEADERFLAGS')
rpcgen_service = cmd % ('m', '$RPCGENSERVICEFLAGS')
rpcgen_xdr = cmd % ('c', '$RPCGENXDRFLAGS')
def generate(env):
"Add RPCGEN Builders and construction variables for an Environment."
client = Builder(action=rpcgen_client, suffix='_clnt.c', src_suffix='.x')
header = Builder(action=rpcgen_header, suffix='.h', src_suffix='.x')
service = Builder(action=rpcgen_service, suffix='_svc.c', src_suffix='.x')
xdr = Builder(action=rpcgen_xdr, suffix='_xdr.c', src_suffix='.x')
env.Append(BUILDERS={'RPCGenClient' : client,
'RPCGenHeader' : header,
'RPCGenService' : service,
'RPCGenXDR' : xdr})
env['RPCGEN'] = 'rpcgen'
env['RPCGENFLAGS'] = SCons.Util.CLVar('')
env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('')
env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('')
env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('')
env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('')
def exists(env):
return env.Detect('rpcgen')
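# Hedged SConstruct usage sketch (added; file names are illustrative only):
# the builders registered above turn a .x interface file into the generated
# client, header, service and XDR sources.
#
#     env = Environment(tools=['default', 'rpcgen'])
#     env.RPCGenHeader('proto.x')    # -> proto.h
#     env.RPCGenClient('proto.x')    # -> proto_clnt.c
#     env.RPCGenService('proto.x')   # -> proto_svc.c
#     env.RPCGenXDR('proto.x')       # -> proto_xdr.c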
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
jendap/tensorflow | tensorflow/contrib/tpu/python/tpu/datasets.py | 5 | 7480 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
The resulting dataset may return an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
  TPUClusterResolver and therefore has a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
sloppy: (Optional.) If `False`, read input data while maintaining a
deterministic order. (This may have significant performance impacts.)
sloppy defaults to: True.
Returns:
A `tf.data.Dataset` with an infinite stream of elements generated by a
parallel interleaving of the set of files matched (or generated) by `files`
    with a type that is the output of the dataset specified by `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
with ops.device('/job:%s' % file_reader_job):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.DatasetV2):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
source_dataset = source_dataset.repeat(num_epochs)
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = dataset_ops.make_one_shot_iterator(source_dataset)
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, source_dataset.output_types, source_dataset.output_shapes)
return remote_iterator.get_next()
def MapFn(unused_input):
if isinstance(source_dataset.output_types, dtypes.DType):
output_types = [source_dataset.output_types]
elif isinstance(source_dataset.output_types, (list, tuple)):
output_types = source_dataset.output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
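# A minimal usage sketch (assumption: illustrative only; the file pattern below
# is a placeholder). Files are read on the coordinator VM and streamed to the
# TPU worker, where the resulting dataset is consumed like any other:
#
#   dataset = StreamingFilesDataset('/mnt/data/train-*.tfrecord',
#                                   filetype='tfrecord')
#   iterator = dataset_ops.make_one_shot_iterator(dataset)
#   next_record = iterator.get_next()
#   # ... feed `next_record` into the model's input pipeline.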
| apache-2.0 |
gnachman/iTerm2 | tools/ply/ply-3.4/test/yacc_badprec2.py | 174 | 1501 | # -----------------------------------------------------------------------------
# yacc_badprec2.py
#
# Bad precedence
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
42,
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
| gpl-2.0 |
wayoda/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
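# A minimal usage sketch (assumption: illustrative only; concrete probers such
# as the UTF-8 or single-byte probers live in sibling modules of this package).
# A group prober is normally used through a subclass that fills self._mProbers
# before being fed raw bytes:
#
#   class MyGroupProber(CharSetGroupProber):
#       def __init__(self, probers):
#           CharSetGroupProber.__init__(self)
#           self._mProbers = list(probers)
#           self.reset()
#
#   group = MyGroupProber([...])   # pass concrete CharSetProber instances
#   group.feed(raw_bytes)
#   print(group.get_charset_name(), group.get_confidence())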
| lgpl-2.1 |
GeographicaGS/moocng | moocng/media_contents/handlers/prezi.py | 1 | 1737 | import re
from django.template.loader import get_template
from django.template import Context
from django.templatetags.static import static
from .base import MediaContentHandlerBase
class PreziMediaContentHandler(MediaContentHandlerBase):
def get_iframe_template(self, content_id, **kwargs):
template = get_template("media_contents/handlers/prezi_template.html")
context = Context({
'content_id': content_id,
'origin': kwargs.pop('host', ''),
})
return template.render(context)
def get_iframe_code(self, content_id, **kwargs):
template = get_template("media_contents/handlers/prezi.html")
context = Context({
'content_id': content_id,
'origin': kwargs.get('host', ''),
'height': kwargs.get('height', '349px'),
'width': kwargs.get('width', '620px'),
'extra_params': kwargs.get('extra_params', ''),
'extra_attribs': kwargs.get('extra_attribs', ''),
})
return template.render(context)
def get_javascript_code(self, **kwargs):
template = get_template("media_contents/handlers/prezi_js.html")
context = Context(kwargs)
return template.render(context)
def get_thumbnail_url(self, content_id):
return static('img/media_contents/prezi.png')
def get_last_frame(self, content_id, tmpdir):
return None
def extract_id(self, url):
patterns = [
'prezi\.com/([a-zA-Z\d\-\_]+)/.*',
'^([a-zA-Z\d\-\_]+)$',
]
for pattern in patterns:
result = re.search(pattern, url, re.IGNORECASE)
if result:
return result.group(1)
return ''
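# A minimal sketch of what extract_id() accepts (assumption: the URLs below are
# made up and only illustrate the two patterns above):
#
#   handler = PreziMediaContentHandler()
#   handler.extract_id('http://prezi.com/abc-123_xyz/some-presentation/')  # -> 'abc-123_xyz'
#   handler.extract_id('abc-123_xyz')                                      # -> 'abc-123_xyz'
#   handler.extract_id('http://example.com/other')                         # -> ''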
| apache-2.0 |
zero-ui/miniblink49 | v8_7_5/tools/unittests/run_tests_test.py | 3 | 23853 | #!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Global system tests for V8 test runners and fuzzers.
This hooks up the framework under tools/testrunner testing high-level scenarios
with different test suite extensions and build configurations.
"""
# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
# for py2/py3 compatibility
from __future__ import print_function
import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from cStringIO import StringIO
TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')
Result = collections.namedtuple(
'Result', ['stdout', 'stderr', 'returncode'])
Result.__str__ = lambda self: (
'\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
(self.returncode, self.stdout, self.stderr))
@contextlib.contextmanager
def temp_dir():
"""Wrapper making a temporary directory available."""
path = None
try:
path = tempfile.mkdtemp('v8_test_')
yield path
finally:
if path:
shutil.rmtree(path)
@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
"""Wrapper that sets up a temporary V8 test root.
Args:
baseroot: The folder with the test root blueprint. Relevant files will be
copied to the temporary test root, to guarantee a fresh setup with no
dirty state.
"""
basedir = os.path.join(TEST_DATA_ROOT, baseroot)
with temp_dir() as tempbase:
builddir = os.path.join(tempbase, 'out', 'Release')
testroot = os.path.join(tempbase, 'test')
os.makedirs(builddir)
shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
for suite in os.listdir(os.path.join(basedir, 'test')):
os.makedirs(os.path.join(testroot, suite))
for entry in os.listdir(os.path.join(basedir, 'test', suite)):
shutil.copy(
os.path.join(basedir, 'test', suite, entry),
os.path.join(testroot, suite))
yield tempbase
@contextlib.contextmanager
def capture():
"""Wrapper that replaces system stdout/stderr an provides the streams."""
oldout = sys.stdout
olderr = sys.stderr
try:
stdout=StringIO()
stderr=StringIO()
sys.stdout = stdout
sys.stderr = stderr
yield stdout, stderr
finally:
sys.stdout = oldout
sys.stderr = olderr
def run_tests(basedir, *args, **kwargs):
"""Executes the test runner with captured output."""
with capture() as (stdout, stderr):
sys_args = ['--command-prefix', sys.executable] + list(args)
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
def override_build_config(basedir, **kwargs):
"""Override the build config with new values provided as kwargs."""
path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
with open(path) as f:
config = json.load(f)
config.update(kwargs)
with open(path, 'w') as f:
json.dump(config, f)
class SystemTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Try to set up python coverage and run without it if not available.
cls._cov = None
try:
import coverage
if int(coverage.__version__.split('.')[0]) < 4:
cls._cov = None
print('Python coverage version >= 4 required.')
raise ImportError()
cls._cov = coverage.Coverage(
source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
omit=['*unittest*', '*__init__.py'],
)
cls._cov.exclude('raise NotImplementedError')
cls._cov.exclude('if __name__ == .__main__.:')
cls._cov.exclude('except TestRunnerError:')
cls._cov.exclude('except KeyboardInterrupt:')
cls._cov.exclude('if options.verbose:')
cls._cov.exclude('if verbose:')
cls._cov.exclude('pass')
cls._cov.exclude('assert False')
cls._cov.start()
except ImportError:
print('Running without python coverage.')
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
global num_fuzzer
from testrunner import num_fuzzer
from testrunner.local import command
from testrunner.local import pool
command.setup_testing()
pool.setup_testing()
@classmethod
def tearDownClass(cls):
if cls._cov:
cls._cov.stop()
print('')
print(cls._cov.report(show_missing=True))
def testPass(self):
"""Test running only passing tests in two variants.
Also test printing durations.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--time',
'sweet/bananas',
'sweet/raspberries',
)
self.assertIn('Done running sweet/bananas default: pass', result.stdout, result)
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
# self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
def testShardedProc(self):
with temp_base() as basedir:
for shard in [1, 2]:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
'sweet/blackberries',
'sweet/raspberries',
infra_staging=False,
)
# One of the shards gets one variant of each test.
self.assertIn('2 tests ran', result.stdout, result)
if shard == 1:
self.assertIn(
'Done running sweet/raspberries default', result.stdout, result)
self.assertIn(
'Done running sweet/raspberries stress', result.stdout, result)
self.assertEqual(0, result.returncode, result)
else:
self.assertIn(
'sweet/blackberries default: FAIL', result.stdout, result)
self.assertIn(
'sweet/blackberries stress: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
with temp_base() as basedir:
for shard in [1, 2]:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
'sweet/bananas',
'sweet/raspberries',
)
# One of the shards gets one variant of each test.
self.assertIn('Running 2 tests', result.stdout, result)
self.assertIn('Done running sweet/bananas', result.stdout, result)
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testFail(self):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def check_cleaned_json_output(
self, expected_results_name, actual_json, basedir):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
# Replace duration in actual output as it's non-deterministic. Also
# replace the python executable prefix as it has a different absolute
# path dependent on where this runs.
def replace_variable_data(data):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
replace_variable_data(data)
json_output['duration_mean'] = 1
with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
expected_test_results = json.load(f)
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg)
def testFailWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
'--random-seed=123',
'--json-test-results', json_path,
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
# With test processors we don't count reruns as separated failures.
# TODO(majeski): fix it?
self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(majeski): Previously we only reported the variant flags in the
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.maxDiff = None
self.check_cleaned_json_output(
'expected_test_results1.json', json_path, basedir)
def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
'--random-seed=123',
'--json-test-results', json_path,
'sweet',
infra_staging=False,
)
self.assertIn(
'Done running sweet/bananaflakes default: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
self.check_cleaned_json_output(
'expected_test_results2.json', json_path, basedir)
def testAutoDetect(self):
"""Fake a build with several auto-detected options.
Using all those options at once doesn't really make much sense. This is
merely for getting coverage.
"""
with temp_base() as basedir:
override_build_config(
basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
v8_use_snapshot=False, v8_enable_embedded_builtins=False,
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
v8_enable_pointer_compression=False)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
expect_text = (
'>>> Autodetected:\n'
'asan\n'
'cfi_vptr\n'
'dcheck_always_on\n'
'msan\n'
'no_i18n\n'
'no_snap\n'
'tsan\n'
'ubsan_vptr\n'
'>>> Running tests for ia32.release')
self.assertIn(expect_text, result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
def testSkips(self):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=nooptimization',
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testRunSkips(self):
"""Inverse the above. Test parameter to keep running skipped tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=nooptimization',
'--run-skipped',
'sweet/strawberries',
)
self.assertIn('1 tests failed', result.stdout, result)
self.assertIn('1 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testDefault(self):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
infra_staging=False,
)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_base() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testInconsistentMode(self):
"""Test failing run when attempting to wrongly override the mode."""
with temp_base() as basedir:
override_build_config(basedir, is_debug=True)
result = run_tests(basedir, '--mode=Release')
self.assertIn('execution mode (release) for release is inconsistent '
'with build config (debug)', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--arch=ia32')
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--variants=meh')
self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
with temp_base() as basedir:
result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
This also exercises various paths in statusfile logic.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default',
'sweet',
'--report',
)
self.assertIn(
'3 tests are expected to fail that we should fix',
result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default,nooptimization',
'sweet',
'--warn-unused',
)
self.assertIn( 'Unused rule: carrots', result.stdout, result)
self.assertIn( 'Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default',
'sweet/bananas',
'--cat',
)
self.assertIn('begin source: sweet/bananas', result.stdout, result)
self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testPredictable(self):
"""Test running a test in verify-predictable mode.
    The test will fail because of missing allocation output. We verify that, and
that the predictable flags are passed and printed after failure.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_enable_verify_predictable=True)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn(
'Done running sweet/bananas default: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSlowArch(self):
"""Test timeout factor manipulation on slow architecture."""
with temp_base() as basedir:
override_build_config(basedir, v8_target_cpu='arm64')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
# TODO(machenbach): We don't have a way for testing if the correct
# timeout was used.
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefault(self):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
"""Test using random-seed-stress feature passing a random seed."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'--random-seed=123',
'sweet/strawberries',
)
self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSpecificVariants(self):
"""Test using NO_VARIANTS modifiers in status files skips the desire tests.
The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
But the status file applies a modifier to each skipping one of the
variants.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_use_snapshot=False)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'sweet/bananas',
'sweet/raspberries',
)
# Both tests are either marked as running in only default or only
# slow variant.
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
"""Test that the fake status file is well-formed."""
with temp_base() as basedir:
from testrunner.local import statusfile
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
def testDotsProgress(self):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=dots',
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testMonoProgress(self):
self._testCompactProgress('mono')
def testColorProgress(self):
self._testCompactProgress('color')
def _testCompactProgress(self, name):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=%s' % name,
'sweet/cherries',
'sweet/bananas',
infra_staging=False,
)
if name == 'color':
expected = ('\033[34m% 28\033[0m|'
'\033[32m+ 1\033[0m|'
'\033[31m- 1\033[0m]: Done')
else:
expected = '% 28|+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
self.assertEqual(1, result.returncode, result)
def testExitAfterNFailures(self):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--exit-after-n-failures=2',
'-j1',
'sweet/mangoes', # PASS
'sweet/strawberries', # FAIL
'sweet/blackberries', # FAIL
'sweet/raspberries', # should not run
)
self.assertIn('sweet/mangoes default: pass', result.stdout, result)
self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
self.assertIn('Too many failures, exiting...', result.stdout, result)
self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
self.assertNotIn('Done running sweet/raspberries', result.stdout, result)
self.assertIn('2 tests failed', result.stdout, result)
self.assertIn('3 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testNumFuzzer(self):
sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release']
with temp_base() as basedir:
with capture() as (stdout, stderr):
code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
result = Result(stdout.getvalue(), stderr.getvalue(), code)
self.assertEqual(0, result.returncode, result)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
TE-ToshiakiTanaka/alize | alize/library/slack/module.py | 1 | 1735 | import os
import sys
from alize.log import Log
from alize.exception import *
try:
from slacker import Slacker
except Exception as e:
print(str(e))
L = Log("Slack.Library.ALIZE")
class Slack(object):
def __init__(self, token):
try:
self.slack = Slacker(token)
except Exception as e:
L.warning(str(e))
raise SlackError("%s is not exists." % token)
def message(self, message, channels):
try:
result = self.slack.chat.post_message(
channels,
message,
as_user=True)
if result.successful:
return result.body
else:
L.warning("Slack Error : %s" % result.error)
raise SlackError(result.error)
except Exception as e:
L.warning(str(e))
raise SlackError("%s is not exists." % channels)
def upload(self, filepath, channels,
content=None,
filetype=None,
filename=None,
title=None,
initial_comment=None):
try:
result = self.slack.files.upload(
filepath,
content=content,
filetype=filetype,
filename=filename,
title=title,
initial_comment=initial_comment,
channels=channels)
if result.successful:
return result.body
else:
L.warning("Slack Error : %s" % result.error)
raise SlackError(result.error)
except Exception as e:
L.warning(str(e))
raise SlackError("%s is not exists." % channels)
| mit |
ua-snap/downscale | snap_scripts/epscor_sc/downscale_cmip5_epscor_sc.py | 1 | 6401 | # downscale the prepped cmip5 data downloaded using SYNDA for EPSCoR SC project
# author: Michael Lindgren -- June 09, 2016 (UPDATED: September 21, 2016 -- [ML])
if __name__ == '__main__':
import glob, os, rasterio, itertools
from functools import partial
import downscale
from downscale import preprocess, Mask, utils
import argparse
import numpy as np
# # parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-b", "--base_dir", action='store', dest='base_dir', type=str, help="base directory where data is stored in structured folders" )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
parser.add_argument( "-u", "--units", action='store', dest='units', type=str, help="cmip5 units name (exact)" )
parser.add_argument( "-met", "--metric", action='store', dest='metric', type=str, help="cmip5 metric name (exact)" )
args = parser.parse_args()
# unpack the args
variable = args.variable
scenario = args.scenario
model = args.model
units = args.units
metric = args.metric
base_dir = args.base_dir
# AOI MASK -- HARDWIRE -- PCLL for CMIP5
aoi_mask_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/akcan_aoi_mask_PCLL.shp'
project = 'ar5'
# # # # FOR TESTING # # #
# base_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data'
# variable = 'pr'
# scenario = 'rcp60'
# model = 'GFDL-CM3'
# units = 'mm'
# metric = 'total'
# some setup args
base_path = os.path.join( base_dir,'cmip5','prepped' )
output_dir = os.path.join( base_dir, 'downscaled' )
variables = [ variable ]
scenarios = [ scenario ]
models = [ model ]
anom = True # write out anoms (True) or not (False)
interp = False # interpolate across space -- Low Res
find_bounds = False
# modelnames is simply the string name to put in the output filenaming if that differs from the modelname
# used in querying the file which is the models list variable
all_models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ]
modelnames = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'NCAR-CCSM4' ]
modelnames = dict( zip( all_models, modelnames ) )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
os.chdir( output_dir )
for variable, model, scenario in itertools.product( variables, models, scenarios ):
# fix the climatology -- precip only
if variable == 'pr':
fix_clim = True
else:
fix_clim = False
modelname = modelnames[ model ]
# SETUP BASELINE
clim_path = os.path.join( base_dir, 'prism', variable )
filelist = glob.glob( os.path.join( clim_path, '*.tif' ) )
filelist = [ i for i in filelist if '_14_' not in i ] # remove the GD ANNUAL _14_ file.
baseline = downscale.Baseline( filelist )
input_path = os.path.join( base_path, model, scenario, variable )
output_path = os.path.join( output_dir, model, scenario, variable )
if not os.path.exists( output_path ):
os.makedirs( output_path )
print( input_path )
# list files for this set of downscaling -- one per folder
fn, = glob.glob( os.path.join( input_path, '*.nc' ) )
if 'historical' in scenario:
historical = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric, begin=1860, end=2005 )
future = None # no need for futures here....
else:
# get the historical data for anomalies
historical_fn, = glob.glob( os.path.join( os.path.dirname( fn ).replace( scenario, 'historical' ), '*.nc' ) )
historical = downscale.Dataset( historical_fn, variable, model, scenario, project=project, units=units, metric=metric, begin=1860, end=2005 )
future = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric, begin=2006, end=2100 )
# convert from Kelvin to Celcius
if variable != 'pr':
if historical:
historical.ds[ variable ] = historical.ds[ variable ] - 273.15
historical.ds[ variable ][ 'units' ] = units
if future:
future.ds[ variable ] = future.ds[ variable ] - 273.15
future.ds[ variable ][ 'units' ] = units
if variable == 'pr':
# convert to mm/month
if historical:
timesteps, = historical.ds.time.shape # this assumes time begins in January
days = [31,28,31,30,31,30,31,31,30,31,30,31] * (timesteps / 12)
for index, days_in_month in zip(range( len( days ) ), days ):
historical.ds[ variable ][index, ...] = historical.ds[ variable ][index, ...].data * 86400 * days_in_month
historical.ds[ variable ][ 'units' ] = units
if future:
timesteps, = future.ds.time.shape # this assumes time begins in January
days = [31,28,31,30,31,30,31,31,30,31,30,31] * (timesteps / 12)
for index, days_in_month in enumerate( days ):
future.ds[ variable ][index, ...] = future.ds[ variable ][index, ...] * 86400 * days_in_month
future.ds[ variable ][ 'units' ] = units
# DOWNSCALE
mask = rasterio.open( baseline.filelist[0] ).read_masks( 1 )
clim_begin = '1961'
clim_end = '1990'
if variable == 'pr':
# truncate to whole number
rounder = np.rint
downscaling_operation = 'mult'
aoi_mask = aoi_mask_fn
# make AOI_Mask input resolution for computing 95th percentiles...
if aoi_mask_fn is not None:
aoi_mask = Mask( aoi_mask_fn, historical, 1, 0 )
else:
aoi_mask = None
else:
# round to 2 decimals
rounder = partial( np.round, decimals=1 )
downscaling_operation = 'add'
aoi_mask = None
def round_it( x, mask ):
arr = np.ma.masked_array( data=x, mask=mask )
return rounder( arr )
round_data = partial( round_it, mask=( mask==0 ) )
ar5 = downscale.DeltaDownscale( baseline, clim_begin, clim_end, historical, future,
downscaling_operation=downscaling_operation, mask=mask, mask_value=0, ncpus=32,
src_crs={'init':'epsg:4326'}, src_nodata=None, dst_nodata=None,
post_downscale_function=round_data, varname=variable, modelname=modelname, anom=anom,
fix_clim=fix_clim, aoi_mask=aoi_mask )
ar5.downscale( output_dir=output_path )
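# A minimal invocation sketch mirroring the commented test values above
# (assumption: illustrative only; any prepped model/scenario/variable works):
#
#   python downscale_cmip5_epscor_sc.py \
#       -b /workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data \
#       -m GFDL-CM3 -s rcp60 -v pr -u mm -met total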
| mit |
kmspriyatham/symath | scipy/scipy/interpolate/benchmarks/bench_memusage.py | 3 | 3412 | # Posix-only benchmark
from __future__ import division, absolute_import, print_function
import os
import sys
import re
import subprocess
import time
import textwrap
from numpy.testing import dec
from scipy.stats import spearmanr
import numpy as np
@dec.skipif(not sys.platform.startswith('linux'), "Memory benchmark works only on Linux")
def bench_leaks():
mem_info = get_mem_info()
set_mem_rlimit(int(mem_info['memtotal'] * 0.7))
# Setup temp file, make it fit in memory
print_table_row(['repeats', 'peak memory (MB)'])
repeats = [2, 5, 10, 50, 200]
peak_mems = []
for repeat in repeats:
code = """
import numpy as np
from scipy.interpolate import griddata
def func(x, y):
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range(%(repeat)d):
for method in ['nearest', 'linear', 'cubic']:
griddata(points, values, (grid_x, grid_y), method=method)
""" % dict(repeat=repeat)
_, peak_mem = run_monitored(code)
peak_mems.append(peak_mem)
print_table_row(["%d" % repeat, "%.1f" % (peak_mem/1e6,)])
print("")
corr, p = spearmanr(repeats, peak_mems)
if p < 0.05:
print("*"*79)
print("PROBABLE MEMORY LEAK")
print("*"*79)
raise AssertionError("Probable memory leak")
else:
print("PROBABLY NO MEMORY LEAK")
def print_table_row(columns):
print(" | ".join("%-20s" % x for x in columns))
def run_monitored(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : float
Peak memory usage (rough estimate only) in bytes
"""
code = textwrap.dedent(code)
process = subprocess.Popen([sys.executable, '-c', code],
cwd=os.path.dirname(__file__))
peak_memusage = -1
start = time.time()
while True:
ret = process.poll()
if ret is not None:
break
with open('/proc/%d/status' % process.pid, 'r') as f:
procdata = f.read()
m = re.search('VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
if m is not None:
memusage = float(m.group(1)) * 1e3
peak_memusage = max(memusage, peak_memusage)
time.sleep(0.01)
process.wait()
duration = time.time() - start
if process.returncode != 0:
raise AssertionError("Running failed:\n%s" % code)
return duration, peak_memusage
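# A minimal sketch of calling run_monitored() on its own (assumption:
# illustrative only; Linux with /proc is required, as in bench_leaks above):
#
#   duration, peak = run_monitored("import numpy as np; np.zeros(10**7)")
#   print("took %.2fs, peak RSS %.1f MB" % (duration, peak / 1e6))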
def get_mem_info():
"""Get information about available memory"""
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
return info
def set_mem_rlimit(max_mem):
"""
    Set the address-space rlimit to max_mem (capped by any existing limit), to
    avoid a grinding halt caused by swapping.
"""
import resource
cur_limit = resource.getrlimit(resource.RLIMIT_AS)
if cur_limit[0] > 0:
max_mem = min(max_mem, cur_limit[0])
resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
if __name__ == "__main__":
    bench_leaks()
| apache-2.0 |
dand-oss/yaml-cpp | test/gtest-1.10.0/googlemock/scripts/generator/cpp/ast.py | 69 | 62925 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = '[email protected] (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
  __contains__ = lambda self, item: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
if name_tokens:
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
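    # Example of the returned tuple (illustrative only, not upstream): for a
    # declaration like "const Foo<int>* bar = 0" with needs_name_removed=True,
    # the parts come back roughly as name='bar', type_name='Foo',
    # templated_types=[Type for 'int'], modifiers=['const'],
    # default=[token for '0'], and other_tokens holding the '*'.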
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter(end):
if default:
del default[0] # Remove flag.
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter(s.start)
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter(tokens[-1].end)
return result
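    # Illustrative behaviour (comment added for clarity): for the tokens of
    # "const string& name, int count = 0", ToParameters yields two Parameter
    # objects -- one named 'name' whose type is a const string reference, and
    # one named 'count' whose default holds the token for '0'.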
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
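    # Sketch of the contract (added comment, not upstream): for a stream
    # spelling "std::vector<int> values;", GetName() collects the tokens for
    # "std::vector<int>" into the returned list and hands back the NAME token
    # for "values" as next_token_info.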
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'override':
modifiers |= FUNCTION_OVERRIDE
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
if token.name == 'default' or token.name == 'delete':
# Ignore explicitly defaulted and deleted special members
# in C++11.
token = self._GetNextToken()
else:
# Handle pure-virtual declarations.
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
# TODO(nnorwitz): if there is only One name like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if exists so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
        # value. This is required because GetName has to have some
        # terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special case the handling typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
token = self._GetNextToken()
if not (token.token_type == tokenize.NAME and token.name == 'class'):
self._AddBackToken(token)
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # )
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
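# Hedged usage sketch (not part of the original module): building an AST for a
# header and walking its top-level declarations might look like this, where
# 'foo.h' is a placeholder filename.
#
#   builder = BuilderFromSource(open('foo.h').read(), 'foo.h')
#   for node in builder.Generate():
#       if isinstance(node, Class):
#           print(node.name)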
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
| mit |
SCSSoftware/BlenderTools | addon/io_scs_tools/utils/info.py | 1 | 4434 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2014: SCS Software
import bpy
from io_scs_tools import bl_info
def __get_bl_info_version__(key):
"""Gets version string from bl_info dictonary for given key.
:param key: key in bl_info contaning version tuple (X, X, X, ..) where X is int number
:type key: str
:return: string representation of bl_info dictionary value for given key
:rtype: str
"""
ver = ""
for ver_num in bl_info[key]:
ver += str(ver_num) + "."
return ver[:-1]
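# Example (illustrative only, not in the original file): with
# bl_info["version"] == (1, 4, 1), __get_bl_info_version__("version")
# returns the string "1.4.1".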
def get_tools_version():
"""Returns Blender Tools version as string from bl_info["version"] dictonary value.
:return: string representation of bl_info["version"] tuple
:rtype: str
"""
return __get_bl_info_version__("version")
def get_required_blender_version():
"""Returns required Blender version as string from bl_info["blender"] dictonary value.
:return: string representation of bl_info["blender"] tuple
:rtype: str
"""
return __get_bl_info_version__("blender")
def get_blender_version():
"""Returns Blender's version and the build identifications as strings.
    :return: Blender's version number and its build identification as two formatted strings
:rtype: tuple(str, str)
"""
b_ver = bpy.app.version
b_ver_str = str(str(b_ver[0]) + "." + str(b_ver[1]) + "." + str(b_ver[2]))
if b_ver[0] == 2 and b_ver[1] <= 69:
build_str = str(" (r" + str(bpy.app.build_revision)[2:-1] + ")")
else:
build_str = str(" (hash: " + str(bpy.app.build_hash)[2:-1] + ")")
return b_ver_str, build_str
def get_combined_ver_str(only_version_numbers=False):
"""Returns combined version string from Blender version and Blender Tools version.
:param only_version_numbers: True to return only versions without "Blender" and "SCS Blender Tools" strings
:type only_version_numbers: bool
:return: combined version string
:rtype: str
"""
(version, build) = get_blender_version()
if only_version_numbers:
return version + build + ", " + get_tools_version()
else:
return "Blender " + version + build + ", SCS Blender Tools: " + get_tools_version()
def is_blender_able_to_run_tools():
"""Tells if Blender version is good enough to run Blender Tools.
:return: True if current blender version meets required version for Blender Tools; False otherwise
:rtype: bool
"""
return cmp_ver_str(get_blender_version()[0], get_required_blender_version()) >= 0
def cmp_ver_str(version_str, version_str2):
"""Compares two version string of format "X.X.X..." where X is number.
:param version_str: version string to check (should be in format: "X.Y" where X and Y are version numbers)
:type version_str: str
:param version_str2: version string to check (should be in format: "X.Y" where X and Y are version numbers)
:type version_str2: str
:return: -1 if first is smaller; 0 if equal; 1 if first is greater;
:rtype: int
"""
version_str = version_str.split(".")
version_str2 = version_str2.split(".")
ver_cmp = []
for ver_i in range(0, 2):
if int(version_str[ver_i]) < int(version_str2[ver_i]):
ver_cmp.append(-1)
elif int(version_str[ver_i]) == int(version_str2[ver_i]):
ver_cmp.append(0)
else:
ver_cmp.append(1)
ver_i += 1
# first version smaller than second
if ver_cmp[0] < 0 or (ver_cmp[0] == 0 and ver_cmp[1] < 0):
return -1
# equal versions
if ver_cmp[0] == 0 and ver_cmp[1] == 0:
return 0
# otherwise we directly assume that first is greater
return 1
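# Illustrative checks (added comment, not upstream); note that only the first
# two version components are compared:
#   cmp_ver_str("2.69", "2.70") == -1
#   cmp_ver_str("2.70", "2.70") == 0
#   cmp_ver_str("2.71.5", "2.70") == 1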
| gpl-2.0 |
ameuret/mongrel2 | examples/zcov/zcov/GCovGroup.py | 96 | 6378 | #!/usr/bin/python
from __future__ import division
import pprint
import cPickle
import os
import warnings
from zcov import GCovParser
class GCovGroup:
@staticmethod
def fromfile(path):
f = open(path)
try:
res = cPickle.load(f)
header,version = res[0],res[1]
except:
raise ValueError,'invalid zcov input'
if header != 'zcov-data':
raise ValueError,'input is not in zcov format'
elif version != 1:
raise ValueError,'unrecognized zcov version'
return res[2]
def tofile(self, path):
f = open(path,'wb')
cPickle.dump(('zcov-data',1,self),f,-1)
f.close()
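    # Hedged usage sketch (added comment, not original code): merging two
    # coverage dumps produced by tofile() into one group might look like
    #
    #   group = GCovGroup.fromfile('run1.zcov')
    #   group.merge(GCovGroup.fromfile('run2.zcov'))
    #   group.tofile('combined.zcov')
    #
    # where 'run1.zcov' and 'run2.zcov' are placeholder paths.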
def __init__(self):
self.entryMap = {}
def addEntry(self, path, entry):
record = self.entryMap.get(path)
if record is None:
self.entryMap[path] = entry
else:
self.entryMap[path] = self.mergeData(record,entry)
def addGCDA(self, data):
for path,entry in data.entries:
self.addEntry(path, entry)
def merge(self, b):
for path,entry in b.entryMap.items():
self.addEntry(path, entry)
def mergeData(self, a, b):
keys = self.mergeKeys(a.keys, b.keys)
lines = self.mergeLines(a.lines, b.lines)
calls = self.mergeCalls(a.calls, b.calls)
branches = self.mergeBranches(a.branches, b.branches)
functions = self.mergeFunctions(a.functions, b.functions)
return GCovParser.GCovFileData(keys, lines, calls, branches, functions)
def mergeKeys(self, aKeys, bKeys):
if set(aKeys) != set(bKeys):
            raise ValueError,'Keys differ: %s, %s'%(pprint.pformat(aKeys),
                                                    pprint.pformat(bKeys))
keys = {}
for key,aValue in aKeys.items():
bValue = bKeys[key]
if key=='Source':
if aValue != bValue:
raise ValueError,'Key ("%s") differs: %s %s'%(key,
aValue,
bValue)
value = aValue
elif key in ('Runs','Programs'):
value = str(int(aValue) + int(bValue))
elif key in ('Data','Graph'):
value = aValue+','+bValue
else:
raise ValueError,'Unrecognized key: "%s"'%(key,)
keys[key] = value
return keys
def mergeLines(self, aLines, bLines):
if len(aLines) != len(bLines):
raise ValueError,'Entry mismatch (number of lines)'
lines = [None]*len(aLines)
for i,(a,b) in enumerate(zip(aLines,bLines)):
if a is None or b is None:
# Executability can change across tests (conditional
# code), take the non-None one if it exists.
lines[i] = (a,b)[a is None]
else:
lines[i] = a + b
return lines
def mergeLineList(self, aList, bList, merge):
        if not aList:
            for bItem in bList:
                yield bItem
            return
        elif not bList:
            for aItem in aList:
                yield aItem
            return
aIter,bIter = iter(aList),iter(bList)
aItem,bItem = aIter.next(),bIter.next()
while 1:
if aItem[0]==bItem[0]:
yield merge(aItem,bItem)
try:
aItem = aIter.next()
except StopIteration:
for bItem in bIter:
yield bItem
break
try:
bItem = bIter.next()
except StopIteration:
for aItem in aIter:
yield aItem
break
elif aItem[0]<bItem[0]:
yield aItem
try:
aItem = aIter.next()
except StopIteration:
yield bItem
for bItem in bIter:
yield bItem
break
else:
yield bItem
try:
bItem = bIter.next()
except StopIteration:
yield aItem
                    for aItem in aIter:
yield aItem
break
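    # Illustrative behaviour (comment added for clarity): given records keyed
    # by line number, e.g. aList=[(1, ...), (3, ...)] and bList=[(3, ...), (7, ...)],
    # the generator yields the line-1 record, merge() of the two line-3
    # records, then the line-7 record, preserving line order.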
def mergeCalls(self, aCalls, bCalls):
def merge(a,b):
if a[1] != b[1]:
warnings.warn('Call mismatch (numbers differ)')
# raise ValueError,'Call mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.CallNotExecuted
if GCovParser.GCovFileData.CallReturned in (a[2],b[2]):
code = GCovParser.GCovFileData.CallReturned
return (a[0],a[1],code,count)
return list(self.mergeLineList(aCalls,bCalls,merge))
def mergeBranches(self, aBranches, bBranches):
def merge(a,b):
# XXX This is really wrong
if a[1] != b[1]:
warnings.warn('Branch mismatch (numbers differ)')
# raise ValueError,'Branch mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.BranchNotTaken
if GCovParser.GCovFileData.BranchTaken in (a[2],b[2]):
code = GCovParser.GCovFileData.BranchTaken
return (a[0],a[1],code,count)
return list(self.mergeLineList(aBranches,bBranches,merge))
def mergeFunctions(self, aFunctions, bFunctions):
def merge(a,b):
if a[0] != b[0]:
warnings.warn('Function mismatch (names differ)')
# raise ValueError,'Function mismatch (names differ)'
return (a[0],a[1]+b[1])
return list(self.mergeLineList(aFunctions,bFunctions,merge))
###
def main():
from optparse import OptionParser
op = OptionParser("usage: %prog [options] files")
opts,args = op.parse_args()
group = GCovGroup()
for f in args:
res = GCovParser.parseGCDA(f)
group.addGCDA(res)
print '%d total files'%(len(group.entryMap),)
if __name__=='__main__':
main()
| bsd-3-clause |
yongshengwang/builthue | desktop/core/ext-py/PyYAML-3.09/tests/lib3/test_constructor.py | 57 | 8744 |
import yaml
import pprint
import datetime
import yaml.tokens
def execute(code):
global value
exec(code)
return value
def _make_objects():
global MyLoader, MyDumper, MyTestClass1, MyTestClass2, MyTestClass3, YAMLObject1, YAMLObject2, \
AnObject, AnInstance, AState, ACustomState, InitArgs, InitArgsWithState, \
NewArgs, NewArgsWithState, Reduce, ReduceWithState, MyInt, MyList, MyDict, \
FixedOffset, today, execute
class MyLoader(yaml.Loader):
pass
class MyDumper(yaml.Dumper):
pass
class MyTestClass1:
def __init__(self, x, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
if isinstance(other, MyTestClass1):
                return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
def construct1(constructor, node):
mapping = constructor.construct_mapping(node)
return MyTestClass1(**mapping)
def represent1(representer, native):
return representer.represent_mapping("!tag1", native.__dict__)
yaml.add_constructor("!tag1", construct1, Loader=MyLoader)
yaml.add_representer(MyTestClass1, represent1, Dumper=MyDumper)
class MyTestClass2(MyTestClass1, yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = "!tag2"
def from_yaml(cls, constructor, node):
x = constructor.construct_yaml_int(node)
return cls(x=x)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_scalar(cls.yaml_tag, str(native.x))
to_yaml = classmethod(to_yaml)
class MyTestClass3(MyTestClass2):
yaml_tag = "!tag3"
def from_yaml(cls, constructor, node):
mapping = constructor.construct_mapping(node)
if '=' in mapping:
x = mapping['=']
del mapping['=']
mapping['x'] = x
return cls(**mapping)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_mapping(cls.yaml_tag, native.__dict__)
to_yaml = classmethod(to_yaml)
class YAMLObject1(yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = '!foo'
def __init__(self, my_parameter=None, my_another_parameter=None):
self.my_parameter = my_parameter
self.my_another_parameter = my_another_parameter
def __eq__(self, other):
if isinstance(other, YAMLObject1):
                return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
class YAMLObject2(yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = '!bar'
def __init__(self, foo=1, bar=2, baz=3):
self.foo = foo
self.bar = bar
self.baz = baz
def __getstate__(self):
return {1: self.foo, 2: self.bar, 3: self.baz}
def __setstate__(self, state):
self.foo = state[1]
self.bar = state[2]
self.baz = state[3]
def __eq__(self, other):
if isinstance(other, YAMLObject2):
                return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
class AnObject:
def __new__(cls, foo=None, bar=None, baz=None):
self = object.__new__(cls)
self.foo = foo
self.bar = bar
self.baz = baz
return self
def __cmp__(self, other):
return cmp((type(self), self.foo, self.bar, self.baz),
(type(other), other.foo, other.bar, other.baz))
def __eq__(self, other):
return type(self) is type(other) and \
(self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
class AnInstance:
def __init__(self, foo=None, bar=None, baz=None):
self.foo = foo
self.bar = bar
self.baz = baz
def __cmp__(self, other):
return cmp((type(self), self.foo, self.bar, self.baz),
(type(other), other.foo, other.bar, other.baz))
def __eq__(self, other):
return type(self) is type(other) and \
(self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
class AState(AnInstance):
def __getstate__(self):
return {
'_foo': self.foo,
'_bar': self.bar,
'_baz': self.baz,
}
def __setstate__(self, state):
self.foo = state['_foo']
self.bar = state['_bar']
self.baz = state['_baz']
class ACustomState(AnInstance):
def __getstate__(self):
return (self.foo, self.bar, self.baz)
def __setstate__(self, state):
self.foo, self.bar, self.baz = state
class NewArgs(AnObject):
def __getnewargs__(self):
return (self.foo, self.bar, self.baz)
def __getstate__(self):
return {}
class NewArgsWithState(AnObject):
def __getnewargs__(self):
return (self.foo, self.bar)
def __getstate__(self):
return self.baz
def __setstate__(self, state):
self.baz = state
InitArgs = NewArgs
InitArgsWithState = NewArgsWithState
class Reduce(AnObject):
def __reduce__(self):
return self.__class__, (self.foo, self.bar, self.baz)
class ReduceWithState(AnObject):
def __reduce__(self):
return self.__class__, (self.foo, self.bar), self.baz
def __setstate__(self, state):
self.baz = state
class MyInt(int):
def __eq__(self, other):
return type(self) is type(other) and int(self) == int(other)
class MyList(list):
def __init__(self, n=1):
self.extend([None]*n)
def __eq__(self, other):
return type(self) is type(other) and list(self) == list(other)
class MyDict(dict):
def __init__(self, n=1):
for k in range(n):
self[k] = None
def __eq__(self, other):
return type(self) is type(other) and dict(self) == dict(other)
class FixedOffset(datetime.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return datetime.timedelta(0)
today = datetime.date.today()
def _load_code(expression):
return eval(expression)
def _serialize_value(data):
if isinstance(data, list):
return '[%s]' % ', '.join(map(_serialize_value, data))
elif isinstance(data, dict):
items = []
for key, value in data.items():
key = _serialize_value(key)
value = _serialize_value(value)
items.append("%s: %s" % (key, value))
items.sort()
return '{%s}' % ', '.join(items)
elif isinstance(data, datetime.datetime):
return repr(data.utctimetuple())
elif isinstance(data, float) and data != data:
return '?'
else:
return str(data)
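# Example (illustrative only): _serialize_value({'a': [1, 2]}) returns the
# canonical string "{a: [1, 2]}", so structurally equal values loaded from
# YAML and built in Python code can be compared as plain strings.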
def test_constructor_types(data_filename, code_filename, verbose=False):
_make_objects()
native1 = None
native2 = None
try:
native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
if len(native1) == 1:
native1 = native1[0]
native2 = _load_code(open(code_filename, 'rb').read())
try:
if native1 == native2:
return
except TypeError:
pass
if verbose:
print("SERIALIZED NATIVE1:")
print(_serialize_value(native1))
print("SERIALIZED NATIVE2:")
print(_serialize_value(native2))
assert _serialize_value(native1) == _serialize_value(native2), (native1, native2)
finally:
if verbose:
print("NATIVE1:")
pprint.pprint(native1)
print("NATIVE2:")
pprint.pprint(native2)
test_constructor_types.unittest = ['.data', '.code']
if __name__ == '__main__':
import sys, test_constructor
sys.modules['test_constructor'] = sys.modules['__main__']
import test_appliance
test_appliance.run(globals())
| apache-2.0 |