repo_name | path | copies | size | content | license
---|---|---|---|---|---|
dproc/trex_odp_porting_integration | scripts/automation/trex_control_plane/client_utils/general_utils.py | 2 | 2206 | #!/router/bin/python
import sys
import site
import string
import random
import os
try:
import pwd
except ImportError:
import getpass
pwd = None
using_python_3 = True if sys.version_info.major == 3 else False
def user_input():
if using_python_3:
return input()
else:
# using python version 2
return raw_input()
def get_current_user():
if pwd:
return pwd.getpwuid(os.geteuid()).pw_name
else:
return getpass.getuser()
def import_module_list_by_path (modules_list):
assert(isinstance(modules_list, list))
for full_path in modules_list:
site.addsitedir(full_path)
def find_path_to_pardir (pardir, base_path = os.getcwd() ):
"""
Finds the absolute path of the parent directory `pardir`, starting from `base_path`.
:parameters:
pardir : str
name of an upper-level directory to which we want to find an absolute path for
base_path : str
a full (usually nested) path from which we want to find a parent folder.
default value : **current working dir**
:return:
string representation of the full path to the requested parent directory `pardir`
"""
components = base_path.split(os.sep)
return str.join(os.sep, components[:components.index(pardir)+1])
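# Illustrative usage (added for documentation; not part of the original TRex
# module). The path below is hypothetical; find_path_to_pardir() simply trims
# `base_path` back to the named parent directory.
def _example_find_path_to_pardir():
    base = os.sep.join(['', 'opt', 'trex', 'automation', 'client_utils'])
    return find_path_to_pardir('automation', base)  # -> /opt/trex/automation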
def random_id_gen(length=8):
"""
A generator that yields random character ids of the specified length
:parameters:
length : int
the desired length of the generated id
default: 8
:return:
a random id with each next() request.
"""
id_chars = string.ascii_lowercase + string.digits
while True:
return_id = ''
for i in range(length):
return_id += random.choice(id_chars)
yield return_id
def id_count_gen():
"""
A generator for creating an increasing id for objects, starting from 0
:parameters:
None
:return:
an id (unsigned int) with each next() request.
"""
return_id = 0
while True:
yield return_id
return_id += 1
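# Illustrative usage of the two generators above (added for documentation; not
# part of the original module). Both are consumed with next().
def _example_id_generators():
    rand_gen = random_id_gen(length=4)   # e.g. 'a3f9', 'k2m1', ... per next()
    count_gen = id_count_gen()           # 0, 1, 2, ... per next()
    return next(rand_gen), next(count_gen)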
if __name__ == "__main__":
pass
| apache-2.0 |
antotodd/info3180lab4 | lib/werkzeug/_compat.py | 448 | 6184 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
int_to_byte = chr
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
iter_bytes = lambda x: iter(x)
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return lambda x: x
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return str(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
def iter_bytes(b):
return map(int_to_byte, b)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return lambda x: x
return lambda x: x.encode('latin1')
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
def wsgi_get_bytes(s):
return s.encode('latin1')
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s.decode('latin1', errors)
return s.encode(charset).decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
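# Minimal sketch (added for documentation; not part of Werkzeug itself) showing
# that one call site behaves consistently under both interpreters, because the
# compatibility helpers above resolve to matching implementations.
def _example_native_round_trip():
    data = to_bytes(u'caf\xe9', charset='utf-8')  # bytes on both majors
    text = to_unicode(data, charset='utf-8')      # u'caf\xe9'
    return to_native(text, charset='utf-8')       # native str on PY2 and PY3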
| apache-2.0 |
david-ragazzi/nupic | tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/a/description.py | 42 | 1569 |
# ----------------------------------------------------------------------
# Copyright (C) 2011 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tpParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 250,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
yfried/ansible | lib/ansible/modules/cloud/vmware/vcenter_license.py | 27 | 5353 | #!/usr/bin/python
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: vcenter_license
short_description: Manage VMware vCenter license keys
description:
- Add and delete vCenter license keys.
version_added: '2.4'
author:
- Dag Wieers (@dagwieers)
requirements:
- pyVmomi
options:
labels:
description:
- The optional labels of the license key to manage in vSphere vCenter.
- This is a dictionary with key/value pairs.
default: {
'source': 'ansible'
}
license:
description:
- The license key to manage in vSphere vCenter.
required: yes
state:
description:
- Whether to add (C(present)) or remove (C(absent)) the license key.
choices: [absent, present]
default: present
notes:
- This module will also auto-assign the current vCenter to the license key
if the product matches the license key, and vCenter is currently assigned
an evaluation license only.
- The evaluation license (00000-00000-00000-00000-00000) is not listed
when unused.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Add a new vCenter license
vcenter_license:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
license: f600d-21ae3-5592b-249e0-cc341
state: present
delegate_to: localhost
- name: Remove an (unused) vCenter license
vcenter_license:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
license: f600d-21ae3-5592b-249e0-cc341
state: absent
delegate_to: localhost
'''
RETURN = r'''
licenses:
description: list of license keys after module executed
returned: always
type: list
sample:
- f600d-21ae3-5592b-249e0-cc341
- 143cc-0e942-b2955-3ea12-d006f
'''
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import connect_to_api, vmware_argument_spec
def find_key(licenses, license):
for item in licenses:
if item.licenseKey == license:
return item
return None
def list_keys(licenses):
keys = []
for item in licenses:
# Filter out evaluation license key
if item.used is None:
continue
keys.append(item.licenseKey)
return keys
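# Minimal sketch (added for illustration; not part of the original module):
# find_key() and list_keys() only rely on the .licenseKey and .used attributes
# of the pyVmomi license objects, so simple stand-in objects show the behaviour.
def _example_key_helpers():
    class FakeKey(object):
        def __init__(self, licenseKey, used):
            self.licenseKey = licenseKey
            self.used = used
    keys = [FakeKey('00000-00000-00000-00000-00000', None),   # evaluation key
            FakeKey('f600d-21ae3-5592b-249e0-cc341', 1)]
    assert list_keys(keys) == ['f600d-21ae3-5592b-249e0-cc341']
    return find_key(keys, 'f600d-21ae3-5592b-249e0-cc341')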
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
labels=dict(type='dict', default=dict(source='ansible')),
license=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
license = module.params['license']
state = module.params['state']
# FIXME: This does not seem to work on vCenter v6.0
labels = []
for k in module.params['labels']:
kv = vim.KeyValue()
kv.key = k
kv.value = module.params['labels'][k]
labels.append(kv)
result = dict(
changed=False,
diff=dict(),
)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
content = connect_to_api(module)
lm = content.licenseManager
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['before'] = '\n'.join(result['licenses']) + '\n'
if state == 'present' and license not in result['licenses']:
result['changed'] = True
if module.check_mode:
result['licenses'].append(license)
else:
lm.AddLicense(license, labels)
# Automatically assign to current vCenter, if needed
key = find_key(lm.licenses, license)
if content.about.name in key.name:
try:
lam = lm.licenseAssignmentManager
lam.UpdateAssignedLicense(entity=content.about.instanceUuid, licenseKey=license)
except:
module.warn('Could not assign "%s" (%s) to vCenter.' % (license, key.name))
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
elif state == 'absent' and license in result['licenses']:
# Check if key is in use
key = find_key(lm.licenses, license)
if key.used > 0:
module.fail_json(msg='Cannot remove key "%s", still in use %s time(s).' % (license, key.used))
result['changed'] = True
if module.check_mode:
result['licenses'].remove(license)
else:
lm.RemoveLicense(license)
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
y-asano/primecloud-controller | iaas-gw/src/iaasgw/controller/cloudStack/cloudStackInstanceController.py | 5 | 14487 | # coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.exception.iaasException import IaasException
from iaasgw.log.log import IaasLogger
from iaasgw.utils.propertyUtil import getImage, getScriptProperty, getDnsProperty, getPuppetProperty, getVpnProperty
from iaasgw.utils.stringUtils import isEmpty, isNotEmpty, startsWithIgnoreCase
class CloudStackInstanceController(object):
client = None
conn = None
logger = IaasLogger()
platforminfo = None
def __init__(self, platforminfo, ec2iaasclient, conn):
self.client = ec2iaasclient
self.conn = conn
self.platforminfo = platforminfo
def startInstance(self, instanceNo):
# Fetch the CLOUDSTACK_INSTANCE record
tableCSINS = self.conn.getTable("CLOUDSTACK_INSTANCE")
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_NO==instanceNo))
# Fetch the PCC_INSTANCE record
tableINS = self.conn.getTable("INSTANCE")
pccInstance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
# Fetch the image (marked for reconsideration)
image = getImage(pccInstance["IMAGE_NO"])
#
if isEmpty(csInstance["INSTANCE_ID"]):
# Create the instance
self.run(instanceNo, csInstance, pccInstance, image)
# If the image is Windows
if (startsWithIgnoreCase(image["os"], "windows")):
# Re-fetch CLOUDSTACK_INSTANCE to obtain the INSTANCE_ID
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_NO==instanceNo))
self.client.getPasswordData(csInstance["INSTANCE_ID"])
else:
# Skip if the instance is not stopped
if (csInstance["STATE"] != "Stopped"):
return;
# Start the instance
self.start(instanceNo, csInstance, pccInstance)
def stopInstance(self, instanceNo):
# Fetch the CLOUDSTACK_INSTANCE record
tableCSINS = self.conn.getTable("CLOUDSTACK_INSTANCE")
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_NO==instanceNo))
# Fetch the PCC_INSTANCE record
tableINS = self.conn.getTable("INSTANCE")
pccInstance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
# If there is no instance ID, check for an existing instance
if (isEmpty(csInstance["INSTANCE_ID"])):
# Guard against instances whose startup was not recorded
nodes = self.client.describeInstances(name = pccInstance["INSTANCE_NAME"])
if not nodes or len(nodes) == 0:
# The instance does not exist
return;
if len(nodes) >= 1:
# Compare FQDNs
for node in nodes:
if pccInstance["FQDN"] == node.extra["displayname"]:
# Found an instance whose startup was not recorded
# Update the ID information
csInstance["INSTANCE_ID"] = node.id
sql = tableCSINS.update(tableCSINS.c.INSTANCE_NO ==csInstance["INSTANCE_NO"], values=csInstance)
self.conn.execute(sql)
# Stop the instance
self.stop(instanceNo, csInstance, pccInstance)
####################################################################################
#--------------------- Local helpers -----------------------------------------------
####################################################################################
def run(self, instanceNo, csInstance, pccInstance, image):
# Convert the service offering name to an ID
serviceofferings = self.client.describeServiceOfferings()
# Default to the first matching ID
serviceofferingid = serviceofferings[0]["id"]
for serviceoffering in serviceofferings:
if csInstance["INSTANCE_TYPE"] == serviceoffering["name"]:
serviceofferingid = serviceoffering["id"]
availabilityZone = None
if (isNotEmpty(csInstance["ZONEID"])):
availabilityZone = csInstance["ZONEID"]
# Optional settings start here; add more as needed
extra_args = {}
if (isNotEmpty(csInstance["NETWORKID"])):
extra_args["network_id"] = csInstance["NETWORKID"]
#SecurityGroup
securityGroups = []
if (isNotEmpty(csInstance["SECURITYGROUP"])):
securityGroups.append(csInstance["SECURITYGROUP"].split(","))
extra_args["securitygroupnames"] = securityGroups
if (isNotEmpty(csInstance["KEY_NAME"])):
extra_args["keypair"] = csInstance["KEY_NAME"]
# Build the user data
userData = self.createUserData(instanceNo, pccInstance, csInstance)
userData = self.makeUserData(userData)
extra_args["userdata"] = userData
self.logger.info("userData:"+userData)
# Write an event log entry
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceCreate",["CLOUDSTACK"])
# Create the instance
node = self.client.runInstances(pccInstance["INSTANCE_NAME"],
pccInstance["FQDN"],
serviceofferingid,
image["templateId"],
availabilityZone,
**extra_args)
if node["state"] != "Running":
# Instance creation failed
raise IaasException("EPROCESS-000716", [node["id"], node["state"]])
# Log output
self.logger.info(None, "IPROCESS-100603", [node["id"],])
# Write an event log entry
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceCreateFinish",["CLOUDSTACK", node["id"]])
# Update the database
table = self.conn.getTable("CLOUDSTACK_INSTANCE")
updateDict = self.conn.selectOne(table.select(table.c.INSTANCE_NO==instanceNo))
updateDict["INSTANCE_ID"] = node["id"]
updateDict["ZONEID"] = node["zoneid"]
updateDict["STATE"] = node["state"]
updateDict["DISPLAYNAME"] = node["displayname"]
updateDict["IPADDRESS"] = node["nic"][0]["ipaddress"]
sql = table.update(table.c.INSTANCE_NO ==updateDict["INSTANCE_NO"], values=updateDict)
self.conn.execute(sql)
def start(self, instanceNo, csInstance, pccInstance):
instanceId = csInstance["INSTANCE_ID"]
# Write an event log entry
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStart",["CLOUDSTACK", instanceId])
# Convert the service offering name to an ID
serviceofferings = self.client.describeServiceOfferings()
# Default to the first matching ID
serviceofferingid = serviceofferings[0]["id"]
for serviceoffering in serviceofferings:
if csInstance["INSTANCE_TYPE"] == serviceoffering["name"]:
serviceofferingid = serviceoffering["id"]
# Check whether the service offering has changed
node = self.client.describeInstance(instanceId)
if node.extra["serviceofferingid"] != serviceofferingid:
# Change the service offering
node = self.client.changeInstance(instanceId, serviceofferingid);
# Start the instance
node = self.client.startInstance(instanceId);
if node["state"] != "Running":
# Instance start failed
raise IaasException("EPROCESS-000716", [instanceId, node["state"]])
# Log output
self.logger.info(None, "IPROCESS-100601", [instanceId,])
# Write an event log entry
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStartFinish",["CLOUDSTACK", instanceId])
# Update the database
table = self.conn.getTable("CLOUDSTACK_INSTANCE")
updateDict = self.conn.selectOne(table.select(table.c.INSTANCE_NO==instanceNo))
updateDict["ZONEID"] = node["zoneid"]
updateDict["STATE"] = node["state"]
updateDict["DISPLAYNAME"] = node["displayname"]
updateDict["IPADDRESS"] = node["nic"][0]["ipaddress"]
sql = table.update(table.c.INSTANCE_NO ==updateDict["INSTANCE_NO"], values=updateDict)
self.conn.execute(sql)
def stop(self, instanceNo, csInstance, pccInstance):
instanceId = csInstance["INSTANCE_ID"]
# Write an event log entry
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStop",["CLOUDSTACK", instanceId])
# Stop the instance
node = self.client.stopInstance(instanceId);
if node["state"] != "Stopped":
# Instance stop failed
raise IaasException("EPROCESS-000718", [instanceId, node["state"]])
# Log output
self.logger.info(None, "IPROCESS-100602", [instanceId,])
# Write an event log entry
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStopFinish",["CLOUDSTACK", instanceId])
# Update the database
table = self.conn.getTable("CLOUDSTACK_INSTANCE")
updateDict = self.conn.selectOne(table.select(table.c.INSTANCE_NO==instanceNo))
updateDict["ZONEID"] = node["zoneid"]
updateDict["STATE"] = node["state"]
updateDict["DISPLAYNAME"] = node["displayname"]
updateDict["IPADDRESS"] = node["nic"][0]["ipaddress"]
sql = table.update(table.c.INSTANCE_NO ==updateDict["INSTANCE_NO"], values=updateDict)
self.conn.execute(sql)
def terminate(self, instanceId):
# Fetch the CLOUDSTACK_INSTANCE record
tableCSINS = self.conn.getTable("CLOUDSTACK_INSTANCE")
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_ID==instanceId))
if isEmpty(instanceId):
# Return as-is if no instance ID was given
return
# Terminate the instance
node = self.client.terminateInstance(instanceId)
# Log output
self.logger.info(None, "IPROCESS-100604", [instanceId,])
# Update the database
csInstance["ZONEID"] = None
csInstance["STATE"] = node["state"]
csInstance["DISPLAYNAME"] = None
csInstance["IPADDRESS"] = None
sql = tableCSINS.update(tableCSINS.c.INSTANCE_NO ==csInstance["INSTANCE_NO"], values=csInstance)
self.conn.execute(sql)
def createUserData(self, instanceNo, pccInstance, csInstance):
table = self.conn.getTable("FARM")
fram = self.conn.selectOne(table.select(table.c.FARM_NO==pccInstance["FARM_NO"]))
# Build the user data
userData = {}
# DB information
userData.update({"instanceName": pccInstance["INSTANCE_NAME"]})
userData.update({"farmName": fram["FARM_NAME"]})
# FQDN
userData.update({"hostname": pccInstance["FQDN"]})
# Initial script information
userData.update({"scriptserver": getScriptProperty("script.server")})
# DNS information
userData.update(self.createDnsUserData(instanceNo))
# Puppet information
userData.update(self.createPuppetUserData())
# VPN information
internal = self.platforminfo["internal"]
if (internal == 0):
userData.update(self.createVpnUserData(pccInstance))
return userData;
def createDnsUserData(self,instanceNo):
# Build the user data
userData = {}
# Primary DNS server
userData.update({"dns": getDnsProperty("dns.server")})
# Secondary DNS server
dns2 = getDnsProperty("dns.server2")
if (isNotEmpty(dns2)):
userData.update({"dns2": dns2})
# DNS domain
userData.update({"dnsdomain": getDnsProperty("dns.domain")})
return userData;
def createPuppetUserData(self):
# Build the user data
userData = {}
# Puppet master information
userData.update({"puppetmaster": getPuppetProperty("puppet.masterHost")})
return userData;
def createVpnUserData(self, pccInstance):
# Build the user data
userData = {}
# Set the user and password from the VPN information
userData.update({"vpnuser": pccInstance["FQDN"]})
userData.update({"vpnuserpass": pccInstance["INSTANCE_CODE"]})
# VPN server information
userData.update({"vpnserver": getVpnProperty("vpn.server")})
userData.update({"vpnport": getVpnProperty("vpn.port")})
# userData.update({"vpnuser": getVpnProperty("vpn.user")})
# userData.update({"vpnuserpass": getVpnProperty("vpn.userpass")})
# ZIP password
userData.update({"vpnzippass": getVpnProperty("vpn.zippass")})
# Client certificate download URL
userData.update({"vpnclienturl": getVpnProperty("vpn.clienturl")})
return userData;
def makeUserData(self, map):
if not map or len(map) == 0:
return None
userdata = ''
for key in map.keys():
value = map[key]
if isNotEmpty(value):
if userdata != '':
userdata = userdata + ';'
userdata = userdata + key + "=" + value
return userdata
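# Illustrative note (added; not part of the original controller): makeUserData()
# serialises the dict built by createUserData() into a single semicolon-separated
# "key=value" string, skipping empty values. With hypothetical values:
#
#   {'hostname': 'web01.example.local', 'dns': '10.0.0.2'}
#   -> 'hostname=web01.example.local;dns=10.0.0.2'   (order follows dict keys)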
| gpl-2.0 |
rasa/scoops | makeindex.py | 1 | 13169 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" @todo add docstring """
# ### imports ###
from __future__ import (
absolute_import,
division,
print_function # ,
# unicode_literals
)
import fnmatch
import io
import json
import re
import os
# import pprint
import subprocess
import sys
OSIS = [
'0BSD',
'AAL',
'Abstyles',
'Adobe-2006',
'Adobe-Glyph',
'ADSL',
'AFL-1.1',
'AFL-1.2',
'AFL-2.0',
'AFL-2.1',
'AFL-3.0',
'Afmparse',
'AGPL-1.0',
'AGPL-1.0-only',
'AGPL-1.0-or-later',
'AGPL-3.0',
'AGPL-3.0-only',
'AGPL-3.0-or-later',
'Aladdin',
'AMDPLPA',
'AML',
'AMPAS',
'ANTLR-PD',
'Apache-1.0',
'Apache-1.1',
'Apache-2.0',
'APAFML',
'APL-1.0',
'APSL-1.0',
'APSL-1.1',
'APSL-1.2',
'APSL-2.0',
'Artistic-1.0',
'Artistic-1.0-cl8',
'Artistic-1.0-Perl',
'Artistic-2.0',
'Bahyph',
'Barr',
'Beerware',
'BitTorrent-1.0',
'BitTorrent-1.1',
'Borceux',
'BSD-1-Clause',
'BSD-2-Clause',
'BSD-2-Clause-FreeBSD',
'BSD-2-Clause-NetBSD',
'BSD-2-Clause-Patent',
'BSD-3-Clause',
'BSD-3-Clause-Attribution',
'BSD-3-Clause-Clear',
'BSD-3-Clause-LBNL',
'BSD-3-Clause-No-Nuclear-License',
'BSD-3-Clause-No-Nuclear-License-2014',
'BSD-3-Clause-No-Nuclear-Warranty',
'BSD-4-Clause',
'BSD-4-Clause-UC',
'BSD-Protection',
'BSD-Source-Code',
'BSL-1.0',
'bzip2-1.0.5',
'bzip2-1.0.6',
'Caldera',
'CATOSL-1.1',
'CC-BY-1.0',
'CC-BY-2.0',
'CC-BY-2.5',
'CC-BY-3.0',
'CC-BY-4.0',
'CC-BY-NC-1.0',
'CC-BY-NC-2.0',
'CC-BY-NC-2.5',
'CC-BY-NC-3.0',
'CC-BY-NC-4.0',
'CC-BY-NC-ND-1.0',
'CC-BY-NC-ND-2.0',
'CC-BY-NC-ND-2.5',
'CC-BY-NC-ND-3.0',
'CC-BY-NC-ND-4.0',
'CC-BY-NC-SA-1.0',
'CC-BY-NC-SA-2.0',
'CC-BY-NC-SA-2.5',
'CC-BY-NC-SA-3.0',
'CC-BY-NC-SA-4.0',
'CC-BY-ND-1.0',
'CC-BY-ND-2.0',
'CC-BY-ND-2.5',
'CC-BY-ND-3.0',
'CC-BY-ND-4.0',
'CC-BY-SA-1.0',
'CC-BY-SA-2.0',
'CC-BY-SA-2.5',
'CC-BY-SA-3.0',
'CC-BY-SA-4.0',
'CC0-1.0',
'CDDL-1.0',
'CDDL-1.1',
'CDLA-Permissive-1.0',
'CDLA-Sharing-1.0',
'CECILL-1.0',
'CECILL-1.1',
'CECILL-2.0',
'CECILL-2.1',
'CECILL-B',
'CECILL-C',
'ClArtistic',
'CNRI-Jython',
'CNRI-Python',
'CNRI-Python-GPL-Compatible',
'Condor-1.1',
'CPAL-1.0',
'CPL-1.0',
'CPOL-1.02',
'Crossword',
'CrystalStacker',
'CUA-OPL-1.0',
'Cube',
'curl',
'D-FSL-1.0',
'diffmark',
'DOC',
'Dotseqn',
'DSDP',
'dvipdfm',
'ECL-1.0',
'ECL-2.0',
'eCos-2.0',
'EFL-1.0',
'EFL-2.0',
'eGenix',
'Entessa',
'EPL-1.0',
'EPL-2.0',
'ErlPL-1.1',
'EUDatagrid',
'EUPL-1.0',
'EUPL-1.1',
'EUPL-1.2',
'Eurosym',
'Fair',
'Frameworx-1.0',
'FreeImage',
'FSFAP',
'FSFUL',
'FSFULLR',
'FTL',
'GFDL-1.1',
'GFDL-1.1-only',
'GFDL-1.1-or-later',
'GFDL-1.2',
'GFDL-1.2-only',
'GFDL-1.2-or-later',
'GFDL-1.3',
'GFDL-1.3-only',
'GFDL-1.3-or-later',
'Giftware',
'GL2PS',
'Glide',
'Glulxe',
'gnuplot',
'GPL-1.0',
'GPL-1.0+',
'GPL-1.0-only',
'GPL-1.0-or-later',
'GPL-2.0',
'GPL-2.0+',
'GPL-2.0-only',
'GPL-2.0-or-later',
'GPL-2.0-with-autoconf-exception',
'GPL-2.0-with-bison-exception',
'GPL-2.0-with-classpath-exception',
'GPL-2.0-with-font-exception',
'GPL-2.0-with-GCC-exception',
'GPL-3.0',
'GPL-3.0+',
'GPL-3.0-only',
'GPL-3.0-or-later',
'GPL-3.0-with-autoconf-exception',
'GPL-3.0-with-GCC-exception',
'gSOAP-1.3b',
'HaskellReport',
'HPND',
'IBM-pibs',
'ICU',
'IJG',
'ImageMagick',
'iMatix',
'Imlib2',
'Info-ZIP',
'Intel',
'Intel-ACPI',
'Interbase-1.0',
'IPA',
'IPL-1.0',
'ISC',
'JasPer-2.0',
'JSON',
'LAL-1.2',
'LAL-1.3',
'Latex2e',
'Leptonica',
'LGPL-2.0',
'LGPL-2.0+',
'LGPL-2.0-only',
'LGPL-2.0-or-later',
'LGPL-2.1',
'LGPL-2.1+',
'LGPL-2.1-only',
'LGPL-2.1-or-later',
'LGPL-3.0',
'LGPL-3.0+',
'LGPL-3.0-only',
'LGPL-3.0-or-later',
'LGPLLR',
'Libpng',
'libtiff',
'LiLiQ-P-1.1',
'LiLiQ-R-1.1',
'LiLiQ-Rplus-1.1',
'Linux-OpenIB',
'LPL-1.0',
'LPL-1.02',
'LPPL-1.0',
'LPPL-1.1',
'LPPL-1.2',
'LPPL-1.3a',
'LPPL-1.3c',
'MakeIndex',
'MirOS',
'MIT',
'MIT-0',
'MIT-advertising',
'MIT-CMU',
'MIT-enna',
'MIT-feh',
'MITNFA',
'Motosoto',
'mpich2',
'MPL-1.0',
'MPL-1.1',
'MPL-2.0',
'MPL-2.0-no-copyleft-exception',
'MS-PL',
'MS-RL',
'MTLL',
'Multics',
'Mup',
'NASA-1.3',
'Naumen',
'NBPL-1.0',
'NCSA',
'Net-SNMP',
'NetCDF',
'Newsletr',
'NGPL',
'NLOD-1.0',
'NLPL',
'Nokia',
'NOSL',
'Noweb',
'NPL-1.0',
'NPL-1.1',
'NPOSL-3.0',
'NRL',
'NTP',
'Nunit',
'OCCT-PL',
'OCLC-2.0',
'ODbL-1.0',
'OFL-1.0',
'OFL-1.1',
'OGTSL',
'OLDAP-1.1',
'OLDAP-1.2',
'OLDAP-1.3',
'OLDAP-1.4',
'OLDAP-2.0',
'OLDAP-2.0.1',
'OLDAP-2.1',
'OLDAP-2.2',
'OLDAP-2.2.1',
'OLDAP-2.2.2',
'OLDAP-2.3',
'OLDAP-2.4',
'OLDAP-2.5',
'OLDAP-2.6',
'OLDAP-2.7',
'OLDAP-2.8',
'OML',
'OpenSSL',
'OPL-1.0',
'OSET-PL-2.1',
'OSL-1.0',
'OSL-1.1',
'OSL-2.0',
'OSL-2.1',
'OSL-3.0',
'PDDL-1.0',
'PHP-3.0',
'PHP-3.01',
'Plexus',
'PostgreSQL',
'psfrag',
'psutils',
'Python-2.0',
'Qhull',
'QPL-1.0',
'Rdisc',
'RHeCos-1.1',
'RPL-1.1',
'RPL-1.5',
'RPSL-1.0',
'RSA-MD',
'RSCPL',
'Ruby',
'SAX-PD',
'Saxpath',
'SCEA',
'Sendmail',
'SGI-B-1.0',
'SGI-B-1.1',
'SGI-B-2.0',
'SimPL-2.0',
'SISSL',
'SISSL-1.2',
'Sleepycat',
'SMLNJ',
'SMPPL',
'SNIA',
'Spencer-86',
'Spencer-94',
'Spencer-99',
'SPL-1.0',
'StandardML-NJ',
'SugarCRM-1.1.3',
'SWL',
'TCL',
'TCP-wrappers',
'TMate',
'TORQUE-1.1',
'TOSL',
'Unicode-DFS-2015',
'Unicode-DFS-2016',
'Unicode-TOU',
'Unlicense',
'UPL-1.0',
'Vim',
'VOSTROM',
'VSL-1.0',
'W3C',
'W3C-19980720',
'W3C-20150513',
'Watcom-1.0',
'Wsuipa',
'WTFPL',
'wxWindows',
'X11',
'Xerox',
'XFree86-1.1',
'xinetd',
'Xnet',
'xpp',
'XSkat',
'YPL-1.0',
'YPL-1.1',
'Zed',
'Zend-2.0',
'Zimbra-1.3',
'Zimbra-1.4',
'Zlib',
'zlib-acknowledgement',
'ZPL-1.1',
'ZPL-2.0',
'ZPL-2.1',
'389-exception',
'Autoconf-exception-2.0',
'Autoconf-exception-3.0',
'Bison-exception-2.2',
'Bootloader-exception',
'Classpath-exception-2.0',
'CLISP-exception-2.0',
'DigiRule-FOSS-exception',
'eCos-exception-2.0',
'Fawkes-Runtime-exception',
'FLTK-exception',
'Font-exception-2.0',
'freertos-exception-2.0',
'GCC-exception-2.0',
'GCC-exception-3.1',
'gnu-javamail-exception',
'i2p-gpl-java-exception',
'Libtool-exception',
'Linux-syscall-note',
'LLVM-exception',
'LZMA-exception',
'mif-exception',
'Nokia-Qt-exception-1.1',
'OCCT-exception-1.0',
'OpenJDK-assembly-exception-1.0',
'openvpn-openssl-exception',
'Qt-GPL-exception-1.0',
'Qt-LGPL-exception-1.1',
'Qwt-exception-1.0',
'u-boot-exception-2.0',
'WxWindows-exception-3.1'
]
OSImap = {}
for osi in OSIS:
OSImap[osi.lower()] = 'https://opensource.org/licenses/%s' % osi
lmap = {
'commercial': 'https://en.m.wikipedia.org/wiki/Software_license#Proprietary_software_licenses',
'freeware': 'https://en.wikipedia.org/wiki/Freeware',
'proprietary': 'https://en.m.wikipedia.org/wiki/Software_license#Proprietary_software_licenses',
'public_domain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'public domain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'public-domain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'publicdomain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'shareware': 'https://en.wikipedia.org/wiki/Shareware',
}
def do_license(v):
""" doc me """
url = v
if 'identifier' in v:
identifier = v['identifier']
else:
identifier = ''
if 'url' in v:
url = v['url']
if re.search('^(http|ftp)', url):
if not identifier:
identifier = 'Link'
v = '[%s](%s "%s")' % (identifier, url, url)
return v
if not identifier:
identifier = url
parts = re.split(r'[,|\s]+', identifier)
v = ''
for part in parts:
if v > '':
v += '/'
url = ''
k = part.lower()
if k in OSImap:
url = OSImap[k]
elif lmap.get(k):
url = lmap[k]
if url > '':
v += '[%s](%s "%s")' % (part, url, url)
else:
v += part
return v
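# Worked examples (added for illustration): do_license() turns a licence value
# into a Markdown link when it recognises the identifier or is given a URL.
# The dict example below uses a hypothetical URL.
#
#   do_license('MIT')
#   -> '[MIT](https://opensource.org/licenses/MIT "https://opensource.org/licenses/MIT")'
#
#   do_license({'identifier': 'Freeware', 'url': 'https://example.com/eula'})
#   -> '[Freeware](https://example.com/eula "https://example.com/eula")'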
def get_url(js):
""" doc me """
if 'checkver' in js:
if 'url' in js['checkver']:
return js['checkver']['url']
if 'homepage' in js:
return js['homepage']
return ''
def do_version(js):
""" doc me """
version = js['version']
url = get_url(js)
if 'checkver' not in js:
version = '<i>%s</i>' % version
if url == '':
return version
return '[%s](%s "%s")' % (version, url, url)
# pylint: disable=R0912 # Too many branches (22/12) (too-many-branches)
# pylint: disable=R0915 # Too many statements (71/50) (too-many-statements)
def main():
""" doc me """
markdown = 'README.md'
print("Reading %s" % markdown)
with io.open(markdown, 'r', encoding='utf-8') as f:
lines = f.readlines()
for i, line in enumerate(lines):
lines[i] = str(line)
specs = sys.argv
specs.pop(0)
if len(specs) == 0:
specs = ['bucket/*.json']
keys = [
'checkver',
'description',
'homepage',
'license',
'version',
]
rows = {}
cmdline = ["git", "ls-files"]
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, shell=True)
(out, _) = proc.communicate()
files = out.splitlines()
for file in files:
file = file.decode("utf-8")
if re.search('wip/', file):
# print("skipping %s: wip" % file)
continue
accept = False
print("file=%s" % file)
for spec in specs:
# print("spec=%s" % spec)
if fnmatch.fnmatch(file, spec):
accept = True
break
if not accept:
# print("skipping %s: not matched" % file)
continue
with open(file, 'r') as f:
j = json.load(f)
row = {}
(name, _) = os.path.splitext(os.path.basename(file))
if re.search('^_', name):
# print("skipping %s: starts with _" % name)
continue
if re.search('^schema', name):
# print("skipping %s: starts with schema" % name)
continue
for key in keys:
if key in j:
val = j[key]
if type(val).__name__ == 'unicode':
val = val.strip()
if key == 'license':
val = do_license(val)
if key == 'version':
val = do_version(j)
row[key] = val
else:
row[key] = ''
rows[name] = row
table = [
'<!-- The following table was inserted by makeindex.py -->',
'<!-- Your edits will be lost the next time makeindex.py is run -->',
'|Name|Version|Description|License|',
'|----|-------|-----------|-------|'
]
newlist = [(key, rows[key]) for key in sorted(rows.keys())]
for (name, row) in newlist:
table.append('|[%s](%s "%s")|%s|%s|%s|' % (
name, row['homepage'], row['homepage'], row['version'], row['description'], row['license']))
out = []
found = False
for line in lines:
line = str(line.strip())
if found:
if re.match(r'^\s*<!--\s+</apps>\s+-->', line):
found = False
else:
continue
if re.match(r'^\s*<!--\s+<apps>\s+-->', line):
found = True
out.append(line)
out.extend(table)
continue
out.append(line)
print("Writing %s" % markdown)
with io.open(markdown + '.tmp', 'w', encoding='utf-8', newline='\n') as f:
data = "\n".join(out) + "\n"
f.write(data)
if os.path.exists(markdown + '.bak'):
os.remove(markdown + '.bak')
os.rename(markdown, markdown + '.bak')
os.rename(markdown + '.tmp', markdown)
main()
sys.exit(0)
| mit |
deepakbane28/nixysa | nixysa/pod_binding.py | 9 | 31775 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""pod binding model module.
This module implements the glue functions for the pod binding model, which is
used by POD types and strings (which are POD in JavaScript). 'void' is also
included here, although it is only used for return values (and raises an
exception otherwise).
In C++, objects using this binding model are passed and returned by value (or
by pointer when mutable), except strings which are passed by const reference
(and returned by copy).
For example:
void SetValues(int value, const string &name);
float GetValue();
string GetString();
For JS bindings, they are directly represented by variants.
"""
import string
import sys
import cpp_utils
import java_utils
CPP_POD_TO_JSDOC_TYPES = {
'int': 'number',
'std.string' : 'string',
'bool' : 'boolean',
'float' : 'number',
'double' : 'number',
'unsigned int' : 'number',
'size_t' : 'number',
'void' : 'void'}  # void is a special case; it's used for callbacks
class InvalidPODUsage(Exception):
"""Raised when POD type is used incorrectly."""
pass
class BadVoidUsage(Exception):
"""Raised when 'void' is used outside of a return value."""
pass
class UnknownPODType(Exception):
"""Raised when an unknown POD type is used."""
def __init__(self, name):
Exception.__init__(self)
self.name = name
def JavaMemberString(scope, type_defn):
"""Gets the representation of a member name in Java.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a string representing the type
"""
# TODO: Check if we need the check below for Java
#final_type = type_defn.GetFinalType()
#if final_type.podtype == 'void':
# raise BadVoidUsage
return java_utils.GetScopedName(scope, type_defn)
def CppTypedefString(scope, type_defn):
"""Gets the representation of a type when used in a C++ typedef.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
Raises:
BadVoidUsage: type_defn is a 'void' POD type.
"""
if type_defn.GetFinalType().podtype == 'void':
raise BadVoidUsage
return cpp_utils.GetScopedName(scope, type_defn), True
def CppMemberString(scope, type_defn):
"""Gets the representation of a type when used as a C++ class member.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
Raises:
BadVoidUsage: type_defn is a 'void' POD type.
"""
if type_defn.GetFinalType().podtype == 'void':
raise BadVoidUsage
return cpp_utils.GetScopedName(scope, type_defn), True
def CppReturnValueString(scope, type_defn):
"""Gets the representation of a type when used as a C++ function return value.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
"""
return cpp_utils.GetScopedName(scope, type_defn), True
def CppParameterString(scope, type_defn):
"""Gets the representation of a type when used for a function parameter.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the representation of
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
Raises:
BadVoidUsage: type_defn is a 'void' POD type.
"""
final_type = type_defn.GetFinalType()
if final_type.podtype == 'void':
raise BadVoidUsage
elif final_type.podtype == 'string' or final_type.podtype == 'wstring':
return 'const %s&' % cpp_utils.GetScopedName(scope, type_defn), True
else:
return cpp_utils.GetScopedName(scope, type_defn), True
def CppMutableParameterString(scope, type_defn):
"""Gets the representation of a type for a mutable function parameter.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the string representing
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
Raises:
BadVoidUsage: type_defn is a 'void' POD type.
"""
if type_defn.GetFinalType().podtype == 'void':
raise BadVoidUsage
return '%s*' % cpp_utils.GetScopedName(scope, type_defn), True
def CppMutableToNonMutable(scope, type_defn, expr):
"""Gets the string converting a mutable expression to a non-mutable one.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
expr: a string for the mutable expression.
Returns:
a string, which is the non-mutable expression.
"""
(scope, type_defn) = (scope, type_defn) # silence gpylint.
return '*(%s)' % expr
def CppBaseClassString(scope, type_defn):
"""Gets the representation of a type for a base class.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition for the type.
Returns:
a (string, boolean) pair, the first element being the string representing
the type, the second element indicating whether or not the definition of
the type is needed for the expression to be valid.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppCallMethod(scope, type_defn, object_expr, mutable, method, param_exprs):
"""Gets the representation of a member function call.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object being called.
object_expr: a string, which is the expression for the object being called.
mutable: a boolean, whether or not the 'object_expr' expression is mutable
or not
method: a Function, representing the function to call.
param_exprs: a list of strings, each being the expression for the value of
each parameter.
Returns:
a string, which is the expression for the function call.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppCallStaticMethod(scope, type_defn, method, param_exprs):
"""Gets the representation of a static function call.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object being called.
method: a Function, representing the function to call.
param_exprs: a list of strings, each being the expression for the value of
each parameter.
Returns:
a string, which is the expression for the function call.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppCallConstructor(scope, type_defn, method, param_exprs):
"""Gets the representation of a constructor call.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object being called.
method: a Function, representing the constructor to call.
param_exprs: a list of strings, each being the expression for the value of
each parameter.
Returns:
a string, which is the expression for the constructor call.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppSetField(scope, type_defn, object_expr, field, param_expr):
"""Gets the representation of an expression setting a field in an object.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being set.
object_expr: a string, which is the expression for the object containing
the field being set.
field: a string, the name of the field to be set.
param_expr: a strings, being the expression for the value to be set.
Returns:
a string, which is the expression for setting the field.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppGetField(scope, type_defn, object_expr, field):
"""Gets the representation of an expression getting a field in an object.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being retrieved.
object_expr: a string, which is the expression for the object containing
the field being retrieved.
field: a string, the name of the field to be retrieved.
Returns:
a string, which is the expression for getting the field.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppSetStatic(scope, type_defn, field, param_expr):
"""Gets the representation of an expression setting a static field.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being set.
field: a string, the name of the field to be set.
param_expr: a strings, being the expression for the value to be set.
Returns:
a string, which is the expression for setting the field.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def CppGetStatic(scope, type_defn, field):
"""Gets the representation of an expression getting a static field.
Args:
scope: a Definition for the scope in which the expression will be written.
type_defn: a Definition, representing the type of the object containing the
field being retrieved.
field: a string, the name of the field to be retrieved.
Returns:
a string, which is the expression for getting the field.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def JSDocTypeString(type_defn):
"""Gets the representation of a type in JSDoc notation.
Args:
type_defn: a Definition for the type.
Returns:
a string that is the JSDoc notation of type_defn.
"""
type_defn = type_defn.GetFinalType()
type_stack = type_defn.GetParentScopeStack()
name = type_defn.name
type_string = '.'.join([s.name for s in type_stack[1:]] + [name])
if type_string in CPP_POD_TO_JSDOC_TYPES:
return CPP_POD_TO_JSDOC_TYPES[type_string]
print >> sys.stderr, (
'ERROR: %s : Unknown C++ Pod to JSDoc type conversion for C++ type: %s' %
(type_defn.source, type_string))
return '*'
def NpapiBindingGlueHeader(scope, type_defn):
"""Gets the NPAPI glue header for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue header.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def NpapiBindingGlueCpp(scope, type_defn):
"""Gets the NPAPI glue implementation for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue implementation.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def NpapiDispatchFunctionHeader(scope, type_defn, variable, npp, success):
"""Gets a header for NPAPI glue dispatch functions.
This function creates a string containing a C++ code snippet that should be
included at the beginning of NPAPI glue dispatch functions like Invoke or
GetProperty. This code snippet will declare and initialize certain variables
that will be used in the dispatch functions, like the NPObject representing
the object, or a pointer to the NPP instance.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
variable: a string, representing a name of a variable that can be used to
store a reference to the object.
npp: a string, representing the name of the variable that holds the pointer
to the NPP instance. Will be declared by the code snippet.
success: the name of a bool variable containing the current success status.
(is not declared by the code snippet).
Returns:
a (string, string) pair, the first string being the code snippet, and the
second string being an expression to access the object.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
_wstring_from_npvariant_template = string.Template("""
${type} ${variable};
if (!NPVARIANT_IS_STRING(${input})) {
${success} = false;
*error_handle = "Error in " ${context}
": was expecting a string.";
} else if (!UTF8ToString16(NPVARIANT_TO_STRING(${input}).UTF8Characters,
NPVARIANT_TO_STRING(${input}).UTF8Length,
&${variable})) {
${success} = false;
*error_handle = "Error in " ${context}
": hit an unexpected unicode conversion problem.";
}
""")
_string_from_npvariant_template = string.Template("""
${type} ${variable};
if (NPVARIANT_IS_STRING(${input})) {
${variable} = ${type}(NPVARIANT_TO_STRING(${input}).UTF8Characters,
NPVARIANT_TO_STRING(${input}).UTF8Length);
} else {
${success} = false;
*error_handle = "Error in " ${context}
": was expecting a string.";
}
""")
_float_from_npvariant_template = string.Template("""
${type} ${variable} = 0.f;
if (NPVARIANT_IS_NUMBER(${input})) {
${variable} = static_cast<${type}>(NPVARIANT_TO_NUMBER(${input}));
} else {
*error_handle = "Error in " ${context}
": was expecting a number.";
${success} = false;
}
""")
_int_from_npvariant_template = string.Template("""
${type} ${variable} = 0;
if (NPVARIANT_IS_NUMBER(${input})) {
${variable} = static_cast<${type}>(NPVARIANT_TO_NUMBER(${input}));
} else {
*error_handle = "Error in " ${context}
": was expecting an int.";
${success} = false;
}
""")
_bool_from_npvariant_template = string.Template("""
${type} ${variable} = false;
if (NPVARIANT_IS_BOOLEAN(${input})) {
${variable} = NPVARIANT_TO_BOOLEAN(${input});
} else {
*error_handle = "Error in " ${context}
": was expecting a boolean.";
${success} = false;
}
""")
def NpapiFromNPVariant(scope, type_defn, input_expr, variable, success,
exception_context, npp):
"""Gets the string to get a value from a NPVariant.
This function creates a string containing a C++ code snippet that is used to
retrieve a value from a NPVariant. If an error occurs, like if the NPVariant
is not of the correct type, the snippet will set the success status variable
to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
input_expr: an expression representing the NPVariant to get the value from.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
success: the name of a bool variable containing the current success status.
exception_context: the name of a string containing context information, for
use in exception reporting.
npp: a string, representing the name of the variable that holds the pointer
to the NPP instance.
Returns:
a (string, string) pair, the first string being the code snippet and the
second one being the expression to access that value.
Raises:
BadVoidUsage: type_defn is a 'void' POD type.
UnknownPODType: type_defn is not a known POD type.
"""
npp = npp # silence gpylint.
type_name = cpp_utils.GetScopedName(scope, type_defn)
final_type = type_defn.GetFinalType()
if final_type.podtype == 'void':
return '', 'void(0)'
elif final_type.podtype == 'int':
text = _int_from_npvariant_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'bool':
text = _bool_from_npvariant_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'float':
text = _float_from_npvariant_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'variant':
return '%s %s(npp, %s);' % (type_name, variable, input_expr), variable
elif final_type.podtype == 'string':
text = _string_from_npvariant_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'wstring':
text = _wstring_from_npvariant_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
else:
raise UnknownPODType(final_type.podtype)
def NpapiExprToNPVariant(scope, type_defn, variable, expression, output,
success, npp):
"""Gets the string to store a value into a NPVariant.
This function creates a string containing a C++ code snippet that is used to
store a value into a NPVariant. That operation takes two phases, one that
allocates necessary NPAPI resources, and that can fail, and one that actually
sets the NPVariant (that can't fail). If an error occurs, the snippet will
set the success status variable to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
expression: a string representing the expression that yields the value to
be stored.
output: an expression representing a pointer to the NPVariant to store the
value into.
success: the name of a bool variable containing the current success status.
npp: a string, representing the name of the variable that holds the pointer
to the NPP instance.
Returns:
a (string, string) pair, the first string being the code snippet for the
first phase, and the second one being the code snippet for the second phase.
Raises:
UnknownPODType: type_defn is not a known POD type.
"""
npp = npp # silence gpylint.
type_name = cpp_utils.GetScopedName(scope, type_defn)
final_type = type_defn.GetFinalType()
if final_type.podtype == 'void':
return ('%s;' % expression,
'VOID_TO_NPVARIANT(*%s);' % output)
elif final_type.podtype == 'int':
return ('%s %s = %s;' % (type_name, variable, expression),
'INT32_TO_NPVARIANT(%s, *%s);' % (variable, output))
elif final_type.podtype == 'bool':
return ('%s %s = %s;' % (type_name, variable, expression),
'BOOLEAN_TO_NPVARIANT(%s, *%s);' % (variable, output))
elif final_type.podtype == 'float':
return ('%s %s = %s;' % (type_name, variable, expression),
'DOUBLE_TO_NPVARIANT(static_cast<double>(%s), *%s);' %
(variable, output))
elif final_type.podtype == 'variant':
return ('%s %s = %s' % (type_name, variable, expression),
'*%s = %s.NPVariant(npp);' % (output, variable))
elif final_type.podtype == 'string':
return ('GLUE_PROFILE_START(npp, "StringToNPVariant");\n'
'%s = StringToNPVariant(%s, %s);\n'
'GLUE_PROFILE_STOP(npp, "StringToNPVariant");'
% (success, expression, output),
'')
elif final_type.podtype == 'wstring':
return ('GLUE_PROFILE_START(npp, "String16ToNPVariant");\n'
'%s = String16ToNPVariant(%s, %s);\n'
'GLUE_PROFILE_STOP(npp, "String16ToNPVariant");'
% (success, expression, output),
'')
else:
raise UnknownPODType(final_type.podtype)
def PpapiBindingGlueHeader(scope, type_defn):
"""Gets the PPAPI glue header for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue header.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def PpapiBindingGlueCpp(scope, type_defn):
"""Gets the PPAPI glue implementation for a given type.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
Returns:
a string, the glue implementation.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
def PpapiDispatchFunctionHeader(scope, type_defn, variable, npp, success):
"""Gets a header for PPAPI glue dispatch functions.
This function creates a string containing a C++ code snippet that should be
included at the beginning of PPAPI glue dispatch functions like Call or
GetProperty. This code snippet will declare and initialize certain variables
that will be used in the dispatch functions, like the pp::Var representing
the object, or a pointer to the pp::Instance.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type.
variable: a string, representing a name of a variable that can be used to
store a reference to the object.
npp: a string, representing the name of the variable that holds the pointer
to the pp::Instance. Will be declared by the code snippet.
success: the name of a bool variable containing the current success status.
(is not declared by the code snippet).
Returns:
a (string, string) pair, the first string being the code snippet, and the
second string being an expression to access the object.
Raises:
InvalidPODUsage: always. This function can't be called for a POD type.
"""
raise InvalidPODUsage
_string_from_ppvar_template = string.Template("""
${type} ${variable};
if (${input}.is_string()) {
${variable} = ${input}.AsString();
} else {
${success} = false;
*exception = pp::Var("Error in " ${context}
": was expecting a string.");
}
""")
_float_from_ppvar_template = string.Template("""
${type} ${variable} = 0.f;
if (${input}.is_number()) {
${variable} = static_cast<${type}>(${input}.AsDouble());
} else {
*exception = pp::Var("Error in " ${context}
": was expecting a number.");
${success} = false;
}
""")
_int_from_ppvar_template = string.Template("""
${type} ${variable} = 0;
if (${input}.is_number()) {
${variable} = static_cast<${type}>(${input}.AsInt());
} else {
*exception = pp::Var("Error in " ${context}
": was expecting an int.");
${success} = false;
}
""")
_bool_from_ppvar_template = string.Template("""
${type} ${variable} = false;
if (${input}.is_bool()) {
${variable} = ${input}.AsBool();
} else {
*exception = pp::Var("Error in " ${context}
": was expecting a boolean.");
${success} = false;
}
""")
def PpapiFromPPVar(scope, type_defn, input_expr, variable, success,
exception_context, npp):
"""Gets the string to get a value from a pp::Var.
This function creates a string containing a C++ code snippet that is used to
retrieve a value from a pp::Var. If an error occurs, like if the pp::Var
is not of the correct type, the snippet will set the success status variable
to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
input_expr: an expression representing the pp::Var to get the value from.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
success: the name of a bool variable containing the current success status.
exception_context: the name of a string containing context information, for
use in exception reporting.
npp: a string, representing the name of the variable that holds the pointer
to the pp::Instance.
Returns:
a (string, string) pair, the first string being the code snippet and the
second one being the expression to access that value.
Raises:
BadVoidUsage: type_defn is a 'void' POD type.
UnknownPODType: type_defn is not a known POD type.
"""
npp = npp # silence gpylint.
type_name = cpp_utils.GetScopedName(scope, type_defn)
final_type = type_defn.GetFinalType()
if final_type.podtype == 'void':
return '', 'void(0)'
elif final_type.podtype == 'int':
text = _int_from_ppvar_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'bool':
text = _bool_from_ppvar_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'float':
text = _float_from_ppvar_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
elif final_type.podtype == 'string':
text = _string_from_ppvar_template.substitute(type=type_name,
input=input_expr,
variable=variable,
success=success,
context=exception_context)
return text, variable
else:
raise UnknownPODType(final_type.podtype)
def PpapiExprToPPVar(scope, type_defn, variable, expression, output,
success, npp):
"""Gets the string to store a value into a pp::Var.
This function creates a string containing a C++ code snippet that is used to
store a value into a pp::Var. That operation takes two phases, one that
allocates necessary PPAPI resources, and that can fail, and one that actually
sets the pp::Var (that can't fail). If an error occurs, the snippet will
set the success status variable to false.
Args:
scope: a Definition for the scope in which the glue will be written.
type_defn: a Definition, representing the type of the value.
variable: a string, representing a name of a variable that can be used to
store a reference to the value.
expression: a string representing the expression that yields the value to
be stored.
output: an expression representing a pointer to the pp::Var to store the
value into.
success: the name of a bool variable containing the current success status.
npp: a string, representing the name of the variable that holds the pointer
to the pp::Instance.
Returns:
a (string, string) pair, the first string being the code snippet for the
first phase, and the second one being the code snippet for the second phase.
Raises:
UnknownPODType: type_defn is not a known POD type.
"""
(npp, success) = (npp, success) # silence gpylint.
type_name = cpp_utils.GetScopedName(scope, type_defn)
final_type = type_defn.GetFinalType()
if final_type.podtype == 'void':
return ('%s;' % expression,
'*%s = pp::Var();' % output)
elif final_type.podtype == 'int':
return ('%s %s = %s;' % (type_name, variable, expression),
'*%s = pp::Var((int32_t)%s);' % (output, variable))
elif final_type.podtype == 'bool':
return ('%s %s = %s;' % (type_name, variable, expression),
'*%s = pp::Var(%s);' % (output, variable))
elif final_type.podtype == 'float':
return ('%s %s = %s;' % (type_name, variable, expression),
'*%s = pp::Var(static_cast<double>(%s));' %
(output, variable))
elif final_type.podtype == 'variant':
raise UnimplementedPODType
elif final_type.podtype == 'string':
return ('*%s = pp::Var(%s);' % (output, expression),
'')
else:
raise UnknownPODType(final_type.podtype)
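# Illustrative sketch (not part of the generator): for an 'int' POD type the
# two snippets returned above would expand to C++ roughly like the following,
# where 'value', 'GetValue()' and 'output' are placeholder names:
#   phase 1:  int value = GetValue();
#   phase 2:  *output = pp::Var((int32_t)value);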
def main(unused_argv):
pass
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
kittiu/odoo | addons/mrp/wizard/mrp_price.py | 381 | 2132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_price(osv.osv_memory):
_name = 'mrp.product_price'
_description = 'Product Price'
_columns = {
'number': fields.integer('Quantity', required=True, help="Specify quantity of products to produce or buy. Report of Cost structure will be displayed based on this quantity."),
}
_defaults = {
'number': 1,
}
def print_report(self, cr, uid, ids, context=None):
""" To print the report of Product cost structure
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return : Report
"""
if context is None:
context = {}
datas = {'ids' : context.get('active_ids',[])}
res = self.read(cr, uid, ids, ['number'])
res = res and res[0] or {}
datas['form'] = res
return {
'type' : 'ir.actions.report.xml',
'report_name':'product.price',
'datas' : datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tjgillies/distributed-draw | entangled/kademlia/kbucket.py | 2 | 4999 | #!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
import constants
class BucketFull(Exception):
""" Raised when the bucket is full """
class KBucket(object):
""" Description - later
"""
def __init__(self, rangeMin, rangeMax):
"""
@param rangeMin: The lower boundary for the range in the 160-bit ID
space covered by this k-bucket
@param rangeMax: The upper boundary for the range in the ID space
covered by this k-bucket
"""
self.lastAccessed = 0
self.rangeMin = rangeMin
self.rangeMax = rangeMax
self._contacts = list()
def addContact(self, contact):
""" Add contact to _contact list in the right order. This will move the
contact to the end of the k-bucket if it is already present.
@raise kademlia.kbucket.BucketFull: Raised when the bucket is full and
the contact isn't in the bucket
already
@param contact: The contact to add
@type contact: kademlia.contact.Contact
"""
if contact in self._contacts:
# Move the existing contact to the end of the list
# - using the new contact to allow add-on data (e.g. optimization-specific stuff) to be updated as well
self._contacts.remove(contact)
self._contacts.append(contact)
elif len(self._contacts) < constants.k:
self._contacts.append(contact)
else:
raise BucketFull("No space in bucket to insert contact")
def getContact(self, contactID):
""" Get the contact specified node ID"""
index = self._contacts.index(contactID)
return self._contacts[index]
def getContacts(self, count=-1, excludeContact=None):
""" Returns a list containing up to the first count number of contacts
@param count: The amount of contacts to return (if 0 or less, return
all contacts)
@type count: int
@param excludeContact: A contact to exclude; if this contact is in
the list of returned values, it will be
discarded before returning. If a C{str} is
passed as this argument, it must be the
contact's ID.
@type excludeContact: kademlia.contact.Contact or str
@raise IndexError: If the number of requested contacts is too large
@return: Return up to the first count number of contacts in a list
If no contacts are present an empty list is returned
@rtype: list
"""
# Return all contacts in bucket
if count <= 0:
count = len(self._contacts)
# Get current contact number
currentLen = len(self._contacts)
# If count greater than k - return only k contacts
if count > constants.k:
count = constants.k
# Check if count value in range and,
# if count number of contacts are available
if not currentLen:
contactList = list()
# length of list less than requested amount
elif currentLen < count:
contactList = self._contacts[0:currentLen]
# enough contacts in list
else:
contactList = self._contacts[0:count]
if excludeContact in contactList:
contactList.remove(excludeContact)
return contactList
def removeContact(self, contact):
""" Remove given contact from list
@param contact: The contact to remove, or a string containing the
contact's node ID
@type contact: kademlia.contact.Contact or str
@raise ValueError: The specified contact is not in this bucket
"""
self._contacts.remove(contact)
def keyInRange(self, key):
""" Tests whether the specified key (i.e. node ID) is in the range
of the 160-bit ID space covered by this k-bucket (in other words, it
returns whether or not the specified key should be placed in this
k-bucket)
@param key: The key to test
@type key: str or int
@return: C{True} if the key is in this k-bucket's range, or C{False}
if not.
@rtype: bool
"""
if isinstance(key, str):
key = long(key.encode('hex'), 16)
return self.rangeMin <= key < self.rangeMax
def __len__(self):
return len(self._contacts)
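# Minimal usage sketch (illustrative only, not part of the library): exercises
# the range test and length helpers without touching constants.k.
if __name__ == '__main__':
    bucket = KBucket(rangeMin=0, rangeMax=2 ** 160)
    print bucket.keyInRange('\x00' * 20) # True: node ID 0 lies inside the range
    print len(bucket) # 0: no contacts have been added yet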
| lgpl-3.0 |
mogoweb/chromium-crosswalk | native_client_sdk/src/tools/httpd.py | 41 | 10125 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import imp
import logging
import multiprocessing
import optparse
import os
import SimpleHTTPServer # pylint: disable=W0611
import socket
import sys
import time
import urlparse
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_SDK_ROOT = os.path.dirname(SCRIPT_DIR)
# We only run from the examples directory so that not too much is exposed
# via this HTTP server. Everything in the directory is served, so there should
# never be anything potentially sensitive in the serving directory, especially
# if the machine might be a multi-user machine and not all users are trusted.
# We only serve via the loopback interface.
def SanityCheckDirectory(dirname):
abs_serve_dir = os.path.abspath(dirname)
# Verify we don't serve anywhere above NACL_SDK_ROOT.
if abs_serve_dir[:len(NACL_SDK_ROOT)] == NACL_SDK_ROOT:
return
logging.error('For security, httpd.py should only be run from within the')
logging.error('example directory tree.')
logging.error('Attempting to serve from %s.' % abs_serve_dir)
logging.error('Run with --no_dir_check to bypass this check.')
sys.exit(1)
class PluggableHTTPServer(BaseHTTPServer.HTTPServer):
def __init__(self, *args, **kwargs):
BaseHTTPServer.HTTPServer.__init__(self, *args)
self.serve_dir = kwargs.get('serve_dir', '.')
self.test_mode = kwargs.get('test_mode', False)
self.delegate_map = {}
self.running = True
self.result = 0
def Shutdown(self, result=0):
self.running = False
self.result = result
class PluggableHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def _FindDelegateAtPath(self, dirname):
# First check the cache...
logging.debug('Looking for cached delegate in %s...' % dirname)
handler_script = os.path.join(dirname, 'handler.py')
if dirname in self.server.delegate_map:
result = self.server.delegate_map[dirname]
if result is None:
logging.debug('Found None.')
else:
logging.debug('Found delegate.')
return result
# Don't have one yet, look for one.
delegate = None
logging.debug('Testing file %s for existence...' % handler_script)
if os.path.exists(handler_script):
logging.debug(
'File %s exists, looking for HTTPRequestHandlerDelegate.' %
handler_script)
module = imp.load_source('handler', handler_script)
delegate_class = getattr(module, 'HTTPRequestHandlerDelegate', None)
delegate = delegate_class()
if not delegate:
logging.warn(
'Unable to find symbol HTTPRequestHandlerDelegate in module %s.' %
handler_script)
return delegate
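# Illustrative handler.py sketch (an assumption, not shipped with this file):
# a delegate module only needs to expose a class named
# HTTPRequestHandlerDelegate whose methods receive the request handler, e.g.:
#
#   class HTTPRequestHandlerDelegate(object):
#     def send_head(self, handler):
#       return handler.base_send_head()
#     def do_GET(self, handler):
#       return handler.base_do_GET()
#     def do_POST(self, handler):
#       return handler.base_do_POST()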
def _FindDelegateForURLRecurse(self, cur_dir, abs_root):
delegate = self._FindDelegateAtPath(cur_dir)
if not delegate:
# Didn't find it, try the parent directory, but stop if this is the server
# root.
if cur_dir != abs_root:
parent_dir = os.path.dirname(cur_dir)
delegate = self._FindDelegateForURLRecurse(parent_dir, abs_root)
logging.debug('Adding delegate to cache for %s.' % cur_dir)
self.server.delegate_map[cur_dir] = delegate
return delegate
def _FindDelegateForURL(self, url_path):
path = self.translate_path(url_path)
if os.path.isdir(path):
dirname = path
else:
dirname = os.path.dirname(path)
abs_serve_dir = os.path.abspath(self.server.serve_dir)
delegate = self._FindDelegateForURLRecurse(dirname, abs_serve_dir)
if not delegate:
logging.info('No handler found for path %s. Using default.' % url_path)
return delegate
def _SendNothingAndDie(self, result=0):
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/html')
self.send_header('Content-length', '0')
self.end_headers()
self.server.Shutdown(result)
def send_head(self):
delegate = self._FindDelegateForURL(self.path)
if delegate:
return delegate.send_head(self)
return self.base_send_head()
def base_send_head(self):
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def do_GET(self):
# TODO(binji): pyauto tests use the ?quit=1 method to kill the server.
# Remove this when we kill the pyauto tests.
_, _, _, query, _ = urlparse.urlsplit(self.path)
if query:
params = urlparse.parse_qs(query)
if '1' in params.get('quit', []):
self._SendNothingAndDie()
return
delegate = self._FindDelegateForURL(self.path)
if delegate:
return delegate.do_GET(self)
return self.base_do_GET()
def base_do_GET(self):
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
delegate = self._FindDelegateForURL(self.path)
if delegate:
return delegate.do_POST(self)
return self.base_do_POST()
def base_do_POST(self):
if self.server.test_mode:
if self.path == '/ok':
self._SendNothingAndDie(0)
elif self.path == '/fail':
self._SendNothingAndDie(1)
class LocalHTTPServer(object):
"""Class to start a local HTTP server as a child process."""
def __init__(self, dirname, port, test_mode):
parent_conn, child_conn = multiprocessing.Pipe()
self.process = multiprocessing.Process(
target=_HTTPServerProcess,
args=(child_conn, dirname, port, {
'serve_dir': dirname,
'test_mode': test_mode,
}))
self.process.start()
if parent_conn.poll(10): # wait 10 seconds
self.port = parent_conn.recv()
else:
raise Exception('Unable to launch HTTP server.')
self.conn = parent_conn
def ServeForever(self):
"""Serve until the child HTTP process tells us to stop.
Returns:
The result from the child (as an errorcode), or 0 if the server was
killed not by the child (by KeyboardInterrupt for example).
"""
child_result = 0
try:
# Block on this pipe, waiting for a response from the child process.
child_result = self.conn.recv()
except KeyboardInterrupt:
pass
finally:
self.Shutdown()
return child_result
def ServeUntilSubprocessDies(self, process):
"""Serve until the child HTTP process tells us to stop or |subprocess| dies.
Returns:
The result from the child (as an errorcode), or 0 if |subprocess| died,
or the server was killed some other way (by KeyboardInterrupt for
example).
"""
child_result = 0
try:
while True:
if process.poll() is not None:
child_result = 0
break
if self.conn.poll():
child_result = self.conn.recv()
break
time.sleep(0)
except KeyboardInterrupt:
pass
finally:
self.Shutdown()
return child_result
def Shutdown(self):
"""Send a message to the child HTTP server process and wait for it to
finish."""
self.conn.send(False)
self.process.join()
def GetURL(self, rel_url):
"""Get the full url for a file on the local HTTP server.
Args:
rel_url: A URL fragment to convert to a full URL. For example,
GetURL('foobar.baz') -> 'http://localhost:1234/foobar.baz'
"""
return 'http://localhost:%d/%s' % (self.port, rel_url)
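# Illustrative usage sketch (directory, port and file name are assumptions):
#
#   server = LocalHTTPServer('.', 0, test_mode=False)  # 0 picks an ephemeral port
#   print server.GetURL('index.html')                  # http://localhost:<port>/index.html
#   server.Shutdown()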
def _HTTPServerProcess(conn, dirname, port, server_kwargs):
"""Run a local httpserver with the given port or an ephemeral port.
This function assumes it is run as a child process using multiprocessing.
Args:
conn: A connection to the parent process. The child process sends
the local port, and waits for a message from the parent to
stop serving. It also sends a "result" back to the parent -- this can
be used to allow a client-side test to notify the server of results.
dirname: The directory to serve. All files are accessible through
http://localhost:<port>/path/to/filename.
port: The port to serve on. If 0, an ephemeral port will be chosen.
server_kwargs: A dict that will be passed as kwargs to the server.
"""
try:
os.chdir(dirname)
httpd = PluggableHTTPServer(('', port), PluggableHTTPRequestHandler,
**server_kwargs)
except socket.error as e:
sys.stderr.write('Error creating HTTPServer: %s\n' % e)
sys.exit(1)
try:
conn.send(httpd.server_address[1]) # the chosen port number
httpd.timeout = 0.5 # seconds
while httpd.running:
# Flush output for MSVS Add-In.
sys.stdout.flush()
sys.stderr.flush()
httpd.handle_request()
if conn.poll():
httpd.running = conn.recv()
except KeyboardInterrupt:
pass
finally:
conn.send(httpd.result)
conn.close()
def main(args):
parser = optparse.OptionParser()
parser.add_option('-C', '--serve-dir',
help='Serve files out of this directory.',
dest='serve_dir', default=os.path.abspath('.'))
parser.add_option('-p', '--port',
help='Run server on this port.',
dest='port', default=5103)
parser.add_option('--no_dir_check',
help='No check to ensure serving from safe directory.',
dest='do_safe_check', action='store_false', default=True)
parser.add_option('--test-mode',
help='Listen for posts to /ok or /fail and shut down the server with '
' errorcodes 0 and 1 respectively.',
dest='test_mode', action='store_true')
options, args = parser.parse_args(args)
if options.do_safe_check:
SanityCheckDirectory(options.serve_dir)
server = LocalHTTPServer(options.serve_dir, int(options.port),
options.test_mode)
# Serve until the client tells us to stop. When it does, it will give us an
# errorcode.
print 'Serving %s on %s...' % (options.serve_dir, server.GetURL(''))
return server.ServeForever()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
jmcreasman/TIY-GitHub | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
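# Worked example (values are assumptions): for a 'shared_library' target named
# 'base' with no explicit PDB settings and vars = {'PRODUCT_DIR': 'out/Release'},
# the lookup above falls through to the default and returns
# 'out/Release/base.dll.pdb'.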
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
| cc0-1.0 |
TeamExodus/external_chromium_org | tools/telemetry/telemetry/util/path.py | 45 | 1284 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
# TODO(dtu): Move these functions from core.util to here.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
AddDirToPythonPath = util.AddDirToPythonPath
GetBuildDirectories = util.GetBuildDirectories
def IsExecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def FindInstalledWindowsApplication(application_path):
"""Search common Windows installation directories for an application.
Args:
application_path: Path to application relative from installation location.
Returns:
A string representing the full path, or None if not found.
"""
search_paths = [os.getenv('PROGRAMFILES(X86)'),
os.getenv('PROGRAMFILES'),
os.getenv('LOCALAPPDATA')]
search_paths += os.getenv('PATH', '').split(os.pathsep)
for search_path in search_paths:
if not search_path:
continue
path = os.path.join(search_path, application_path)
if IsExecutable(path):
return path
return None
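# Illustrative usage sketch (the application path below is an assumption):
#
#   chrome_path = FindInstalledWindowsApplication(
#       os.path.join('Google', 'Chrome', 'Application', 'chrome.exe'))
#   if chrome_path:
#     print 'Found Chrome at', chrome_path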
| bsd-3-clause |
kzampog/sisyphus | docs/conf.py | 2 | 4917 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'cilantro'
copyright = u'2018, Konstantinos Zampogiannis'
author = u'Konstantinos Zampogiannis'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cilantrodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cilantro.tex', u'cilantro Documentation',
u'Konstantinos Zampogiannis', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cilantro', u'cilantro Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cilantro', u'cilantro Documentation',
author, 'cilantro', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- | mit |
ikoula/cloudstack | plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py | 8 | 22802 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
from OvmCommonModule import *
from OvmDiskModule import *
from OvmVifModule import *
from OvmHostModule import OvmHost
from string import Template
from OVSXXenVMConfig import *
from OVSSiteVM import start_vm, stop_vm, reset_vm
from OVSSiteCluster import *
from OvmStoragePoolModule import OvmStoragePool
from OVSXXenStore import xen_get_vm_path, xen_get_vnc_port
from OVSDB import db_get_vm
from OVSXMonitor import xen_get_vm_perf_metrics, xen_get_xm_info
from OVSXXenVM import xen_migrate_vm
from OVSSiteRMVM import unregister_vm, register_vm, set_vm_status
from OVSSiteVMInstall import install_vm_hvm
from OVSSiteRMServer import get_master_ip
from OVSXXenVMInstall import xen_change_vm_cdrom
from OVSXAPIUtil import XenAPIObject, session_login, session_logout
logger = OvmLogger("OvmVm")
class OvmVmDecoder(json.JSONDecoder):
def decode(self, jStr):
deDict = asciiLoads(jStr)
vm = OvmVm()
setAttrFromDict(vm, 'cpuNum', deDict, int)
setAttrFromDict(vm, 'memory', deDict, long)
setattr(vm, 'rootDisk', toOvmDisk(deDict['rootDisk']))
setattr(vm, 'vifs', toOvmVifList(deDict['vifs']))
setattr(vm, 'disks', toOvmDiskList(deDict['disks']))
setAttrFromDict(vm, 'name', deDict)
setAttrFromDict(vm, 'uuid', deDict)
setAttrFromDict(vm, 'bootDev', deDict)
setAttrFromDict(vm, 'type', deDict)
return vm
class OvmVmEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmVm): raise Exception("%s is not instance of OvmVm"%type(obj))
dct = {}
safeDictSet(obj, dct, 'cpuNum')
safeDictSet(obj, dct, 'memory')
safeDictSet(obj, dct, 'powerState')
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'type')
vifs = fromOvmVifList(obj.vifs)
dct['vifs'] = vifs
rootDisk = fromOvmDisk(obj.rootDisk)
dct['rootDisk'] = rootDisk
disks = fromOvmDiskList(obj.disks)
dct['disks'] = disks
return dct
def toOvmVm(jStr):
return json.loads(jStr, cls=OvmVmDecoder)
def fromOvmVm(vm):
return normalizeToGson(json.dumps(vm, cls=OvmVmEncoder))
class OvmVm(OvmObject):
cpuNum = 0
memory = 0
rootDisk = None
vifs = []
disks = []
powerState = ''
name = ''
bootDev = ''
type = ''
def _getVifs(self, vmName):
vmPath = OvmHost()._vmNameToPath(vmName)
domId = OvmHost()._getDomainIdByName(vmName)
vifs = successToMap(xen_get_vifs(vmPath))
lst = []
for k in vifs:
v = vifs[k]
vifName = 'vif' + domId + '.' + k[len('vif'):]
vif = OvmVif()
(mac, bridge, type) = v.split(',')
safeSetAttr(vif, 'name', vifName)
safeSetAttr(vif, 'mac', mac)
safeSetAttr(vif, 'bridge', bridge)
safeSetAttr(vif, 'type', type)
lst.append(vif)
return lst
def _getVifsFromConfig(self, vmPath):
vifs = successToMap(xen_get_vifs(vmPath))
lst = []
for k in vifs:
v = vifs[k]
vif = OvmVif()
(mac, bridge, type) = v.split(',')
safeSetAttr(vif, 'name', k)
safeSetAttr(vif, 'mac', mac)
safeSetAttr(vif, 'bridge', bridge)
safeSetAttr(vif, 'type', type)
lst.append(vif)
return lst
def _getIsoMountPath(self, vmPath):
vmName = basename(vmPath)
priStoragePath = vmPath.rstrip(join('running_pool', vmName))
return join(priStoragePath, 'iso_pool', vmName)
def _getVmTypeFromConfigFile(self, vmPath):
vmType = successToMap(xen_get_vm_type(vmPath))['type']
return vmType.replace('hvm', 'HVM').replace('para', 'PV')
def _tapAOwnerFile(self, vmPath):
# Create a file with name convention 'host_ip_address' in vmPath
# Because xm list doesn't return vms that have been stopped, we scan
# primary storage for stopped vms. This file tells us which host it belongs
# to. The file is used in OvmHost.getAllVms()
self._cleanUpOwnerFile(vmPath)
ownerFileName = makeOwnerFileName()
fd = open(join(vmPath, ownerFileName), 'w')
fd.write(ownerFileName)
fd.close()
def _cleanUpOwnerFile(self, vmPath):
for f in os.listdir(vmPath):
fp = join(vmPath, f)
if isfile(fp) and f.startswith(OWNER_FILE_PREFIX):
os.remove(fp)
@staticmethod
def create(jsonString):
def dumpCfg(vmName, cfgPath):
cfgFd = open(cfgPath, 'r')
cfg = cfgFd.readlines()
cfgFd.close()
logger.info(OvmVm.create, "Start %s with configure:\n\n%s\n"%(vmName, "".join(cfg)))
def setVifsType(vifs, type):
for vif in vifs:
vif.type = type
def hddBoot(vm, vmPath):
vmType = vm.type
if vmType == "FROMCONFIGFILE":
vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
cfgDict = {}
if vmType == "HVM":
cfgDict['builder'] = "'hvm'"
cfgDict['acpi'] = "1"
cfgDict['apic'] = "1"
cfgDict['device_model'] = "'/usr/lib/xen/bin/qemu-dm'"
cfgDict['kernel'] = "'/usr/lib/xen/boot/hvmloader'"
vifType = 'ioemu'
else:
cfgDict['bootloader'] = "'/usr/bin/pygrub'"
vifType = 'netfront'
cfgDict['name'] = "'%s'"%vm.name
cfgDict['disk'] = "[]"
cfgDict['vcpus'] = "''"
cfgDict['memory'] = "''"
cfgDict['on_crash'] = "'destroy'"
cfgDict['on_reboot'] = "'restart'"
cfgDict['vif'] = "[]"
items = []
for k in cfgDict.keys():
item = " = ".join([k, cfgDict[k]])
items.append(item)
vmSpec = "\n".join(items)
vmCfg = open(join(vmPath, 'vm.cfg'), 'w')
vmCfg.write(vmSpec)
vmCfg.close()
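# For reference (illustrative, key order may differ since cfgDict is a plain
# dict): the vm.cfg written above for an HVM guest contains entries such as
#   builder = 'hvm', acpi = 1, apic = 1, device_model, kernel,
#   name = '<vm.name>', vcpus = '', memory = '', disk = [], vif = [],
#   on_crash = 'destroy', on_reboot = 'restart'
# which are then filled in by the xen_set_* calls that follow.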
setVifsType(vm.vifs, vifType)
raiseExceptionIfFail(xen_set_vcpus(vmPath, vm.cpuNum))
raiseExceptionIfFail(xen_set_memory(vmPath, BytesToM(vm.memory)))
raiseExceptionIfFail(xen_add_disk(vmPath, vm.rootDisk.path, mode=vm.rootDisk.type))
vifs = [OvmVif.toXenString(v) for v in vm.vifs]
for vif in vifs:
raiseExceptionIfFail(xen_set_vifs(vmPath, vif))
for disk in vm.disks:
raiseExceptionIfFail(xen_add_disk(vmPath, disk.path, mode=disk.type))
raiseExceptionIfFail(xen_set_vm_vnc_password(vmPath, ""))
cfgFile = join(vmPath, 'vm.cfg')
# only HVM supports attaching cdrom
if vmType == 'HVM':
# Add an empty "hdc:cdrom" entry in config. Fisrt we set boot order to 'd' that is cdrom boot,
# then 'hdc:cdrom' entry will be in disk list. Second, change boot order to 'c' which
# is harddisk boot. VM can not start with an empty 'hdc:cdrom' when boot order is 'd'.
# it's tricky !
raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'd'))
raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'c'))
raiseExceptionIfFail(xen_correct_cfg(cfgFile, vmPath))
xen_correct_qos_cfg(cfgFile)
dumpCfg(vm.name, cfgFile)
server = successToMap(get_master_ip())['ip']
raiseExceptionIfFail(start_vm(vmPath, server))
rs = SUCC()
return rs
def cdBoot(vm, vmPath):
isoMountPath = None
try:
cdrom = None
for disk in vm.disks:
if disk.isIso == True:
cdrom = disk
break
if not cdrom: raise Exception("Cannot find Iso in disks")
isoOnSecStorage = dirname(cdrom.path)
isoName = basename(cdrom.path)
isoMountPath = OvmVm()._getIsoMountPath(vmPath)
OvmStoragePool()._mount(isoOnSecStorage, isoMountPath)
isoPath = join(isoMountPath, isoName)
if not exists(isoPath):
raise Exception("Cannot found iso %s at %s which mounts to %s"%(isoName, isoOnSecStorage, isoMountPath))
stdout = run_cmd(args=['file', isoPath])
if not stdout.strip().endswith("(bootable)"): raise Exception("ISO %s is not bootable"%cdrom.path)
#now alter cdrom to correct path
cdrom.path = isoPath
if len(vm.vifs) != 0:
vif = vm.vifs[0]
#ISO boot must be HVM
vifCfg = ','.join([vif.mac, vif.bridge, 'ioemu'])
else:
vifCfg = ''
rootDiskSize = os.path.getsize(vm.rootDisk.path)
rooDiskCfg = ':'.join([join(vmPath, basename(vm.rootDisk.path)), str(BytesToG(rootDiskSize)), 'True'])
disks = [rooDiskCfg]
for d in vm.disks:
if d.isIso: continue
size = os.path.getsize(d.path)
cfg = ':'.join([d.path, str(BytesToG(size)), 'True'])
disks.append(cfg)
disksCfg = ','.join(disks)
server = successToMap(get_master_ip())['ip']
raiseExceptionIfFail(install_vm_hvm(vmPath, BytesToM(vm.memory), vm.cpuNum, vifCfg, disksCfg, cdrom.path, vncpassword='', dedicated_server=server))
rs = SUCC()
return rs
except Exception, e:
if isoMountPath and OvmStoragePool()._isMounted(isoMountPath):
doCmd(['umount', '-f', isoMountPath])
errmsg = fmt_err_msg(e)
raise Exception(errmsg)
try:
vm = toOvmVm(jsonString)
logger.debug(OvmVm.create, "creating vm, spec:%s"%jsonString)
rootDiskPath = vm.rootDisk.path
if not exists(rootDiskPath): raise Exception("Cannot find root disk %s"%rootDiskPath)
rootDiskDir = dirname(rootDiskPath)
vmPath = join(dirname(rootDiskDir), vm.name)
if not exists(vmPath):
doCmd(['ln', '-s', rootDiskDir, vmPath])
vmNameFile = open(join(rootDiskDir, 'vmName'), 'w')
vmNameFile.write(vm.name)
vmNameFile.close()
OvmVm()._tapAOwnerFile(rootDiskDir)
# set the VM to DOWN before starting, OVS agent will check this status
set_vm_status(vmPath, 'DOWN')
if vm.bootDev == "HDD":
return hddBoot(vm, vmPath)
elif vm.bootDev == "CD":
return cdBoot(vm, vmPath)
else:
raise Exception("Unkown bootdev %s for %s"%(vm.bootDev, vm.name))
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.create, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.create), errmsg)
@staticmethod
def stop(vmName):
try:
try:
OvmHost()._getDomainIdByName(vmName)
except NoVmFoundException, e:
logger.info(OvmVm.stop, "vm %s is already stopped"%vmName)
return SUCC()
logger.info(OvmVm.stop, "Stop vm %s"%vmName)
try:
vmPath = OvmHost()._vmNameToPath(vmName)
except Exception, e:
errmsg = fmt_err_msg(e)
logger.info(OvmVm.stop, "Cannot find link for vm %s on primary storage, treating it as stopped\n %s"%(vmName, errmsg))
return SUCC()
# set the VM to RUNNING before stopping, OVS agent will check this status
set_vm_status(vmPath, 'RUNNING')
raiseExceptionIfFail(stop_vm(vmPath))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.stop, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.stop), errmsg)
@staticmethod
def reboot(vmName):
try:
#===================================================================
# Xend has a reboot bug. If a vm is rebooted too quickly, xend returns success
# but actually refuses the reboot (as seen in the log)
# vmPath = successToMap(xen_get_vm_path(vmName))['path']
# raiseExceptionIfFail(reset_vm(vmPath))
#===================================================================
vmPath = OvmHost()._vmNameToPath(vmName)
OvmVm.stop(vmName)
raiseExceptionIfFail(start_vm(vmPath))
vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
logger.info(OvmVm.stop, "reboot vm %s, new vncPort is %s"%(vmName, vncPort))
return toGson({"vncPort":str(vncPort)})
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.reboot, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.reboot), errmsg)
@staticmethod
def getDetails(vmName):
try:
vm = OvmVm()
try:
OvmHost()._getDomainIdByName(vmName)
vmPath = OvmHost()._vmNameToPath(vmName)
vifsFromConfig = False
except NoVmFoundException, e:
vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
vifsFromConfig = True
if not isdir(vmPath):
# The case is: when the vm start was not completed at primaryStorageDownload or createVolume (e.g. mgmt server stop), the mgmt
# server will keep the vm state in starting, then a stop command will be sent. The stop command will delete the bridges that the vm attaches to,
# by retrieving bridge info via OvmVm.getDetails(). In this case, the vm doesn't exist, so we return a fake object here.
fakeDisk = OvmDisk()
vm.rootDisk = fakeDisk
else:
if vifsFromConfig:
vm.vifs.extend(vm._getVifsFromConfig(vmPath))
else:
vm.vifs.extend(vm._getVifs(vmName))
safeSetAttr(vm, 'name', vmName)
disks = successToMap(xen_get_vdisks(vmPath))['vdisks'].split(',')
rootDisk = None
#BUG: there is no way to get type of disk, assume all are "w"
for d in disks:
if vmName in d:
rootDisk = OvmDisk()
safeSetAttr(rootDisk, 'path', d)
safeSetAttr(rootDisk, 'type', "w")
continue
disk = OvmDisk()
safeSetAttr(disk, 'path', d)
safeSetAttr(disk, 'type', "w")
vm.disks.append(disk)
if not rootDisk: raise Exception("Cannot find root disk for vm %s"%vmName)
safeSetAttr(vm, 'rootDisk', rootDisk)
vcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
safeSetAttr(vm, 'cpuNum', vcpus)
memory = MtoBytes(int(successToMap(xen_get_memory(vmPath))['memory']))
safeSetAttr(vm, 'memory', memory)
vmStatus = db_get_vm(vmPath)
safeSetAttr(vm, 'powerState', vmStatus['status'])
vmType = successToMap(xen_get_vm_type(vmPath))['type'].replace('hvm', 'HVM').replace('para', 'PV')
safeSetAttr(vm, 'type', vmType)
rs = fromOvmVm(vm)
logger.info(OvmVm.getDetails, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.getDetails, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getDetails), errmsg)
@staticmethod
def getVmStats(vmName):
def getVcpuNumAndUtils():
try:
session = session_login()
refs = session.xenapi.VM.get_by_name_label(vmName)
if len(refs) == 0:
raise Exception("No ref for %s found in xenapi VM objects"%vmName)
vm = XenAPIObject('VM', session, refs[0])
VM_metrics = XenAPIObject("VM_metrics", session, vm.get_metrics())
items = VM_metrics.get_VCPUs_utilisation().items()
nvCpus = len(items)
if nvCpus == 0:
raise Exception("vm %s has 0 vcpus !!!"%vmName)
xmInfo = successToMap(xen_get_xm_info())
nCpus = int(xmInfo['nr_cpus'])
totalUtils = 0.0
# CPU utilization of VM = (sum of cpu utilization over all vcpus) / number of physical cpus
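# Worked example (illustrative numbers): two vcpus at 0.30 and 0.50 on a
# 4-cpu host give ((0.30 + 0.50) / 4) * 100 = 20.0 percent.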
for num, util in items:
totalUtils += float(util)
avgUtils = float(totalUtils/nCpus) * 100
return (nvCpus, avgUtils)
finally:
session_logout()
try:
try:
OvmHost()._getDomainIdByName(vmName)
vmPath = OvmHost()._vmNameToPath(vmName)
(nvcpus, avgUtils) = getVcpuNumAndUtils()
vifs = successToMap(xen_get_vifs(vmPath))
rxBytes = 0
txBytes = 0
vifs = OvmVm()._getVifs(vmName)
for vif in vifs:
rxp = join('/sys/class/net', vif.name, 'statistics/rx_bytes')
txp = join("/sys/class/net/", vif.name, "statistics/tx_bytes")
if not exists(rxp): raise Exception('can not find %s'%rxp)
if not exists(txp): raise Exception('can not find %s'%txp)
rxBytes += long(doCmd(['cat', rxp])) / 1000
txBytes += long(doCmd(['cat', txp])) / 1000
except NoVmFoundException, e:
vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
nvcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
avgUtils = 0
rxBytes = 0
txBytes = 0
rs = toGson({"cpuNum":nvcpus, "cpuUtil":avgUtils, "rxBytes":rxBytes, "txBytes":txBytes})
logger.debug(OvmVm.getVmStats, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.getVmStats, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVmStats), errmsg)
@staticmethod
def migrate(vmName, targetHost):
try:
vmPath = OvmHost()._vmNameToPath(vmName)
raiseExceptionIfFail(xen_migrate_vm(vmPath, targetHost))
unregister_vm(vmPath)
OvmVm()._cleanUpOwnerFile(vmPath)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.migrate, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.migrate), errmsg)
@staticmethod
def register(vmName):
try:
vmPath = OvmHost()._vmNameToPath(vmName)
raiseExceptionIfFail(register_vm(vmPath))
OvmVm()._tapAOwnerFile(vmPath)
vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
rs = toGson({"vncPort":str(vncPort)})
logger.debug(OvmVm.register, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.register, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.register), errmsg)
@staticmethod
def getVncPort(vmName):
try:
vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
rs = toGson({"vncPort":vncPort})
logger.debug(OvmVm.getVncPort, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.getVncPort, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVncPort), errmsg)
@staticmethod
def detachOrAttachIso(vmName, iso, isAttach):
try:
if vmName in OvmHost.getAllVms():
scope = 'both'
vmPath = OvmHost()._vmNameToPath(vmName)
else:
scope = 'cfg'
vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
if vmType != 'HVM':
raise Exception("Only HVM supports attaching/detaching ISO")
if not isAttach:
iso = ''
else:
isoName = basename(iso)
isoMountPoint = OvmVm()._getIsoMountPath(vmPath)
isoOnSecStorage = dirname(iso)
OvmStoragePool()._mount(isoOnSecStorage, isoMountPoint)
iso = join(isoMountPoint, isoName)
exceptionIfNoSuccess(xen_change_vm_cdrom(vmPath, iso, scope))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.detachOrAttachIso, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.detachOrAttachIso), errmsg)
if __name__ == "__main__":
import sys
print OvmVm.getDetails(sys.argv[1])
#print OvmVm.getVmStats(sys.argv[1]) | gpl-2.0 |
tayebzaidi/PPLL_Spr_16 | finalPractica/2_2_ciclos_mejor.py | 1 | 4345 | from mrjob.job import MRJob
from mrjob.step import MRStep
import string
import sys
class MRGrados(MRJob):
SORT_VALUES = True
def mapper(self, _, line):
line_stripped = line.translate(string.maketrans("",""), '"')
line_split = line_stripped.split(',') #split by the comma
sorted_line = sorted(line_split)
node0 = sorted_line[0]
node1 = sorted_line[1]
if node0 != node1: #eliminate edges with the same vertice
yield (node0, node1), None #eliminate duplicate nodes
def reducer(self, key, values):
yield key[0], key
yield key[1], key
def sift(self, key, values):
degree = 0
send_edges = []
for val in values:
degree += 1
if val not in send_edges:
send_edges.append(val)
for edge in sorted(send_edges):
if key == edge[0]:
location = 0
elif key == edge[1]:
location = 1
yield edge, (edge, degree, location)
def grado_calc(self, key, values):
for edge, degree, location in values:
if location == 0:
degree0 = degree
if location == 1:
degree1 = degree
yield edge, (degree0, degree1)
def steps(self):
return [
MRStep(mapper = self.mapper,
reducer = self.reducer),
MRStep(reducer = self.sift),
MRStep(reducer = self.grado_calc)
]
class MRCiclos(MRJob):
def mapper(self, _, line):
line_split = line.split() #split by whitespace
node0 = line_split[0]
node1 = line_split[1]
degree0 = int(line_split[2]) # cast to int so the degree comparison below is numeric, not lexicographic
degree1 = int(line_split[3])
if degree0 <= degree1:
yield node0, (node0, node1)
else:
yield node1, (node0, node1)
yield '.pass_through.', (node0, node1, degree0, degree1)
def reducer(self, key, values):
if key != '.pass_through.':
edges = list(values)
if len(edges) > 1:
for i in range(len(edges)):
for j in range(i, len(edges)):
if i != j:
if edges[i][0] and edges[j][1] != key:
yield (edges[i][0], edges[j][1]), edges[i]
else:
for node0, node1, degree0, degree1 in values:
yield (node0, node1), 'original'
def reducer2(self, key, values):
vals = list(values)
if len(vals) > 1:
for val in vals:
node0 = key[0]
node1 = key[1]
if val != 'original':
if val[0] in [node0, node1]:
node2 = val[1]
else:
node2 = val[0]
ciclo = [node0,node1,node2]
ciclo_sorted = sorted(ciclo)
yield ciclo_sorted, None
def steps(self):
return [
MRStep(mapper = self.mapper,
reducer = self.reducer),
MRStep(reducer = self.reducer2)
]
if __name__=="__main__":
print 'Starting grado_calc job'
job_grado_calc = MRGrados(args=sys.argv[1:])
runner_grado_calc= job_grado_calc.make_runner()
runner_grado_calc.run()
grado_calc_output = []
for line in runner_grado_calc.stream_output():
grado_calc_output = grado_calc_output + [job_grado_calc.parse_output_line(line)]
#print 'Results grado_calc:', grado_calc_output
f = open('results_grado_calc.txt','w')
for (node1, node2), (degree0, degree1) in grado_calc_output:
f.write(str(node1)+'\t'+str(node2)+'\t'+str(degree0)+'\t'+str(degree1)+'\n')
f.close()
#print 'Starting ciclos_count job'
job_ciclos_count = MRCiclos(args=['results_grado_calc.txt'])
runner_ciclos_count = job_ciclos_count.make_runner()
runner_ciclos_count.run()
ciclos_count_output = []
for line in runner_ciclos_count.stream_output():
ciclos_count_output = ciclos_count_output + [job_ciclos_count.parse_output_line(line)]
for result in ciclos_count_output:
print result
| gpl-3.0 |
rbdavid/DNA_stacking_analysis | angles_binary.py | 1 | 9052 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# PREAMBLE:
import numpy as np
import MDAnalysis
import sys
import os
import matplotlib.pyplot as plt
traj_file ='%s' %(sys.argv[1])
# ----------------------------------------
# VARIABLE DECLARATION
base1 = 1
nbases = 15
#nbases = 3
#Nsteps = 150000 # check length of the energy file; if not 150000 lines, then need to alter Nsteps value so that angle values will match up
#Nsteps = 149996
#equilib_step = 37500 # we have chosen 75 ns to be the equilib time; 75ns = 37500 frames; if energy values do not match with angle values, then equilib_step needs to be altered as well...
#equilib_step = 37496
#production = Nsteps - equilib_step
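# Worked example of the frame/time conversion used above (illustrative): each
# frame is 0.002 ns apart (see the time array below), so 75 ns of equilibration
# corresponds to 75 / 0.002 = 37500 frames, which is the equilib_step value.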
# SUBROUTINES/DEFINITIONS:
arccosine = np.arccos
dotproduct = np.dot
pi = np.pi
ldtxt = np.loadtxt
zeros = np.zeros
# ----------------------------------------
# DICTIONARY DECLARATION
normals = {} # create the normals dictionary for future use
total_binaries = {} # create the total_binaries dictionary for future use
get_norm = normals.get
get_tb = total_binaries.get
# ----------------------------------------
# PLOTTING SUBROUTINES
def plotting(xdata, ydata, base):
plt.plot(xdata, ydata, 'rx')
plt.title('Stacking behavior of base %s over the trajectory' %(base))
plt.xlabel('Simulation time (ns)')
plt.ylabel('Stacking metric')
plt.xlim((0,300))
plt.grid( b=True, which='major', axis='both', color='k', linestyle='-')
plt.savefig('stacking_binary.%s.png' %(base))
plt.close()
def vdw_hist(data, base_a, base_b):
events, edges, patches = plt.hist(data, bins = 100, histtype = 'bar')
plt.title('Distribution of vdW Energies - Base Pair %s-%s' %(base_a, base_b))
plt.xlabel('vdW Energy ($kcal\ mol^{-1}$)')
plt.xlim((-8,0))
plt.ylabel('Frequency')
plt.savefig('energy.%s.%s.png' %(base_a, base_b))
nf = open('energy.%s.%s.dat' %(base_a, base_b), 'w')
for i in range(len(events)):
nf.write(' %10.1f %10.4f\n' %(events[i], edges[i]))
nf.close()
plt.close()
events = []
edges = []
patches = []
def angle_hist(data, base_a, base_b):
events, edges, patches = plt.hist(data, bins = 100, histtype = 'bar')
plt.title('Distribution of Angles btw Base Pair %s-%s' %(base_a, base_b))
plt.xlabel('Angle (Degrees)')
plt.ylabel('Frequency')
plt.savefig('angle.%s.%s.png' %(base_a, base_b))
nf = open('angle.%s.%s.dat' %(base_a, base_b), 'w')
for i in range(len(events)):
nf.write(' %10.1f %10.4f\n' %(events[i], edges[i]))
nf.close()
plt.close()
events = []
edges = []
patches = []
def energy_angle_hist(xdata, ydata, base_a, base_b):
counts, xedges, yedges, image = plt.hist2d(xdata, ydata, bins = 100)
cb1 = plt.colorbar()
cb1.set_label('Frequency')
plt.title('Distribution of Base Pair interactions - %s-%s' %(base_a, base_b))
plt.xlabel('Angle (Degrees)')
plt.ylabel('vdW Energy ($kcal\ mol^{-1}$)')
plt.ylim((-6,0.5))
plt.savefig('vdw_angle.%s.%s.png' %(base_a, base_b))
plt.close()
counts = []
xedges = []
yedges = []
image = []
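# (Added note) Stacking criterion used in the main program below: a base pair
# counts as stacked in a given frame when its vdW energy is <= -3.5 kcal/mol
# and the angle between the two base-plane normals is <= 30 degrees; the
# per-base total_binaries then count how many partners each base stacks with
# in that frame.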
# MAIN PROGRAM:
# ----------------------------------------
# ATOM SELECTION - load the trajectory and select the desired nucleotide atoms to be analyzed later on
u = MDAnalysis.Universe('../nucleic_ions.pdb', traj_file, delta=2.0) # load in trajectory file
Nsteps = len(u.trajectory)
equilib_step = 37500 # first 75 ns are not to be included in total stacking metric
production = Nsteps - equilib_step
nucleic = u.selectAtoms('resid 1:15') # atom selections for nucleic chain
a1 = nucleic.selectAtoms('resid 1') # residue 1 has different atom IDs for the base atoms
a1_base = a1.atoms[10:24] # atom selections
bases = [] # make a list of the 15 bases filled with atoms
bases.append(a1_base) # add base 1 into list
for residue in nucleic.residues[1:15]: # collect the other bases into list
residue_base = []
residue_base = residue.atoms[12:26]
bases.append(residue_base)
# ----------------------------------------
# DICTIONARY DEVELOPMENT - Develop the normals and total binary dictionary which contain the data for each base
while base1 <= nbases:
normals['normal.%s' %(base1)] = get_norm('normal.%s' %(base1), np.zeros((Nsteps, 3)))
total_binaries['base.%s' %(base1)] = get_tb('base.%s' %(base1), np.zeros(Nsteps))
base1 += 1
# ----------------------------------------
# SIMULATION TIME - calculate the array that contains the simulation time in ns units
time = np.zeros(Nsteps)
for i in range(Nsteps):
time[i] = i*0.002 # time units: ns
# ----------------------------------------
# NORMAL ANALYSIS for each base - loops through all bases and all timesteps of the trajectory; calculates the normal vector of the base atoms
base1 = 1
while (base1 <= nbases):
for ts in u.trajectory:
Princ_axes = []
Princ_axes = bases[base1 - 1].principalAxes()
normals['normal.%s' %(base1)][ts.frame - 1] = Princ_axes[2] # ts.frame index starts at 1; add normal to dictionary with index starting at 0
base1 += 1
# ----------------------------------------
# BASE PAIR ANALYSIS - loops through all base pairs (w/out duplicates) and performs the angle analysis as well as the binary analysis
base1 = 1 # reset the base index to start at 1
while (base1 <= nbases): # while loops to perform the base-pair analysis while avoiding performing the same analysis twice
base2 = base1 + 1
while (base2 <= nbases):
os.mkdir('base%s_base%s' %(base1, base2)) # makes and moves into a directory for the base pair
os.chdir('base%s_base%s' %(base1, base2))
energyfile = '../../nonbond_energy/base%s_base%s/base%s_base%s.energies.dat' %(base1, base2, base1, base2)
energies = ldtxt(energyfile) # load in the energy file to a numpy array
vdw_energies = energies[:,2]
binary = zeros(Nsteps)
nf = open('binary.%s.%s.dat' %(base1, base2), 'w') # write the base pair data to a file; make sure to be writing this in a base pair directory
# angle and binary analysis for base pair;
for i in range(Nsteps):
angle = 0.
angle = arccosine(dotproduct(normals['normal.%s' %(base1)][i], normals['normal.%s' %(base2)][i]))
angle = angle*(180./pi)
if angle > 90.:
angle = 180. - angle
if vdw_energies[i] <= -3.5 and angle <= 30.: # cutoff: -3.5 kcal mol^-1 and 30 degrees
binary[i] = 1. # assumed else binary[i] = 0.
nf.write(' %10.3f %10.5f %10.5f %10.1f\n' %(time[i], vdw_energies[i], angle, binary[i])) # check time values
total_binaries['base.%s' %(base1)][i] = total_binaries['base.%s' %(base1)][i] + binary[i]
total_binaries['base.%s' %(base2)][i] = total_binaries['base.%s' %(base2)][i] + binary[i]
nf.close()
angles = []
energies = []
vdw_energies = []
os.chdir('..')
base2 += 1
base1 += 1
# ----------------------------------------
# TOTAL BINARY METRIC ANALYSIS - writing to file and plotting
# print out (also plot) the total binary data to an indivual file for each individual base
base1 = 1 # reset the base index to start at 1
os.mkdir('total_binaries')
os.chdir('total_binaries')
while (base1 <= nbases):
os.mkdir('base%s' %(base1))
os.chdir('base%s' %(base1))
nf = open('binary.%s.dat' %(base1), 'w')
for i in range(Nsteps):
nf.write(' %10.3f %10.1f\n' %(time[i], total_binaries['base.%s' %(base1)][i])) # check time values
nf.close()
counts = 0
for i in range(equilib_step, Nsteps):
if total_binaries['base.%s' %(base1)][i] > 0.:
counts +=1
prob = 0.
prob = (float(counts)/production)*100.
nf = open('stacking.%s.dat' %(base1), 'w')
nf.write('counts: %10.1f out of %10.1f time steps \n Probability of stacking = %10.4f ' %(counts, production, prob))
nf.close()
plotting(time[:], total_binaries['base.%s' %(base1)][:], base1)
os.chdir('..')
base1 += 1
# ----------------------------------------
# BASE PAIR PLOTTING - making histogram plots for vdW energy distributions, angle distributions, and 2d hist of vdw vs angle distributions
# Also printing out a file that contains the count of timesteps where the base pairs are stacked
os.chdir('..')
base1 = 1
while (base1 <= nbases): # while loops to perform the base-pair analysis while avoiding performing the same analysis twice
base2 = base1 + 1
while (base2 <= nbases):
os.chdir('base%s_base%s' %(base1, base2))
infile = 'binary.%s.%s.dat' %(base1, base2)
data = ldtxt(infile) # data[0] = time, data[1] = vdW energies, data[2] = angle, data[3] = base pair binary metric
vdw_hist(data[equilib_step:,1], base1, base2)
angle_hist(data[equilib_step:,2], base1, base2)
energy_angle_hist(data[equilib_step:,2], data[equilib_step:,1], base1, base2)
nf = open('stacking.%s.%s.dat' %(base1, base2), 'w')
bp_counts = sum(data[equilib_step:,3])
nf.write('counts for base pair %s-%s: %10.1f' %(base1, base2, bp_counts))
nf.close()
data = []
os.chdir('..')
base2 += 1
base1 += 1
# ----------------------------------------
# END
| mit |
TribeMedia/sky_engine | testing/scripts/get_compile_targets.py | 76 | 1285 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import common
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True)
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args(argv)
passthrough_args = args.args
if passthrough_args[0] == '--':
passthrough_args = passthrough_args[1:]
results = {}
for filename in os.listdir(common.SCRIPT_DIR):
if not filename.endswith('.py'):
continue
if filename in ('common.py', 'get_compile_targets.py'):
continue
with common.temporary_file() as tempfile_path:
rc = common.run_command(
[sys.executable, os.path.join(common.SCRIPT_DIR, filename)] +
passthrough_args +
[
'compile_targets',
'--output', tempfile_path
]
)
if rc != 0:
return rc
with open(tempfile_path) as f:
results[filename] = json.load(f)
with open(args.output, 'w') as f:
json.dump(results, f)
return 0
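# (Added note) The --output file ends up as a JSON object keyed by each
# sibling script's filename, with each value being whatever that script wrote
# for its own 'compile_targets' invocation, e.g. (illustrative only):
#   {"some_script.py": ["target_a", "target_b"], ...}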
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
JeffAMcGee/friendloc | friendloc/explore/sprawl.py | 1 | 13724 | #!/usr/bin/env python
import numpy
import random
import logging
import itertools
import collections
from friendloc.base import gob
from friendloc.base.models import Edges, User, Tweets
from friendloc.base import gisgraphy, twitter, utils
NEBR_KEYS = ['rfriends','just_followers','just_friends','just_mentioned']
@gob.mapper(all_items=True)
def parse_geotweets(tweets):
"""
read tweets from Twitter's streaming API and save users and their tweets
USAGE: gunzip -c ~/may/*/*.gz | ./gb.py -s parse_geotweets
"""
# We save users and locations intermingled because this data is too big to
# fit in memory, and we do not want to do two passes.
users = set()
for i,t in enumerate(tweets):
if i%10000 ==0:
logging.info("read %d tweets"%i)
if 'id' not in t: continue # this is not a tweet
uid = t['user']['id']
if not t.get('coordinates'): continue
if uid not in users:
yield User.mod_id(uid),t['user']
users.add(uid)
yield User.mod_id(uid),(uid,t['coordinates']['coordinates'])
logging.info("sending up to %d users"%len(users))
def _untangle_users_and_coords(users_and_coords):
users = {}
locs = collections.defaultdict(list)
for user_or_coord in users_and_coords:
if isinstance(user_or_coord,dict):
users[user_or_coord['id']] = user_or_coord
else:
uid,coord = user_or_coord
locs[uid].append(coord)
return users, locs
@gob.mapper(all_items=True)
def mloc_users(users_and_coords):
"""
pick users with good home locations from geotweets
"""
users, locs = _untangle_users_and_coords(users_and_coords)
selected = []
for uid,user in users.iteritems():
spots = locs[uid]
if len(spots)<=2: continue
if user['followers_count']==0 and user['friends_count']==0: continue
median = utils.median_2d(spots)
dists = [utils.coord_in_miles(median,spot) for spot in spots]
if numpy.median(dists)>50:
continue #user moves too much
user['mloc'] = median
selected.append(user)
random.shuffle(selected)
return selected
@gob.mapper(all_items=True)
def mloc_reject_count(users_and_coords):
"""
count the number of users we ignored in mloc_users. (This job was done to
calculate a number for the paper, and is almost trash.)
"""
results = collections.defaultdict(int)
users, locs = _untangle_users_and_coords(users_and_coords)
for uid,user in users.iteritems():
spots = locs[uid]
if len(spots)<=2:
results['spots']+=1
continue
median = utils.median_2d(spots)
dists = [utils.coord_in_miles(median,spot) for spot in spots]
if numpy.median(dists)>50:
results['moves']+=1
elif user['followers_count']==0 and user['friends_count']==0:
results['counts']+=1
else:
results['good']+=1
return results.iteritems()
def _fetch_edges(twit,uid):
edges = Edges.get_id(uid)
if not edges:
edges = twit.get_edges(uid)
edges.save()
return edges
def _fetch_tweets(twit,uid):
tweets = Tweets.get_id(uid)
if not tweets:
tweets_ = twit.user_timeline(uid)
tweets = Tweets(_id=uid,tweets=tweets_)
tweets.save()
return tweets
def _contact_sets(tweets, edges):
ated = set(tweets.ats or [])
frds = set(edges.friends)
fols = set(edges.followers)
return dict(
rfriends = frds&fols,
just_friends = frds-fols,
just_followers = fols-frds,
just_mentioned = ated-(frds|fols),
)
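# e.g. a contact the user both follows and is followed by ends up in
# 'rfriends', while one that is only @-mentioned (neither friend nor
# follower) ends up in 'just_mentioned'.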
def _pick_best_contacts(user, sets, limit=100):
def digit_sum(uid):
return sum(map(int,str(uid)))
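        # e.g. digit_sum(1987) == 1+9+8+7 == 25; sorting by digit sum is a
        # cheap, deterministic way to spread which contacts get picked first.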
left = limit
for key in NEBR_KEYS:
if left>0:
uids = sorted(sets[key],key=digit_sum,reverse=True)[:left]
else:
uids = []
left -=len(uids)
setattr(user,key,uids)
def _pick_random_contacts(user, sets, limit=100):
#pick uids from sets
for key,s in sets.iteritems():
l = list(s)
random.shuffle(l)
setattr(user,key,l[:limit//4])
def _save_user_contacts(twit,user,contact_picker,limit):
logging.info("visit %s - %d",user.screen_name,user._id)
if user.protected:
user.error_status=401
user.merge()
return None, None
edges, tweets = None, None
try:
edges = _fetch_edges(twit,user._id)
tweets = _fetch_tweets(twit,user._id)
sets = _contact_sets(tweets,edges)
contact_picker(user,sets,limit)
except twitter.TwitterFailure as e:
logging.warn("%d for %d",e.status_code,user._id)
user.error_status = e.status_code
user.merge()
return edges, tweets
def _my_contacts(user):
return ((User.mod_id(c),c) for c in user.contacts)
@gob.mapper(all_items=True)
def find_contacts(user_ds):
"""
for each target user, fetch edges and tweets, pick 100 located contact ids
"""
gis = gisgraphy.GisgraphyResource()
twit = twitter.TwitterResource()
for user_d in itertools.islice(user_ds,2600):
user = User.get_id(user_d['id'])
if user:
logging.warn("not revisiting %d",user._id)
else:
user = User(user_d)
user.geonames_place = gis.twitter_loc(user.location)
_save_user_contacts(twit, user, _pick_random_contacts, limit=100)
for mod_nebr in _my_contacts(user):
yield mod_nebr
@gob.mapper()
def find_leafs(uid):
"""
for each contact, fetch edges and tweets, pick 100 leaf ids
"""
twit = twitter.TwitterResource()
user = User.get_id(uid)
_save_user_contacts(twit, user, _pick_random_contacts, limit=100)
return _my_contacts(user)
@gob.mapper(all_items=True)
def total_contacts(user_ds):
"""
count the total number of contacts (to include in the paper)
"""
for user_d in itertools.islice(user_ds,2600):
user = User.get_id(user_d['id'])
if not user:
yield "no user"
elif user.error_status:
yield str(user.error_status)
else:
edges = Edges.get_id(user._id)
tweets = Tweets.get_id(user._id)
if not edges or not tweets:
yield "no contacts"
else:
sets = _contact_sets(tweets,edges)
yield [len(sets[k]) for k in User.NEBR_KEYS]
@gob.mapper(all_items=True)
def mloc_uids(user_ds):
"""
pick 2500 target users who have locations and good contacts
"""
retrieved = [u['id'] for u in itertools.islice(user_ds,2600)]
users = User.find(User._id.is_in(retrieved))
good_ = { u._id for u in users if any(getattr(u,k) for k in NEBR_KEYS)}
good = [uid for uid in retrieved if uid in good_]
logging.info("found %d of %d",len(good),len(retrieved))
# throw away accounts that didn't work to get down to the 2500 good users
return good[:2500]
@gob.mapper(all_items=True)
def trash_extra_mloc(mloc_uids):
"remove the mloc_users that mloc_uids skipped over"
# This scares me a bit, but it's too late to go back and fix find_contacts.
# I really wish I had limited find_contacts to stop after 2500 good users.
db = User.database
mloc_uids = set(mloc_uids)
group_ = set(uid%100 for uid in mloc_uids)
assert len(group_)==1
group = next(iter(group_))
stored = User.mod_id_set(group)
trash = list(stored - mloc_uids)
logging.info("trashing %d users",len(trash))
logging.debug("full list: %r",trash)
db.Edges.remove({'_id':{'$in':trash}})
db.Tweets.remove({'_id':{'$in':trash}})
db.User.remove({'_id':{'$in':trash}})
@gob.mapper()
def saved_users():
"""
Create set of ids already already in the database so that lookup_contacts
can skip these users. Talking to the database in lookup_contacts to check
if users are in the database is too slow.
"""
users = User.database.User.find({},fields=[],timeout=False)
return ((User.mod_id(u['_id']),u['_id']) for u in users)
@gob.mapper(all_items=True,slurp={'mdists':next})
def lookup_contacts(contact_uids,mdists,env):
"""
lookup user profiles for contacts or leafs
"""
twit = twitter.TwitterResource()
gis = gisgraphy.GisgraphyResource()
gis.set_mdists(mdists)
# FIXME: we need a better way to know which file we are on.
# FIXME: use the new input_paths thing
first, contact_uids = utils.peek(contact_uids)
group = User.mod_id(first)
logging.info('lookup old uids for %s',group)
save_name = 'saved_users.%s'%group
if env.name_exists(save_name):
stored = set(env.load(save_name))
else:
stored = User.mod_id_set(int(group))
logging.info('loaded mod_group %s of %d users',group,len(stored))
missing = (id for id in contact_uids if id not in stored)
chunks = utils.grouper(100, missing, dontfill=True)
for chunk in chunks:
users = twit.user_lookup(user_ids=list(chunk))
for amigo in filter(None,users):
assert User.mod_id(amigo._id)==group
amigo.geonames_place = gis.twitter_loc(amigo.location)
amigo.merge()
yield len(users)
def _pick_neighbors(user):
nebrs = {}
for key in NEBR_KEYS:
cids = getattr(user,key)
if not cids:
continue
# this is slowish
contacts = User.find(User._id.is_in(cids), fields=['gnp'])
nebrs[key] = set(u._id for u in contacts if u.has_place())
picked_ = filter(None,
itertools.chain.from_iterable(
itertools.izip_longest(*nebrs.values())))
picked = picked_[:25]
logging.info('picked %d of %d contacts',len(picked),len(user.contacts))
return picked
@gob.mapper()
def pick_nebrs(mloc_uid):
"""
For each target user, pick the 25 located contacts.
"""
# reads predict.prep.mloc_uids, requires lookup_contacts, but don't read it.
user = User.get_id(mloc_uid)
user.neighbors = _pick_neighbors(user)
user.save()
return ((User.mod_id(n),n) for n in user.neighbors)
@gob.mapper(all_items=True,slurp={'mdists':next})
def fix_mloc_mdists(mloc_uids,mdists):
"""
Add the median location error to profiles of contacts and target users.
"""
gis = gisgraphy.GisgraphyResource()
gis.set_mdists(mdists)
# We didn't have mdists at the time the mloc users were saved. This
# function could be avoided by running the mdist calculation before
# running find_contacts.
fixed = 0
users = User.find(User._id.is_in(tuple(mloc_uids)))
for user in users:
user.geonames_place = gis.twitter_loc(user.location)
user.save()
if user.geonames_place:
fixed+=1
logging.info("fixed %d mdists",fixed)
return [fixed]
@gob.mapper(all_items=True)
def uid_split(groups):
"""
after a set reduce, split up the user ids into seperate files
"""
# This method should really be built into gob somehow.
return (
(group, id)
for group,ids in groups
for id in ids
)
def _fetch_profiles(uids,twit,gis):
users = list(User.find(User._id.is_in(uids)))
existing_ids = {u._id for u in users}
missing_ids = [uid for uid in uids if uid not in existing_ids]
chunks = utils.grouper(100, missing_ids, dontfill=True)
for chunk in chunks:
found = twit.user_lookup(user_ids=list(chunk))
for amigo in filter(None,found):
amigo.geonames_place = gis.twitter_loc(amigo.location)
amigo.merge()
users.append(amigo)
return users
def _calc_lorat(nebrs,twit,gis):
leaf_ids = {uid
for nebr in nebrs
for uid in nebr.contacts[:10]}
leafs_ = _fetch_profiles(list(leaf_ids),twit,gis)
leafs = {leaf._id:leaf for leaf in leafs_}
for nebr in nebrs:
# Does this break if the contact does not exist?
nebr_loc = nebr.geonames_place.to_d()
dists = []
for leaf_id in nebr.contacts[:10]:
leaf = leafs.get(leaf_id)
if leaf and leaf.has_place():
dist = utils.coord_in_miles(nebr_loc,leaf.geonames_place.to_d())
dists.append(dist)
if dists:
lorat = sum(1.0 for d in dists if d<25)/len(dists)
else:
lorat = float('nan')
nebr.local_ratio = lorat
CrawlResults = collections.namedtuple("CrawlResults",['nebrs','ats','ated'])
def crawl_single(user, twit, gis, fast):
"""
save a single user, contacts, and leafs to the database
crawl a user object who has not been visited before
twit is a TwitterResource
gis is a GisgraphyResource with mdists
"""
edges,tweets=_save_user_contacts(twit, user, _pick_best_contacts, limit=100)
contact_ids = user.contacts
profiles = {p._id:p for p in _fetch_profiles(contact_ids,twit,gis)}
def has_place(uid):
return uid in profiles and profiles[uid].has_place()
user.neighbors = filter(has_place, contact_ids)[:25]
nebrs = [profiles[nid] for nid in user.neighbors]
ated = set()
if not fast:
for nebr in nebrs:
ne,nt = _save_user_contacts(twit, nebr, _pick_best_contacts, limit=100)
if nt and nt.ats and user._id in nt.ats:
ated.add(nebr._id)
need_lorat = [nebr for nebr in nebrs if nebr.local_ratio is None]
_calc_lorat(need_lorat,twit,gis)
for nebr in need_lorat:
nebr.merge()
user.merge()
return CrawlResults(nebrs,tweets.ats if tweets else [],ated)
| bsd-2-clause |
runiq/modeling-clustering | find-correct-cluster-number/plot_clustering_metrics.py | 1 | 10092 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Performs a clustering run with a number of clusters and a given mask,
and creates graphs of the corresponding DBI, pSF, SSR/SST, and RMSD
values.
These facilitate the choice of cluster numbers and improve the clustering
process by allowing one to pick the number of clusters with the highest
information content.
"""
# TODO
# - Fix plot_tree()
# - Do some logging
# - remove clustering_run from plot_metrics() and plot_tree() as it
# basically represents world state. Use explicit metrics/nodes instead
# - Implement ylabel alignment as soon as PGF backend has its act together
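# Example invocations (illustrative only; the file names are assumptions):
#   ./plot_clustering_metrics.py cluster my_system.prmtop -m '@CA,C,O,N' -n 30
#   ./plot_clustering_metrics.py plot -o clustering_metrics.png -n 30
#   ./plot_clustering_metrics.py tree -i dist -o tree.newick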
import cStringIO as csio
from glob import glob, iglob
import os
import os.path as op
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tic
import matplotlib.transforms as tfs
import clustering_run as cr
import newick as cn
def align_yaxis_labels(axes, sortfunc):
xpos = sortfunc(ax.yaxis.get_label().get_position()[0] for ax in axes)
for ax in axes:
trans = tfs.blended_transform_factory(tfs.IdentityTransform(), ax.transAxes)
ax.yaxis.set_label_coords(xpos, 0.5, transform=trans)
def plot_metrics(clustering_run, output_file, xmin=None, xmax=None,
use_tex=False, figsize=(12,8), square=False):
metrics = clustering_run.gather_metrics()
# The ±0.5 are so that all chosen points are well within the
# plots
if xmin is None:
xmin = min(metrics['n'])
if xmax is None:
xmax = max(metrics['n'])
xlim = (xmin-0.5, xmax+0.5)
fig = plt.figure(figsize=figsize)
if clustering_run.no_ssr_sst:
gridindex = 310
else:
if square:
gridindex = 220
else:
gridindex = 410
if use_tex:
rmsd_ylabel = r'Critical distance/\si{\angstrom}'
xlabel = r'$n_{\text{Clusters}}$'
else:
rmsd_ylabel = u'Critical distance/Å'
xlabel = r'Number of clusters'
ax1 = fig.add_subplot(gridindex+1, ylabel=rmsd_ylabel)
ax2 = fig.add_subplot(gridindex+2, ylabel='DBI', sharex=ax1)
ax3 = fig.add_subplot(gridindex+3, ylabel='pSF', sharex=ax1)
ax1.plot(metrics['n'], metrics['rmsd'], marker='.')
ax2.plot(metrics['n'], metrics['dbi'], marker='.')
ax3.plot(metrics['n'], metrics['psf'], marker='.')
if not clustering_run.no_ssr_sst:
ax4 = fig.add_subplot(gridindex+4,
ylabel='SSR/SST', xlim=xlim, sharex=ax1)
ax4.plot(metrics['n'], metrics['ssr_sst'], marker='.')
if square and not clustering_run.no_ssr_sst:
nonxaxes = fig.axes[:-2]
xaxes = fig.axes[-2:]
lefthandplots = fig.axes[0::2]
righthandplots = fig.axes[1::2]
# Put yticklabels of right-hand plots to the right
for ax in righthandplots:
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
else:
nonxaxes = fig.axes[:-1]
xaxes = [fig.axes[-1]]
lefthandplots = fig.axes
# xaxes limits and tick locations are propagated across sharex plots
for ax in xaxes:
ax.set_xlabel(xlabel)
ax.xaxis.set_major_locator(tic.MultipleLocator(10))
ax.xaxis.set_minor_locator(tic.AutoMinorLocator(2))
for ax in nonxaxes:
plt.setp(ax.get_xticklabels(), visible=False)
# 5 yticklabels are enough for everybody
for ax in fig.axes:
ax.yaxis.set_major_locator(tic.MaxNLocator(nbins=5))
ax.yaxis.set_minor_locator(tic.MaxNLocator(nbins=5))
# Draw first to get proper ylabel coordinates
# fig.canvas.draw()
# align_yaxis_labels(lefthandplots, sortfunc=min)
# if square and not clustering_run.no_ssr_sst:
# align_yaxis_labels(righthandplots, sortfunc=max)
fig.savefig(output_file)
def plot_tree(clustering_run, node_info, steps, dist, output, graphical=None, no_length=False):
tree = cn.parse_clustermerging(clustering_run)
newick = tree.create_newick(node_info=node_info, no_length=no_length, steps=steps, dist=dist)
if output is sys.stdout:
fh = output
else:
fh = open(output, 'w')
fh.write(newick)
fh.close()
fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Cluster tree')
if graphical is not None:
cn.draw(csio.StringIO(newick), do_show=False, axes=ax1)
fig.savefig(graphical)
def parse_args():
import argparse as ap
parser = ap.ArgumentParser()
parser.add_argument('-c', '--cm-file', metavar='FILE',
default='./ClusterMerging.txt', dest='cm_fn',
help="File to parse (default: ./ClusterMerging.txt)")
parser.add_argument('-C', '--matplotlibrc', metavar='FILE', default=None,
help="Matplotlibrc file to use")
parser.add_argument('-p', '--prefix', default='c',
help="Prefix for clustering result files (default: \"c\")")
parser.add_argument('-N', '--no-ssr-sst', action='store_true', default=False,
help="Don't gather SSR_SST values (default: False)")
subs = parser.add_subparsers(dest='subcommand', help="Sub-command help")
c = subs.add_parser('cluster', help="Do clustering run to gather metrics")
c.add_argument('prmtop', help="prmtop file")
c.add_argument('-m', '--mask', metavar='MASKSTR', default='@CA,C,O,N',
help=("Mask string (default: '@CA,C,O,N')"))
c.add_argument('-P', '--ptraj-trajin-file', metavar='FILE',
default='ptraj_trajin', dest='ptraj_trajin_fn',
help=("Filename for ptraj trajin file (default: ptraj_trajin)"))
c.add_argument('-n', '--num-clusters', dest='n_clusters', type=int,
metavar='CLUSTERS', default=50,
help="Number of clusters to examine (default (also maximum): 50)")
c.add_argument('-s', '--start-num-clusters', dest='start_n_clusters',
type=int, metavar='CLUSTERS', default=2,
help="Number of clusters to start from (default: 2)")
c.add_argument('-l', '--logfile', metavar='FILE', default=None,
dest='log_fn',
help=("Logfile for ptraj run (default: Print to stdout)"))
c.add_argument('--use-cpptraj', action='store_true', default=False,
help="Use cpptraj instead of ptraj")
t = subs.add_parser('tree', help="Create Newick tree representation")
t.add_argument('-o', '--output', metavar='FILE', default=sys.stdout,
help="Output file for Newick tree (default: print to terminal)")
t.add_argument('-g', '--graphical', default=None,
help="Save tree as png (default: Don't)")
t.add_argument('-s', '--steps', type=int, default=None,
help="Number of steps to print (default: all)")
t.add_argument('-d', '--dist', type=float, default=None,
help="Minimum distance to print (default: all)")
t.add_argument('-i', '--node-info', choices=['num', 'dist', 'id'],
default='num', help="Node data to print")
t.add_argument('-l', '--no-length', default=False, action='store_true',
help="Don't print branch length information")
p = subs.add_parser('plot', help="Plot clustering metrics")
p.add_argument('-o', '--output', metavar='FILE',
default='clustering_metrics.png',
help="Filename for output file (default: show using matplotlib)")
p.add_argument('-n', '--num-clusters', dest='n_clusters', type=int,
metavar='CLUSTERS', default=50,
help="Number of clusters to examine (default (also maximum): 50)")
p.add_argument('-s', '--start-num-clusters', dest='start_n_clusters',
type=int, metavar='CLUSTERS', default=2,
help="Number of clusters to start from (default: 2)")
p.add_argument('-T', '--use-tex', default=False, action='store_true',
help="Use LaTeX output (default: use plaintext output)")
p.add_argument('-S', '--fig-size', nargs=2, type=float, metavar='X Y', default=[12, 8],
help=("Figure size in inches (default: 12x8)"))
p.add_argument('--square', default=False, action='store_true',
help="Plot in two columns")
return parser.parse_args()
def main():
args = parse_args()
if args.matplotlibrc is not None:
matplotlib.rc_file(args.matplotlibrc)
if args.subcommand == 'cluster':
if args.n_clusters < 1 or args.n_clusters > 50:
print "Error: Maximum cluster number must be between 1 and 50."
sys.exit(1)
cn_fns = None
clustering_run = cr.ClusteringRun(prmtop=args.prmtop,
start_n_clusters=args.start_n_clusters, n_clusters=args.n_clusters,
cm_fn=args.cm_fn, mask=args.mask,
ptraj_trajin_fn=args.ptraj_trajin_fn, cn_fns=cn_fns,
prefix=args.prefix, log_fn=args.log_fn,
no_ssr_sst=args.no_ssr_sst)
else:
if not op.exists(args.cm_fn):
            print ("{cm_fn} doesn't exist. Please perform a clustering run "
                   "first.".format(cm_fn=args.cm_fn))
sys.exit(1)
# We assume that the number of clusters starts at 1
n_clusters = len(glob('{prefix}*.txt'.format(prefix=args.prefix)))
cn_fns = {i: '{prefix}{n}.txt'.format(prefix=args.prefix, n=i) for
i in xrange(1, n_clusters+1)}
# Only cm_fn and cn_fns are necessary for plotting the tree and
# metrics
clustering_run = cr.ClusteringRun(prmtop=None, cm_fn=args.cm_fn,
cn_fns=cn_fns, no_ssr_sst=args.no_ssr_sst)
if args.subcommand == 'plot':
plot_metrics(clustering_run, output_file=args.output,
xmin=args.start_n_clusters, xmax=args.n_clusters,
use_tex=args.use_tex, figsize=args.fig_size,
square=args.square)
elif args.subcommand == 'tree':
plot_tree(clustering_run=clustering_run, node_info=args.node_info,
steps=args.steps, dist=args.dist, no_length=args.no_length,
graphical=args.graphical, output=args.output)
if __name__ == '__main__':
main()
| bsd-2-clause |
trishnaguha/ansible | lib/ansible/modules/cloud/openstack/os_ironic_node.py | 41 | 12663 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_ironic_node
short_description: Activate/Deactivate Bare Metal Resources from OpenStack
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Deploy to nodes controlled by Ironic.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
type: bool
default: 'yes'
uuid:
description:
- globally unique identifier (UUID) to be given to the resource.
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
node.
instance_info:
description:
- Definition of the instance information which is used to deploy
the node. This information is only required when an instance is
set to present.
suboptions:
image_source:
description:
- An HTTP(S) URL where the image can be retrieved from.
image_checksum:
description:
- The checksum of image_source.
image_disk_format:
description:
- The type of image that has been requested to be deployed.
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
choices: ['present', 'absent']
default: present
maintenance:
description:
- A setting to allow the direct control if a node is in
maintenance mode.
type: bool
default: 'no'
maintenance_reason:
description:
- A string expression regarding the reason a node is in a
maintenance mode.
wait:
description:
- A boolean value instructing the module to wait for node
activation or deactivation to complete before returning.
type: bool
default: 'no'
version_added: "2.1"
timeout:
description:
- An integer value representing the number of seconds to
wait for the node activation or deactivation to complete.
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatibility
'''
EXAMPLES = '''
# Activate a node by booting an image with a configdrive attached
os_ironic_node:
cloud: "openstack"
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
state: present
power: present
deploy: True
maintenance: False
config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
# TODO(TheJulia): Change this over to use the machine patch method
# in shade once it is available.
def _prepare_instance_info_patch(instance_info):
patch = []
patch.append({
'op': 'replace',
'path': '/instance_info',
'value': instance_info
})
return patch
def _is_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
if value in true_values:
return True
return False
def _is_false(value):
false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
if value in false_values:
return True
return False
def _check_set_maintenance(module, cloud, node):
if _is_true(module.params['maintenance']):
if _is_false(node['maintenance']):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node has been set into "
"maintenance mode")
else:
# User has requested maintenance state, node is already in the
# desired state, checking to see if the reason has changed.
if (str(node['maintenance_reason']) not in
str(module.params['maintenance_reason'])):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node maintenance reason "
"updated, cannot take any "
"additional action.")
elif _is_false(module.params['maintenance']):
if node['maintenance'] is True:
cloud.remove_machine_from_maintenance(node['uuid'])
return True
else:
module.fail_json(msg="maintenance parameter was set but a valid "
"the value was not recognized.")
return False
def _check_set_power_state(module, cloud, node):
if 'power on' in str(node['power_state']):
if _is_false(module.params['power']):
# User has requested the node be powered off.
cloud.set_machine_power_off(node['uuid'])
module.exit_json(changed=True, msg="Power requested off")
if 'power off' in str(node['power_state']):
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
return False
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
module.exit_json(
changed=False,
msg="Power for node is %s, node must be reactivated "
"OR set to state absent"
)
# In the event the power has been toggled on and
# deployment has been requested, we need to skip this
# step.
if (_is_true(module.params['power']) and
_is_false(module.params['deploy'])):
# Node is powered down when it is not awaiting to be provisioned
cloud.set_machine_power_on(node['uuid'])
return True
# Default False if no action has been taken.
return False
def main():
argument_spec = openstack_full_argument_spec(
uuid=dict(required=False),
name=dict(required=False),
instance_info=dict(type='dict', required=False),
config_drive=dict(required=False),
ironic_url=dict(required=False),
state=dict(required=False, default='present'),
maintenance=dict(required=False),
maintenance_reason=dict(required=False),
power=dict(required=False, default='present'),
deploy=dict(required=False, default=True),
wait=dict(type='bool', required=False, default=False),
timeout=dict(required=False, type='int', default=1800),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears disabled, Please "
"define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
node_id = _choose_id_value(module)
if not node_id:
module.fail_json(msg="A uuid or name value must be defined "
"to use this module.")
sdk, cloud = openstack_cloud_from_module(module)
try:
node = cloud.get_machine(node_id)
if node is None:
module.fail_json(msg="node not found")
uuid = node['uuid']
instance_info = module.params['instance_info']
changed = False
wait = module.params['wait']
timeout = module.params['timeout']
        # User has requested desired state to be in maintenance state.
        if module.params['state'] == 'maintenance':
module.params['maintenance'] = True
if node['provision_state'] in [
'cleaning',
'deleting',
'wait call-back']:
module.fail_json(msg="Node is in %s state, cannot act upon the "
"request as the node is in a transition "
"state" % node['provision_state'])
# TODO(TheJulia) This is in-development code, that requires
# code in the shade library that is still in development.
if _check_set_maintenance(module, cloud, node):
if node['provision_state'] in 'active':
module.exit_json(changed=True,
result="Maintenance state changed")
changed = True
node = cloud.get_machine(node_id)
if _check_set_power_state(module, cloud, node):
changed = True
node = cloud.get_machine(node_id)
if _is_true(module.params['state']):
if _is_false(module.params['deploy']):
module.exit_json(
changed=changed,
result="User request has explicitly disabled "
"deployment logic"
)
if 'active' in node['provision_state']:
module.exit_json(
changed=changed,
result="Node already in an active state."
)
if instance_info is None:
module.fail_json(
changed=changed,
msg="When setting an instance to present, "
"instance_info is a required variable.")
# TODO(TheJulia): Update instance info, however info is
# deployment specific. Perhaps consider adding rebuild
# support, although there is a known desire to remove
# rebuild support from Ironic at some point in the future.
patch = _prepare_instance_info_patch(instance_info)
cloud.set_node_instance_info(uuid, patch)
cloud.validate_node(uuid)
if not wait:
cloud.activate_node(uuid, module.params['config_drive'])
else:
cloud.activate_node(
uuid,
configdrive=module.params['config_drive'],
wait=wait,
timeout=timeout)
# TODO(TheJulia): Add more error checking..
module.exit_json(changed=changed, result="node activated")
elif _is_false(module.params['state']):
if node['provision_state'] not in "deleted":
cloud.purge_node_instance_info(uuid)
if not wait:
cloud.deactivate_node(uuid)
else:
cloud.deactivate_node(
uuid,
wait=wait,
timeout=timeout)
module.exit_json(changed=True, result="deleted")
else:
module.exit_json(changed=False, result="node not found")
else:
module.fail_json(msg="State must be present, absent, "
"maintenance, off")
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == "__main__":
main()
| gpl-3.0 |
PaulEnglert/ML | deep_architectures/deep_belief_network.py | 1 | 2528 | # -*- coding: utf-8 -*-
from neural_networks.boltzmann_machines.generative_rbm import GenRBM
from neural_networks.perceptrons.mlp import MLP
from utilities.data_utils import make_batches
class DBN():
''' Deep belief network aka stacked boltzmann machines'''
def __init__(self, layer_definitions):
self.num_layers = len(layer_definitions)
# build stack of RBMs for pretraining
self.rbm_stack = []
for l in range(self.num_layers - 1):
self.rbm_stack.append(
GenRBM(layer_definitions[l], layer_definitions[l + 1]))
# build MLP used for fine tuning
print 'Initializing MLP with a configuration of {0}, {1}, {2}'.format(
layer_definitions[0],
[l for l in layer_definitions[1:-1]], layer_definitions[-1])
self.mlp = MLP(
layer_definitions[0],
[l + 1 for l in layer_definitions[1:-1]],
layer_definitions[-1])
def pre_train(
self, trainX, epochs_per_layer=5, learning_rate=0.01,
learning_rate_decay=1, lambda_1=0, lambda_2=0):
X = trainX.copy()
for l in range(self.num_layers - 1):
print 'Training GenRBM {0}'.format(l)
batches = make_batches(X.copy(), 100, keep_last=True)
self.rbm_stack[l].train(
batches, epochs=epochs_per_layer,
learning_rate=learning_rate,
learning_rate_decay=learning_rate_decay,
lambda_1=lambda_1,
lambda_2=lambda_2) # train layer with X
X = self.rbm_stack[l].sample_hidden(X)
def fine_tune(
self, trainX, trainY, epochs=10,
learning_rate=0.01, learning_rate_decay=1):
print 'Fine Tuning GenRB as MLP'
self.mlp.set_weights(self.__convert_weights(self.rbm_stack))
self.mlp.train(make_batches(trainX.copy(), 10, keep_last=False),
make_batches(trainY.copy(), 10, keep_last=False),
epochs=epochs,
learning_rate=learning_rate,
learning_rate_decay=learning_rate_decay)
def __convert_weights(self, stack, use_best=False):
weights = []
for s in stack:
# get weights of botzmann machine
w = (s.W_best if use_best else s.W)
# move first row to last and cut first column
weights.append(
w[[i for i in range(1, w.shape[0])] + [0], 1:])
return weights
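if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; the layer sizes, epoch
    # counts and random data below are assumptions, not part of the module).
    import numpy as np
    X = np.random.rand(200, 64)                    # 200 samples, 64 visible units
    Y = np.eye(10)[np.random.randint(0, 10, 200)]  # one-hot targets, 10 classes
    dbn = DBN([64, 32, 16, 10])
    dbn.pre_train(X, epochs_per_layer=1, learning_rate=0.01)
    dbn.fine_tune(X, Y, epochs=1, learning_rate=0.01)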
| bsd-2-clause |
ApolloAuto/apollo | modules/tools/mobileye_viewer/planning_data.py | 3 | 2223 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
class PlanningData:
def __init__(self, planning_pb=None):
self.path_lock = threading.Lock()
self.path_param_lock = threading.Lock()
self.planning_pb = planning_pb
self.path_x = []
self.path_y = []
self.relative_time = []
self.speed = []
self.s = []
self.theta = []
def update(self, planning_pb):
self.planning_pb = planning_pb
def compute_path(self):
if self.planning_pb is None:
return
path_x = []
path_y = []
for point in self.planning_pb.trajectory_point:
path_x.append(-1 * point.path_point.y)
path_y.append(point.path_point.x)
self.path_lock.acquire()
self.path_x = path_x
self.path_y = path_y
self.path_lock.release()
def compute_path_param(self):
if self.planning_pb is None:
return
relative_time = []
speed = []
s = []
theta = []
for point in self.planning_pb.trajectory_point:
relative_time.append(point.relative_time)
speed.append(point.v)
s.append(point.path_point.s)
theta.append(point.path_point.theta)
self.path_param_lock.acquire()
self.relative_time = relative_time
self.speed = speed
self.s = s
self.theta = theta
self.path_param_lock.release()
| apache-2.0 |
suncycheng/intellij-community | python/lib/Lib/site-packages/django/contrib/admin/widgets.py | 73 | 11754 | """
Form Widget classes specific to the Django admin site.
"""
import django.utils.copycompat as copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None: attrs = {}
attrs['class'] = 'selectfilter'
if self.is_stacked: attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
(name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
return mark_safe(u''.join(output))
class AdminDateWidget(forms.DateInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
def __init__(self, attrs={}, format=None):
super(AdminDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'}, format=format)
class AdminTimeWidget(forms.TimeInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
def __init__(self, attrs={}, format=None):
super(AdminTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'}, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
(_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'<ul%s>\n%s\n</ul>' % (
flatatt(self.attrs),
u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
)
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
template_with_initial = (u'<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = (u'<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, attrs=None, using=None):
self.rel = rel
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
params = self.url_parameters()
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
(related_url, url, name))
output.append('<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
if value:
output.append(self.label_for_value(value))
return mark_safe(u''.join(output))
def base_url_parameters(self):
params = {}
if self.rel.limit_choices_to and hasattr(self.rel.limit_choices_to, 'items'):
items = []
for k, v in self.rel.limit_choices_to.items():
if isinstance(v, list):
v = ','.join([str(x) for x in v])
else:
v = str(v)
items.append((k, v))
params.update(dict(items))
return params
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(truncate_words(obj, 14))
except (ValueError, self.rel.to.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join([force_unicode(v) for v in value])
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
for pk1, pk2 in zip(initial, data):
if force_unicode(pk1) != force_unicode(pk2):
return True
return False
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
def __init__(self, widget, rel, admin_site, can_add_related=None):
self.is_hidden = widget.is_hidden
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.to in admin_site._registry
self.can_add_related = can_add_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
def _media(self):
return self.widget.media
media = property(_media)
def render(self, name, value, *args, **kwargs):
rel_to = self.rel.to
info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
try:
related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
except NoReverseMatch:
info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
related_url = '%s%s/%s/add/' % info
self.widget.choices = self.choices
output = [self.widget.render(name, value, *args, **kwargs)]
if self.can_add_related:
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(related_url, name))
output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
return mark_safe(u''.join(output))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def _has_changed(self, initial, data):
return self.widget._has_changed(initial, data)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
class AdminIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
| apache-2.0 |
raywill/crawl_smth | rent_crawl.py | 1 | 3770 | #!/usr/bin/python
# coding:utf-8
from bs4 import BeautifulSoup
import re
import os
import sys
import urllib
import time
import random
import time
#################### Configuration start #################
# Board configuration
# Multiple boards can be crawled; uncomment the lines below to enable them
# Second-hand housing
# board = 'OurHouse'
# Second-hand market main board
# board = 'SecondMarket'
# Rentals
boards = ['OurEstate', 'PolicyEstate', 'SchoolEstate', 'RealEstate_review', 'ShangHaiEstate', 'RealEstate', 'Picture']
# File with the keywords to watch for
keywordsFile = '/home/wwwroot/www.reactshare.cn/rent/keywords.txt'
# Blacklist keywords file
blacklistFile = '/home/wwwroot/www.reactshare.cn/rent/blacklist.txt'
# Crawler output file; simplest to place it under the HTTP server directory
outputFile = '/home/wwwroot/www.reactshare.cn/rent/index.html'
# Comparison file; if nothing has changed, no notification is sent
lastCopy = '/home/wwwroot/www.reactshare.cn/rent/last.html'
# Result notification URL, used to signal that the crawl finished and results can be viewed
notifyUrl = "http://m.reactshare.cn/rent"
# Maximum number of pages to crawl per board
maxCrawlPage = 3
# Minimum number of seconds to wait after each page, to avoid being blacklisted,
# plus a random factor, for a total wait of baseSleepSec + [0~X] seconds
baseSleepSec = 1
randWaitSec = 2
# random wait
################### Configuration end #################
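# (Added note) Both keyword files are plain text with one entry per line
# (they are read with readlines() and stripped below); presumably UTF-8 so
# the substring matching against the fetched pages works.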
reload(sys)
sys.setdefaultencoding("utf-8")
keywords = []
blacklist = []
matched = []
final = []
def notInBlackList(item) :
for kw in blacklist:
if item.find(kw) >= 0:
return False
return True
for kw in open(keywordsFile).readlines():
keywords.append(kw.strip())
for kw in open(blacklistFile).readlines():
blacklist.append(kw.strip())
for board in boards:
# continue
for page in range(1, maxCrawlPage):
url = 'http://m.newsmth.net/board/%s?p=%s' % (board, page)
data = urllib.urlopen(url).read()
# print data
soup = BeautifulSoup(data, "html5lib")
for a in soup.find_all(href=re.compile("\/article\/" + board)):
item = a.encode('utf-8')
for kw in keywords:
if item.find(kw) >= 0 and notInBlackList(item):
matched.append(item)
time.sleep(baseSleepSec + randWaitSec * random.random())
for item in matched:
if item not in final:
final.append(item)
# Sort once, so that people repeatedly bumping threads don't reorder the results
final.sort()
union=final
# Check whether this crawl picked up anything new
if os.path.exists(lastCopy):
last=[]
for item in open(lastCopy).readlines():
last.append(item.strip())
union=list(set(last).union(set(final)))
diff=list(set(union) ^ set(last))
if len(diff) == 0 :
sys.exit(0)
# Save the merged data as the last-copy file
tmp = open(lastCopy, 'w')
tmp.write('\n'.join(union))
tmp.close()
# Write the output page
# Re-render the page grouped by keyword
html = "<html><head><meta charset='UTF-8' /><meta name='viewport' content='width=device-width,user-scalable=yes'><meta name='apple-mobile-web-app-capable' content='yes'><title>水木爬爬</title><base href='http://m.newsmth.net/' /></head><body>"
html += "<style> a:visited {color:gray;} a:active {color:red;} a {color:blue;}</style>"
for kw in keywords:
html += "<h2> %s </h2>" % (kw)
for item in union:
if item.find(kw) >= 0:
html += "%s<br/>" % (item)
html += "<hr />"
for board in boards:
html += "<p><a href='http://m.newsmth.net/board/%s'>%s</a></p>" % (board, board)
html += "<hr />"
html += "<p>%d items updated at %s </p><p><a href='http://m.newsmth.net/'>水木社区</a></p>" % (len(union), time.strftime('%Y-%m-%d %X', time.localtime()))
html += "</body></html>"
output = open(outputFile, 'w')
output.write(html)
output.close()
# notify
data = urllib.urlopen(notifyUrl).read()
| mit |
cobalys/django | django/contrib/localflavor/es/es_provinces.py | 110 | 1482 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
PROVINCE_CHOICES = (
('01', _('Araba')),
('02', _('Albacete')),
('03', _('Alacant')),
('04', _('Almeria')),
('05', _('Avila')),
('06', _('Badajoz')),
('07', _('Illes Balears')),
('08', _('Barcelona')),
('09', _('Burgos')),
('10', _('Caceres')),
('11', _('Cadiz')),
('12', _('Castello')),
('13', _('Ciudad Real')),
('14', _('Cordoba')),
('15', _('A Coruna')),
('16', _('Cuenca')),
('17', _('Girona')),
('18', _('Granada')),
('19', _('Guadalajara')),
('20', _('Guipuzkoa')),
('21', _('Huelva')),
('22', _('Huesca')),
('23', _('Jaen')),
('24', _('Leon')),
('25', _('Lleida')),
('26', _('La Rioja')),
('27', _('Lugo')),
('28', _('Madrid')),
('29', _('Malaga')),
('30', _('Murcia')),
('31', _('Navarre')),
('32', _('Ourense')),
('33', _('Asturias')),
('34', _('Palencia')),
('35', _('Las Palmas')),
('36', _('Pontevedra')),
('37', _('Salamanca')),
('38', _('Santa Cruz de Tenerife')),
('39', _('Cantabria')),
('40', _('Segovia')),
('41', _('Seville')),
('42', _('Soria')),
('43', _('Tarragona')),
('44', _('Teruel')),
('45', _('Toledo')),
('46', _('Valencia')),
('47', _('Valladolid')),
('48', _('Bizkaia')),
('49', _('Zamora')),
('50', _('Zaragoza')),
('51', _('Ceuta')),
('52', _('Melilla')),
)
| bsd-3-clause |
openaire/iis | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/dateutil/easter.py | 291 | 2633 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
# New method
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
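# Informal usage note (not part of the original module; dates quoted from
# memory, so verify before relying on them):
#   easter(2024)                  -> datetime.date(2024, 3, 31)   (Western, default)
#   easter(2024, EASTER_ORTHODOX) -> a Sunday in May 2024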
| apache-2.0 |
svn2github/vbox | src/VBox/HostServices/SharedOpenGL/crserverlib/server_dispatch.py | 12 | 4596 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys, string, re
import apiutil
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE AUTOMATICALLY GENERATED BY server_dispatch.py SCRIPT */
#include "cr_spu.h"
#include "chromium.h"
#include "cr_error.h"
#include "server_dispatch.h"
#include "server.h"
#include "cr_unpack.h"
CRCurrentStatePointers crServerCurrent;
"""
for func_name in apiutil.AllSpecials( sys.argv[1]+"/../state_tracker/state" ):
params = apiutil.Parameters(func_name)
if (apiutil.FindSpecial( "server", func_name ) or
"get" in apiutil.Properties(func_name)):
continue
wrap = apiutil.GetCategoryWrapper(func_name)
if wrap:
print '#if defined(CR_%s)' % wrap
print 'void SERVER_DISPATCH_APIENTRY crServerDispatch%s( %s )' % ( func_name, apiutil.MakeDeclarationString( params ) )
print '{'
print '\tcrState%s( %s );' % (func_name, apiutil.MakeCallString( params ) )
print '\tcr_server.head_spu->dispatch_table.%s( %s );' % (func_name, apiutil.MakeCallString( params ) )
print '}'
if wrap:
print '#endif'
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
current = 0
array = ""
condition = ""
m = re.search( r"^(Color|Normal)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
if m :
current = 1
name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
type = m.group(3) + m.group(2)
m = re.search( r"^(SecondaryColor)(3)(ub|b|us|s|ui|i|f|d)(EXT)$", func_name )
if m :
current = 1
name = string.lower(m.group(1)[:1] ) + m.group(1)[1:]
type = m.group(3) + m.group(2)
m = re.search( r"^(TexCoord)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
if m :
current = 1
name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
type = m.group(3) + m.group(2)
array = "[0]"
m = re.search( r"^(MultiTexCoord)([1234])(ub|b|us|s|ui|i|f|d)ARB$", func_name )
if m :
current = 1
name = "texCoord"
type = m.group(3) + m.group(2)
array = "[texture-GL_TEXTURE0_ARB]"
condition = "if (texture >= GL_TEXTURE0_ARB && texture < GL_TEXTURE0_ARB + CR_MAX_TEXTURE_UNITS)"
m = re.match( r"^(Index)(ub|b|us|s|ui|i|f|d)$", func_name )
if m :
current = 1
name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
type = m.group(2) + "1"
m = re.match( r"^(EdgeFlag)$", func_name )
if m :
current = 1
name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
type = "l1"
m = re.match( r"^(FogCoord)(f|d)(EXT)$", func_name)
if m :
current = 1
name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
type = m.group(2) + "1"
# Vertex attribute commands w/ some special cases
m = re.search( r"^(VertexAttrib)([1234])(s|i|f|d)ARB$", func_name )
if m :
current = 1
name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
type = m.group(3) + m.group(2)
array = "[index]"
condition = "if (index < CR_MAX_VERTEX_ATTRIBS)"
if func_name == "VertexAttrib4NubARB":
current = 1
name = "vertexAttrib"
type = "ub4"
array = "[index]"
condition = "if (index < CR_MAX_VERTEX_ATTRIBS)"
if current:
params = apiutil.Parameters(func_name)
print 'void SERVER_DISPATCH_APIENTRY crServerDispatch%s( %s )' % ( func_name, apiutil.MakeDeclarationString(params) )
print '{'
print '\t%s' % (condition)
print '\t{'
print '\t\tcr_server.head_spu->dispatch_table.%s( %s );' % (func_name, apiutil.MakeCallString(params) )
print "\t\tcr_server.current.c.%s.%s%s = cr_unpackData;" % (name,type,array)
print '\t}'
print '}\n'
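# Worked example of the mapping above (informal): for func_name == "Color3f"
# the first regex gives name="color", type="f3", array="", so the generated
# dispatch body stores the unpacked data via
#   cr_server.current.c.color.f3 = cr_unpackData;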
print """
void crServerInitDispatch(void)
{
crSPUInitDispatchTable( &(cr_server.dispatch) );
crSPUCopyDispatchTable( &(cr_server.dispatch), &(cr_server.head_spu->dispatch_table ) );
"""
for func_name in keys:
if ("get" in apiutil.Properties(func_name) or
apiutil.FindSpecial( "server", func_name ) or
apiutil.FindSpecial( sys.argv[1]+"/../state_tracker/state", func_name )):
wrap = apiutil.GetCategoryWrapper(func_name)
if wrap:
print '#if defined(CR_%s)' % wrap
print '\tcr_server.dispatch.%s = crServerDispatch%s;' % (func_name, func_name)
if wrap:
print '#endif'
print '}'
| gpl-2.0 |
geoffkilpin/pombola | pombola/feedback/forms.py | 4 | 1293 | import time
import datetime
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.encoding import force_unicode
from django.utils.text import get_text_list
from django.utils.translation import ungettext, ugettext_lazy as _
from pombola.feedback.models import Feedback
class FeedbackForm(forms.Form):
"""
Gather feedback
"""
url = forms.URLField(
widget = forms.HiddenInput,
required = False,
)
comment = forms.CharField(
label = _('Your feedback'),
widget = forms.Textarea,
max_length = 2000,
)
email = forms.EmailField(
label = _('Your email'),
required = False,
help_text = "optional - but lets us get back to you...",
)
# This is a honeypot field to catch spam bots. If there is any content in
# it the feedback status will be set to 'spammy'. This field is hidden by
# CSS in the form so should never be shown to a user. Hopefully it will not
# be autofilled either.
website = forms.CharField(
label = _('Leave this blank'),
required = False,
)
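# Usage sketch (illustrative only, not part of this module): a view would
# typically bind the form and treat a filled honeypot as spam, e.g.
#   form = FeedbackForm(request.POST)
#   if form.is_valid():
#       is_spam = bool(form.cleaned_data['website'])
#       # ... create a Feedback instance, flagging it when is_spam is True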
| agpl-3.0 |
heplesser/nest-simulator | pynest/nest/tests/test_connect_arrays.py | 12 | 13340 | # -*- coding: utf-8 -*-
#
# test_connect_arrays.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import nest
nest.set_verbosity('M_WARNING')
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")
class TestConnectArrays(unittest.TestCase):
non_unique = np.array([1, 1, 3, 5, 4, 5, 9, 7, 2, 8], dtype=np.uint64)
def setUp(self):
nest.ResetKernel()
def test_connect_arrays_unique(self):
"""Connecting NumPy arrays of unique node IDs"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = np.arange(1, n+1, dtype=np.uint64)
weights = 1.5
delays = 1.4
nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays})
conns = nest.GetConnections()
self.assertEqual(len(conns), n*n)
for c in conns:
np.testing.assert_approx_equal(c.weight, weights)
np.testing.assert_approx_equal(c.delay, delays)
def test_connect_arrays_nonunique(self):
"""Connecting NumPy arrays with non-unique node IDs"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.ones(n)
delays = np.ones(n)
nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
conn_spec='one_to_one')
conns = nest.GetConnections()
for s, t, w, d, c in zip(sources, targets, weights, delays, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
self.assertEqual(c.weight, w)
self.assertEqual(c.delay, d)
def test_connect_arrays_nonunique_dict_conn_spec(self):
"""Connecting NumPy arrays with non-unique node IDs and conn_spec as a dict"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = 2 * np.ones(n)
delays = 1.5 * np.ones(n)
nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
conn_spec={'rule': 'one_to_one'})
conns = nest.GetConnections()
for s, t, w, d, c in zip(sources, targets, weights, delays, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
self.assertEqual(c.weight, w)
self.assertEqual(c.delay, d)
def test_connect_arrays_no_conn_spec(self):
"""Connecting NumPy arrays of node IDs without specifying conn_spec"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
with self.assertRaises(ValueError):
nest.Connect(sources, targets)
def test_connect_arrays_different_weights_delays(self):
"""Connecting NumPy arrays with different weights and delays"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.linspace(0.6, 1.5, n)
delays = np.linspace(0.4, 1.3, n)
nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
conn_spec={'rule': 'one_to_one'})
conns = nest.GetConnections()
np.testing.assert_array_equal(conns.source, sources)
np.testing.assert_array_equal(conns.target, targets)
np.testing.assert_array_almost_equal(conns.weight, weights)
np.testing.assert_array_almost_equal(conns.delay, delays)
def test_connect_arrays_threaded(self):
"""Connecting NumPy arrays, threaded"""
nest.SetKernelStatus({'local_num_threads': 2})
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.ones(len(sources))
delays = np.ones(len(sources))
syn_model = 'static_synapse'
nest.Connect(sources, targets, conn_spec='one_to_one',
syn_spec={'weight': weights, 'delay': delays, 'synapse_model': syn_model})
conns = nest.GetConnections()
# Sorting connection information by source to make it equivalent to the reference.
conn_info = [(c.source, c.target, c.weight, c.delay) for c in conns]
conn_info.sort(key=lambda conn: conn[0])
for s, t, w, d, c in zip(sources, targets, weights, delays, conn_info):
conn_s, conn_t, conn_w, conn_d = c
self.assertEqual(conn_s, s)
self.assertEqual(conn_t, t)
self.assertEqual(conn_w, w)
self.assertEqual(conn_d, d)
def test_connect_arrays_no_delays(self):
"""Connecting NumPy arrays without specifying delays"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.ones(n)
nest.Connect(sources, targets, conn_spec='one_to_one', syn_spec={'weight': weights})
conns = nest.GetConnections()
for s, t, w, c in zip(sources, targets, weights, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
self.assertEqual(c.weight, w)
def test_connect_array_list(self):
"""Connecting NumPy array and list"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = list(range(1, n + 1))
targets = self.non_unique
nest.Connect(sources, targets, conn_spec='one_to_one')
conns = nest.GetConnections()
for s, t, c in zip(sources, targets, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
def test_connect_arrays_no_weights(self):
"""Connecting NumPy arrays without specifying weights"""
n = 10
neurons = nest.Create('iaf_psc_alpha', n)
targets = self.non_unique
delays = np.ones(n)
nest.Connect(neurons, targets, conn_spec='one_to_one', syn_spec={'delay': delays})
conns = nest.GetConnections()
for s, t, d, c in zip(neurons.tolist(), targets, delays, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
self.assertEqual(c.delay, d)
def test_connect_arrays_rtype(self):
"""Connecting NumPy arrays with specified receptor_type"""
n = 10
nest.Create('iaf_psc_exp_multisynapse', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.ones(len(sources))
delays = np.ones(len(sources))
receptor_type = np.ones(len(sources), dtype=np.uint64)
syn_model = 'static_synapse'
nest.Connect(sources, targets, conn_spec='one_to_one',
syn_spec={'weight': weights, 'delay': delays, 'receptor_type': receptor_type})
conns = nest.GetConnections()
for s, t, w, d, r, c in zip(sources, targets, weights, delays, receptor_type, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
self.assertEqual(c.weight, w)
self.assertEqual(c.delay, d)
self.assertEqual(c.receptor, r)
def test_connect_arrays_additional_synspec_params(self):
"""Connecting NumPy arrays with additional syn_spec params"""
n = 10
nest.Create('iaf_psc_exp_multisynapse', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.ones(len(sources))
delays = np.ones(len(sources))
syn_model = 'vogels_sprekeler_synapse'
receptor_type = np.ones(len(sources), dtype=np.uint64)
alpha = 0.1*np.ones(len(sources))
tau = 20.*np.ones(len(sources))
nest.Connect(sources, targets, conn_spec='one_to_one',
syn_spec={'weight': weights, 'delay': delays, 'synapse_model': syn_model,
'receptor_type': receptor_type, 'alpha': alpha, 'tau': tau})
conns = nest.GetConnections()
for s, t, w, d, r, a, tau, c in zip(sources, targets, weights, delays, receptor_type, alpha, tau, conns):
self.assertEqual(c.source, s)
self.assertEqual(c.target, t)
self.assertEqual(c.weight, w)
self.assertEqual(c.delay, d)
self.assertEqual(c.receptor, r)
self.assertEqual(c.alpha, a)
self.assertEqual(c.tau, tau)
def test_connect_arrays_float_rtype(self):
"""Raises exception when not using integer value for receptor_type"""
n = 10
nest.Create('iaf_psc_exp_multisynapse', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = np.ones(n)
delays = np.ones(n)
syn_model = 'vogels_sprekeler_synapse'
receptor_type = 1.5*np.ones(len(sources))
with self.assertRaises(nest.kernel.NESTErrors.BadParameter):
nest.Connect(sources, targets, conn_spec='one_to_one',
syn_spec={'weight': weights, 'delay': delays, 'synapse_model': syn_model,
'receptor_type': receptor_type})
def test_connect_arrays_wrong_dtype(self):
"""Raises exception when connecting NumPy arrays with wrong dtype"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.double)
targets = np.array(self.non_unique, dtype=np.double)
weights = np.ones(n)
delays = np.ones(n)
syn_model = 'static_synapse'
with self.assertRaises(nest.kernel.NESTErrors.ArgumentType):
nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
conn_spec='one_to_one')
def test_connect_arrays_unknown_nodes(self):
"""Raises exception when connecting NumPy arrays with unknown nodes"""
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+2, dtype=np.uint64)
targets = np.arange(1, n+2, dtype=np.uint64)
weights = np.ones(len(sources))
delays = np.ones(len(sources))
syn_model = 'static_synapse'
with self.assertRaises(nest.kernel.NESTErrors.UnknownNode):
nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays,
'synapse_model': syn_model})
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
def test_connect_arrays_receptor_type(self):
"""Connecting NumPy arrays with receptor type specified, threaded"""
nest.SetKernelStatus({'local_num_threads': 2})
n = 10
nest.Create('iaf_psc_alpha', n)
sources = np.arange(1, n+1, dtype=np.uint64)
targets = self.non_unique
weights = len(sources) * [2.]
nest.Connect(sources, targets, conn_spec='one_to_one', syn_spec={'weight': weights, 'receptor_type': 0})
self.assertEqual(len(sources) * [0], nest.GetConnections().receptor)
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
def test_connect_arrays_differnt_alpha(self):
"""Connecting NumPy arrays with different alpha values in a threaded environment"""
nest.SetKernelStatus({'local_num_threads': 4})
neurons = nest.Create("iaf_psc_exp", 10)
# syn_spec parameters are dependent on source, so we test with source id's not starting with 1
source = np.array([2, 5, 3, 10, 1, 9, 4, 6, 8, 7])
target = 1 + np.random.choice(10, 10, replace=True)
weights = len(source) * [2.]
alpha = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.11])
# Need to make sure the correct alpha value is used with the correct source
src_alpha_ref = {key: val for key, val in zip(source, alpha)}
nest.Connect(source, target, conn_spec='one_to_one',
syn_spec={'alpha': alpha, 'receptor_type': 0,
'weight': weights, 'synapse_model': "stdp_synapse"})
conns = nest.GetConnections()
src = conns.source
alp = conns.alpha
src_alpha = {key: val for key, val in zip(src, alp)}
self.assertEqual(src_alpha_ref, src_alpha)
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestConnectArrays)
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
| gpl-2.0 |
malkavi/lutris | lutris/runners/o2em.py | 1 | 3325 | # -*- coding: utf-8 -*-
import os
from lutris import settings
from lutris.runners.runner import Runner
class o2em(Runner):
"""Magnavox Oyssey² Emulator"""
human_name = "O2EM"
package = "o2em"
executable = "o2em"
platform = "Magnavox Odyssey 2, Phillips Videopac+"
tarballs = {
'i386': None,
'x64': "o2em-1.18-x86_64.tar.gz",
}
checksums = {
'o2rom': "562d5ebf9e030a40d6fabfc2f33139fd",
'c52': "f1071cdb0b6b10dde94d3bc8a6146387",
'jopac': "279008e4a0db2dc5f1c048853b033828",
'g7400': "79008e4a0db2dc5f1c048853b033828",
}
bios_choices = [
("Magnavox Odyssey2", "o2rom"),
("Phillips C52", "c52"),
("Phillips Videopac+", "g7400"),
("Brandt Jopac", "jopac")
]
controller_choices = [
("Disable", "0"),
("Arrows keys and right shift", "1"),
("W,S,A,D,SPACE", "2"),
("Joystick", "3")
]
game_options = [{
"option": "main_file",
"type": "file",
"label": "ROM file",
"default_path": 'game_path',
'help': ("The game data, commonly called a ROM image.")
}]
runner_options = [
{
"option": "bios",
"type": "choice",
"choices": bios_choices,
"label": "Bios"
},
{
"option": "controller1",
"type": "choice",
"choices": controller_choices,
"label": "First controller"
},
{
"option": "controller2",
"type": "choice",
"choices": controller_choices,
"label": "Second controller"
},
{
"option": "fullscreen",
"type": "bool",
"label": "Fullscreen"
},
{
"option": "scanlines",
"type": "bool",
"label": "Scanlines",
'help': ("Activates a display filter adding scanlines to look "
"more like yesteryear matieral.")
}
]
def install(self):
super(o2em, self).install()
bios_path = os.path.expanduser("~/.o2em/bios")
if not os.path.exists(bios_path):
os.makedirs(bios_path)
def get_executable(self):
return os.path.join(settings.RUNNER_DIR, 'o2em/o2em')
def play(self):
bios_path = os.path.join(os.path.expanduser("~"), ".o2em/bios/")
arguments = ["-biosdir=\"%s\"" % bios_path]
if self.runner_config.get("fullscreen"):
arguments.append("-fullscreen")
if self.runner_config.get("scanlines"):
arguments.append("-scanlines")
if "controller1" in self.runner_config:
arguments.append("-s1=%s" % self.runner_config["controller1"])
if "controller2" in self.runner_config:
arguments.append("-s2=%s" % self.runner_config["controller2"])
rom_path = self.game_config.get('main_file') or ''
if not os.path.exists(rom_path):
return {'error': 'FILE_NOT_FOUND', 'file': rom_path}
romdir = os.path.dirname(rom_path)
romfile = os.path.basename(rom_path)
arguments.append("-romdir=\"%s\"/" % romdir)
arguments.append("\"%s\"" % romfile)
return {'command': [self.get_executable()] + arguments}
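# Illustrative return value of play() (paths and ROM name are hypothetical):
#   {'command': ['<RUNNER_DIR>/o2em/o2em', '-biosdir="/home/user/.o2em/bios/"',
#                '-fullscreen', '-s1=1', '-s2=0',
#                '-romdir="/home/user/roms"/', '"kcmunchkin.bin"']}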
| gpl-3.0 |
davidharrigan/django | tests/template_tests/syntax_tests/test_with.py | 391 | 2245 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class WithTagTests(SimpleTestCase):
@setup({'with01': '{% with key=dict.key %}{{ key }}{% endwith %}'})
def test_with01(self):
output = self.engine.render_to_string('with01', {'dict': {'key': 50}})
self.assertEqual(output, '50')
@setup({'legacywith01': '{% with dict.key as key %}{{ key }}{% endwith %}'})
def test_legacywith01(self):
output = self.engine.render_to_string('legacywith01', {'dict': {'key': 50}})
self.assertEqual(output, '50')
@setup({'with02': '{{ key }}{% with key=dict.key %}'
'{{ key }}-{{ dict.key }}-{{ key }}'
'{% endwith %}{{ key }}'})
def test_with02(self):
output = self.engine.render_to_string('with02', {'dict': {'key': 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID50-50-50INVALID')
else:
self.assertEqual(output, '50-50-50')
@setup({'legacywith02': '{{ key }}{% with dict.key as key %}'
'{{ key }}-{{ dict.key }}-{{ key }}'
'{% endwith %}{{ key }}'})
def test_legacywith02(self):
output = self.engine.render_to_string('legacywith02', {'dict': {'key': 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID50-50-50INVALID')
else:
self.assertEqual(output, '50-50-50')
@setup({'with03': '{% with a=alpha b=beta %}{{ a }}{{ b }}{% endwith %}'})
def test_with03(self):
output = self.engine.render_to_string('with03', {'alpha': 'A', 'beta': 'B'})
self.assertEqual(output, 'AB')
@setup({'with-error01': '{% with dict.key xx key %}{{ key }}{% endwith %}'})
def test_with_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('with-error01', {'dict': {'key': 50}})
@setup({'with-error02': '{% with dict.key as %}{{ key }}{% endwith %}'})
def test_with_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('with-error02', {'dict': {'key': 50}})
| bsd-3-clause |
mbodenhamer/syn | syn/type/a/tests/test_type.py | 1 | 9511 | import six
from syn.five import xrange
from nose.tools import assert_raises
from syn.type.a import (Type, ValuesType, MultiType, TypeType, AnyType,
TypeExtension, Set, Schema)
from syn.base_utils import is_hashable, feq
from syn.base_utils import ngzwarn, on_error, elog
from syn.globals import TEST_SAMPLES as SAMPLES
SAMPLES //= 2
SAMPLES = max(SAMPLES, 1)
ngzwarn(SAMPLES, 'SAMPLES')
#-------------------------------------------------------------------------------
# Type
def test_type():
t = Type()
assert t == Type()
assert t != 1
assert is_hashable(t)
assert_raises(NotImplementedError, t.check, 1)
assert_raises(NotImplementedError, t.coerce, 1)
assert_raises(NotImplementedError, t.display)
assert_raises(NotImplementedError, t.enumeration_value, 1)
assert_raises(NotImplementedError, t.generate)
assert_raises(NotImplementedError, t.rst)
assert_raises(NotImplementedError, t.validate, 1)
#-------------------------------------------------------------------------------
# AnyType
def test_anytype():
t = AnyType()
assert t == AnyType()
t.check(1)
assert t.coerce(1) == 1
assert t.display() == t.rst() == 'any'
t.validate(1)
#-------------------------------------------------------------------------------
# TypeType
class Foo(object):
def __init__(self, value):
self.value = value
def validate(self):
assert self.value > 5
class Bar(Foo):
@classmethod
def coerce(cls, value):
return Bar(value + 1)
def test_typetype():
t = TypeType(int)
assert t.type is int
assert not t.call_coerce
assert not t.call_validate
assert t == TypeType(int)
assert t != TypeType(float)
t.check(1)
assert_raises(TypeError, t.check, 1.2)
assert t.query(1)
assert not t.query(1.2)
res, e = t.query_exception(1)
assert res
assert e is None
res, e = t.query_exception(1.2)
assert not res
assert isinstance(e, TypeError)
assert t.coerce(1.2) == 1
assert_raises(TypeError, t.coerce, 'abc')
assert t.display() == 'int'
assert t.rst() == '*int*'
t.validate(1)
assert_raises(TypeError, t.validate, 1.2)
f = TypeType(Foo)
assert f.type is Foo
assert not f.call_coerce
assert f.call_validate
f.check(Foo(2))
assert_raises(TypeError, f.check, 2)
f1 = f.coerce(1)
assert isinstance(f1, Foo)
assert f1.value == 1
assert_raises(TypeError, f.validate, 6)
assert_raises(AssertionError, f.validate, Foo(5))
assert f.display() == 'Foo'
assert f.rst() == '*Foo*'
f.validate(Foo(6))
b = TypeType(Bar)
assert b.type is Bar
assert b.call_coerce
assert b.call_validate
b.check(Bar(2))
assert_raises(TypeError, b.check, Foo(2))
b1 = b.coerce(1)
assert isinstance(b1, Bar)
assert b1.value == 2
assert_raises(TypeError, b.validate, 6)
assert_raises(AssertionError, b.validate, Bar(5))
b.validate(Bar(6))
#-------------------------------------------------------------------------------
# ValuesType
def test_valuestype():
t = ValuesType({1, 1.2, u'b'})
assert t == ValuesType([1, 1.2, u'b'])
assert t != ValuesType([1, 1.3, u'b'])
t.check(1)
t.check(1.2)
t.check(u'b')
assert_raises(TypeError, t.check, 2)
assert t.coerce(1) == 1
assert_raises(TypeError, t.coerce, 2)
t.validate(1)
assert_raises(TypeError, t.validate, 2)
t = ValuesType({1, 1.2})
assert t.display() in ('[1, 1.2]', '[1.2, 1]')
assert t.rst() in ('[1, 1.2]', '[1.2, 1]')
assert t.display() == t.rst()
#-------------------------------------------------------------------------------
# MultiType
def test_multitype():
import math
t = MultiType((int, float))
assert t == MultiType((int, float))
assert t != MultiType((int, str))
assert t.is_typelist
assert t.query(1)
assert t.query(1.2)
assert not t.query(u'a')
assert t.coerce(1.2) == 1
assert t.coerce(u'inf') == float(u'inf')
assert_raises(TypeError, t.coerce, u'abc')
assert t.display() == 'int | float'
assert t.rst() == '*int* | *float*'
t.validate(1)
assert_raises(TypeError, t.validate, u'abc')
t = MultiType((int, Foo, ValuesType([math.pi, math.e])))
assert not t.is_typelist
assert t.query(1)
assert t.query(Foo(2))
assert t.query(math.pi)
assert not t.query(3.4)
assert t.coerce(1) == 1
f = t.coerce(u'abc')
assert isinstance(f, Foo)
assert f.value == u'abc'
t.validate(1)
t.validate(Foo(6))
assert_raises(TypeError, t.validate, 3.4)
assert_raises(AssertionError, t.validate, Foo(5))
t = MultiType(six.string_types)
t.validate('abc')
t.validate('abc')
t.validate(u'abc')
assert_raises(TypeError, t.validate, 3.4)
#-------------------------------------------------------------------------------
# Set
def test_set():
from syn.sets.b import Range
t = Set(Range(1, 5))
assert t == Set(Range(1, 5))
assert t != Set(Range(0, 5))
assert Type.dispatch(t) is t
assert t.query(1)
assert not t.query(0)
t.validate(1)
assert_raises(TypeError, t.validate, 0)
assert t.coerce(1) == 1
assert_raises(TypeError, t.coerce, 0)
s = set(xrange(1, 6))
for k in xrange(SAMPLES):
val = t.generate()
with on_error(elog, s.__contains__, (val,)):
assert val in s
assert t.display() == t.rst() == '<Set>'
#-------------------------------------------------------------------------------
# Schema
def test_schema():
from syn.schema.b.sequence import Sequence
from syn.type.a import List
t = Schema(Sequence(1, 2, 3))
assert t == Schema(Sequence(1, 2, 3))
assert t != Schema(Sequence(1, 3, 2))
assert Type.dispatch(t) is t
assert t.query([1, 2, 3])
assert not t.query([1, 3, 2])
t.validate([1, 2, 3])
assert_raises(TypeError, t.validate, [1, 3, 2])
assert t.generate() == [1, 2, 3]
assert t.display() == t.rst() == '<Schema>'
assert t.coerce(1) == 1
t = Schema(Sequence(int, float))
assert t.query([1, 2.3])
assert not t.query([1, 2])
val = t.generate()
assert t.query(val)
t = Schema(Sequence(int, List(float)))
assert not t.query([1, 1.2])
assert not t.query([1, [1, 2]])
assert t.query([1, [1.2, 3.4]])
assert t.query([1, []])
val = t.generate()
assert t.query(val)
#-------------------------------------------------------------------------------
# dispatch_type
def test_dispatch_type():
t = Type.dispatch(None)
assert isinstance(t, AnyType)
t = Type.dispatch(int)
assert isinstance(t, TypeType)
assert t.type is int
t = Type.dispatch((int, float))
assert isinstance(t, MultiType)
assert t.typelist == (int, float)
t = Type.dispatch([1, 2])
assert isinstance(t, ValuesType)
assert t.values == [1, 2]
t = Type.dispatch(six.string_types)
assert isinstance(t, TypeType)
t.validate('abc')
t.validate(u'abc')
assert_raises(TypeError, t.validate, 1)
te = TypeExtension()
assert Type.dispatch(te) is te
assert Type.dispatch(TypeExtension) is not TypeExtension
assert isinstance(Type.dispatch(TypeExtension), TypeExtension)
assert_raises(TypeError, Type.dispatch, 1)
assert_raises(TypeError, Type.dispatch, b'abc')
assert_raises(TypeError, Type.dispatch, u'abc')
#-------------------------------------------------------------------------------
# Test generation
def test_generation():
from syn.base_utils.rand import PRIMITIVE_TYPES
from syn.types import Type as Type_
anys = [AnyType().generate() for k in xrange(SAMPLES)]
if len(anys) > 2:
assert any(x is not None for x in anys)
class Foo(object): pass
assert isinstance(AnyType().generate(types=[Foo]), tuple(PRIMITIVE_TYPES))
class Bar(object):
@classmethod
def _generate(cls, **kwargs):
return cls()
class BarType(Type_): type = Bar
assert isinstance(TypeType(int).generate(), int)
assert isinstance(TypeType(Bar).generate(), Bar)
assert_raises(NotImplementedError, TypeType(Foo).generate)
assert ValuesType([1, 2, 3]).generate() in {1, 2, 3}
t = MultiType([int, float])
assert isinstance(t.generate(), (int, float))
assert isinstance(t.generate(exclude_types=[float]), int)
#-------------------------------------------------------------------------------
# Test enumeration values
def test_enumeration_values():
assert TypeType(int).enumeration_value(0) == 0
v = ValuesType([1, 2, 3])
assert v.enumeration_value(0) == 1
assert v.enumeration_value(1) == 2
assert v.enumeration_value(2) == 3
assert v.enumeration_value(3) == 1
m = MultiType([int, float])
assert m.enumeration_value(0) == 0
assert feq(m.enumeration_value(1), 0.1)
assert m.enumeration_value(2) == 2
assert feq(m.enumeration_value(3), 0.3)
anys = [AnyType().enumeration_value(k) for k in xrange(SAMPLES)]
if len(anys) > 2:
assert any(x is not None for x in anys)
class Foo(object): pass
assert AnyType().enumeration_value(0, types=[Foo]) == 0
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
from syn.base_utils import run_all_tests
run_all_tests(globals(), verbose=True, print_errors=False)
| mit |
dermoth/gramps | data/man/pt_BR/conf.py | 12 | 7714 | # -*- coding: utf-8 -*-
#
# Gramps documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 1 14:38:29 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'pt_BR'
# General information about the project.
project = u'Gramps'
copyright = u'2015, Gramps project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.2'
# The full version, including alpha/beta/rc tags.
release = '4.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
locale_dirs = './locale'
gettext_compact = True
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Grampsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Gramps.tex', u'Gramps Documentation',
u'.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('pt_BR', 'gramps', u'Gramps Documentation',
[u'.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Gramps', u'Gramps Documentation',
u'.', 'Gramps', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 |
belmiromoreira/nova | nova/image/download/__init__.py | 61 | 1636 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import stevedore.driver
import stevedore.extension
from nova.i18n import _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_transfer_modules():
module_dictionary = {}
ex = stevedore.extension.ExtensionManager('nova.image.download.modules')
for module_name in ex.names():
mgr = stevedore.driver.DriverManager(
namespace='nova.image.download.modules',
name=module_name,
invoke_on_load=False)
schemes_list = mgr.driver.get_schemes()
for scheme in schemes_list:
if scheme in module_dictionary:
LOG.error(_LE('%(scheme)s is registered as a module twice. '
'%(module_name)s is not being used.'),
{'scheme': scheme,
'module_name': module_name})
else:
module_dictionary[scheme] = mgr.driver
return module_dictionary
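# Illustrative registration (assumed pbr/setup.cfg syntax; the module name is
# only an example):
#   [entry_points]
#   nova.image.download.modules =
#       file = nova.image.download.file
# Each driver loaded from that namespace is expected to expose get_schemes(),
# e.g. returning ['file'], which keys it into the dictionary returned above.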
| apache-2.0 |
benthomasson/cumulus-linux-ansible-modules | tests/test_cl_license.py | 6 | 2493 | import mock
from mock import MagicMock
from nose.tools import set_trace
from library import cl_license
from asserts import assert_equals
from datetime import date, datetime
def mod_args_generator(values, *args):
def mod_args(args):
return values[args]
return mod_args
@mock.patch('library.cl_license.AnsibleModule')
def test_install_license_failed(mock_ansible_module):
""" test install license failed"""
instance = mock_ansible_module.return_value
instance.params = {'src': 'blah'}
run_command = MagicMock()
run_command.return_value = (1, 'output', 'err')
instance.run_command = run_command
cl_license.install_license(instance)
run_command.assert_called_with('/usr/cumulus/bin/cl-license -i blah')
instance.fail_json.assert_called_with(msg='err')
@mock.patch('library.cl_license.AnsibleModule')
def test_install_license_passed(mock_ansible_module):
""" test install license passed """
instance = mock_ansible_module.return_value
instance.params = {'src': 'blah'}
run_command = MagicMock()
run_command.return_value = (0, 'output', None)
instance.run_command = run_command
cl_license.install_license(instance)
run_command.assert_called_with('/usr/cumulus/bin/cl-license -i blah')
assert_equals(instance.fail_json.call_count, 0)
@mock.patch('library.cl_license.install_license')
@mock.patch('library.cl_license.AnsibleModule')
def test_license_not_installed(mock_ansible_module,
mock_install_license):
instance = mock_ansible_module.return_value
instance.params = {'src': 'blah'}
run_command = MagicMock()
run_command.return_value = (20, 'No license', None)
instance.run_command = run_command
cl_license.main()
assert_equals(mock_install_license.call_count, 1)
instance.exit_json.assert_called_with(msg='License installation completed',
changed=True)
@mock.patch('library.cl_license.install_license')
@mock.patch('library.cl_license.AnsibleModule')
def test_license_already_installed(mock_ansible_module,
mock_install_license):
instance = mock_ansible_module.return_value
instance.params = {'src': 'blah'}
run_command = MagicMock()
run_command.return_value = (0, 'license is there', None)
instance.run_command = run_command
cl_license.main()
assert_equals(mock_install_license.call_count, 0)
instance.exit_json.assert_called_with(
msg='No change. License already installed',
changed=False)
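# Reading of the behaviour exercised above: a cl-license exit code of 20 is
# treated as "no license installed" (so installation runs), while 0 means a
# license is already present and nothing changes.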
| gpl-3.0 |
popazerty/gui-test | lib/python/Components/Renderer/DMCHDMaxTemp.py | 11 | 2083 | # -*- coding: utf-8 -*-
#
# Maximum Temperature Renderer for Dreambox/Enigma-2
# Coded by Vali (c)2010
# Support: www.dreambox-tools.info
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.VariableText import VariableText
from Components.Sensors import sensors
from Tools.HardwareInfo import HardwareInfo
from enigma import eLabel
from Renderer import Renderer
from os import popen
class DMCHDMaxTemp(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
if "8000" in HardwareInfo().get_device_name() or "800se" in HardwareInfo().get_device_name() or "500" in HardwareInfo().get_device_name():
self.ZeigeTemp = True
else:
self.ZeigeTemp = False
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
if self.ZeigeTemp:
maxtemp = 0
try:
templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
tempcount = len(templist)
for count in range(tempcount):
id = templist[count]
tt = sensors.getSensorValue(id)
if tt > maxtemp:
maxtemp = tt
except:
pass
self.text = str(maxtemp) + "°C"
else:
loada = 0
try:
out_line = popen("cat /proc/loadavg").readline()
loada = out_line[:4]
except:
pass
self.text = loada
def onShow(self):
self.suspended = False
self.changed(None)
def onHide(self):
self.suspended = True
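# Note: on boxes other than the DM8000/800se/500 the renderer falls back to the
# 1-minute load average; out_line[:4] keeps only the first field of
# /proc/loadavg, e.g. "0.42".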
| gpl-2.0 |
openstack/sqlalchemy-migrate | migrate/changeset/constraint.py | 140 | 7318 | """
This module defines standalone schema constraint classes.
"""
from sqlalchemy import schema
from migrate.exceptions import *
class ConstraintChangeset(object):
"""Base class for Constraint classes."""
def _normalize_columns(self, cols, table_name=False):
"""Given: column objects or names; return col names and
(maybe) a table"""
colnames = []
table = None
for col in cols:
if isinstance(col, schema.Column):
if col.table is not None and table is None:
table = col.table
if table_name:
col = '.'.join((col.table.name, col.name))
else:
col = col.name
colnames.append(col)
return colnames, table
def __do_imports(self, visitor_name, *a, **kw):
engine = kw.pop('engine', self.table.bind)
from migrate.changeset.databases.visitor import (get_engine_visitor,
run_single_visitor)
visitorcallable = get_engine_visitor(engine, visitor_name)
run_single_visitor(engine, visitorcallable, self, *a, **kw)
def create(self, *a, **kw):
"""Create the constraint in the database.
:param engine: the database engine to use. If this is \
:keyword:`None` the instance's engine will be used
:type engine: :class:`sqlalchemy.engine.base.Engine`
:param connection: reuse a connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
# TODO: set the parent here instead of in __init__
self.__do_imports('constraintgenerator', *a, **kw)
def drop(self, *a, **kw):
"""Drop the constraint from the database.
:param engine: the database engine to use. If this is
:keyword:`None` the instance's engine will be used
:param cascade: Issue CASCADE drop if database supports it
:type engine: :class:`sqlalchemy.engine.base.Engine`
:type cascade: bool
:param connection: reuse a connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
:returns: Instance with cleared columns
"""
self.cascade = kw.pop('cascade', False)
self.__do_imports('constraintdropper', *a, **kw)
# the spirit of Constraint objects is that they
# are immutable (just like in a DB. they're only ADDed
# or DROPped).
#self.columns.clear()
return self
class PrimaryKeyConstraint(ConstraintChangeset, schema.PrimaryKeyConstraint):
"""Construct PrimaryKeyConstraint
Migrate's additional parameters:
:param cols: Columns in constraint.
:param table: If columns are passed as strings, this kw is required
:type table: Table instance
:type cols: strings or Column instances
"""
__migrate_visit_name__ = 'migrate_primary_key_constraint'
def __init__(self, *cols, **kwargs):
colnames, table = self._normalize_columns(cols)
table = kwargs.pop('table', table)
super(PrimaryKeyConstraint, self).__init__(*colnames, **kwargs)
if table is not None:
self._set_parent(table)
def autoname(self):
"""Mimic the database's automatic constraint names"""
return "%s_pkey" % self.table.name
class ForeignKeyConstraint(ConstraintChangeset, schema.ForeignKeyConstraint):
"""Construct ForeignKeyConstraint
Migrate's additional parameters:
:param columns: Columns in constraint
:param refcolumns: Columns that this FK refers to in another table.
:param table: If columns are passed as strings, this kw is required
:type table: Table instance
:type columns: list of strings or Column instances
:type refcolumns: list of strings or Column instances
"""
__migrate_visit_name__ = 'migrate_foreign_key_constraint'
def __init__(self, columns, refcolumns, *args, **kwargs):
colnames, table = self._normalize_columns(columns)
table = kwargs.pop('table', table)
refcolnames, reftable = self._normalize_columns(refcolumns,
table_name=True)
super(ForeignKeyConstraint, self).__init__(colnames, refcolnames, *args,
**kwargs)
if table is not None:
self._set_parent(table)
@property
def referenced(self):
return [e.column for e in self.elements]
@property
def reftable(self):
return self.referenced[0].table
def autoname(self):
"""Mimic the database's automatic constraint names"""
if hasattr(self.columns, 'keys'):
# SA <= 0.5
firstcol = self.columns[self.columns.keys()[0]]
ret = "%(table)s_%(firstcolumn)s_fkey" % dict(
table=firstcol.table.name,
firstcolumn=firstcol.name,)
else:
# SA >= 0.6
ret = "%(table)s_%(firstcolumn)s_fkey" % dict(
table=self.table.name,
firstcolumn=self.columns[0],)
return ret
class CheckConstraint(ConstraintChangeset, schema.CheckConstraint):
"""Construct CheckConstraint
Migrate's additional parameters:
:param sqltext: Plain SQL text to check condition
:param columns: If no name is given, you must supply this kw\
to autoname the constraint
:param table: If columns are passed as strings, this kw is required
:type table: Table instance
:type columns: list of Columns instances
:type sqltext: string
"""
__migrate_visit_name__ = 'migrate_check_constraint'
def __init__(self, sqltext, *args, **kwargs):
cols = kwargs.pop('columns', [])
if not cols and not kwargs.get('name', False):
raise InvalidConstraintError('You must either set "name"'
' parameter or "columns" to autogenerate it.')
colnames, table = self._normalize_columns(cols)
table = kwargs.pop('table', table)
schema.CheckConstraint.__init__(self, sqltext, *args, **kwargs)
if table is not None:
self._set_parent(table)
self.colnames = colnames
def autoname(self):
return "%(table)s_%(cols)s_check" % \
dict(table=self.table.name, cols="_".join(self.colnames))
class UniqueConstraint(ConstraintChangeset, schema.UniqueConstraint):
"""Construct UniqueConstraint
Migrate's additional parameters:
:param cols: Columns in constraint.
:param table: If columns are passed as strings, this kw is required
:type table: Table instance
:type cols: strings or Column instances
.. versionadded:: 0.6.0
"""
__migrate_visit_name__ = 'migrate_unique_constraint'
def __init__(self, *cols, **kwargs):
self.colnames, table = self._normalize_columns(cols)
table = kwargs.pop('table', table)
super(UniqueConstraint, self).__init__(*self.colnames, **kwargs)
if table is not None:
self._set_parent(table)
def autoname(self):
"""Mimic the database's automatic constraint names"""
return "%s_%s_key" % (self.table.name, self.colnames[0])
| mit |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/learn/python/learn/utils/export.py | 48 | 13721 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
"""Get first element from the collection."""
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
default_graph_signature, named_graph_signatures,
exports_to_keep):
"""Exports graph via session_bundle, by creating a Session."""
with graph.as_default():
with tf_session.Session('') as session:
variables.local_variables_initializer()
lookup_ops.tables_initializer()
saver.restore(session, checkpoint_path)
export = exporter.Exporter(saver)
export.init(
init_op=control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer()),
default_graph_signature=default_graph_signature,
named_graph_signatures=named_graph_signatures,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
return export.export(export_dir, contrib_variables.get_global_step(),
session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behavior of
export_estimator.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or `dict` of `Tensor`s.
Returns:
Tuple of default signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
tensors = {'inputs': examples}
if not isinstance(predictions, dict):
predictions = {'outputs': predictions}
tensors.update(predictions)
default_signature = exporter.generic_signature(tensors)
return default_signature, {}
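# For example (informal): when `predictions` is a single Tensor the call above
# reduces to exporter.generic_signature({'inputs': examples,
# 'outputs': predictions}), with no named signatures.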
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or dict of tensors that contains the classes tensor
as in {'classes': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions['classes'])
else:
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
examples, unused_features, predictions):
"""Classification signature from given examples and predicted probabilities.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions['probabilities'])
else:
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor`.
Returns:
Tuple of default regression signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates logistic regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
dict that contains the probabilities tensor as in
      {'probabilities': `Tensor`}.
Returns:
Tuple of default regression signature and named signature.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
predictions_tensor = predictions['probabilities']
else:
predictions_tensor = predictions
# predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
# while second column is P(Y=1|x). We are only interested in the second
# column for inference.
predictions_shape = predictions_tensor.get_shape()
predictions_rank = len(predictions_shape)
if predictions_rank != 2:
logging.fatal(
'Expected predictions to have rank 2, but received predictions with '
'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
if predictions_shape[1] != 2:
logging.fatal(
'Expected predictions to have 2nd dimension: 2, but received '
'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
'regression_signature_fn or classification_signature_fn_with_prob '
'instead?'.format(predictions_shape[1], predictions_shape))
positive_predictions = predictions_tensor[:, 1]
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=positive_predictions)
return default_signature, {}
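# Worked example (hypothetical values): for a probabilities tensor holding
# [[0.9, 0.1], [0.2, 0.8]], the slice predictions_tensor[:, 1] keeps only
# P(Y=1|x), so the exported regression output would be [0.1, 0.8].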
# pylint: disable=protected-access
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
"""Creates default input parsing using Estimator's feature signatures."""
return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Deprecated, please use Estimator.export_savedmodel()."""
_export_estimator(estimator=estimator,
export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
export_dir,
signature_fn,
input_fn,
default_batch_size,
exports_to_keep,
input_feature_key=None,
use_deprecated_input_fn=True,
prediction_key=None,
checkpoint_path=None):
if use_deprecated_input_fn:
input_fn = input_fn or _default_input_fn
elif input_fn is None:
raise ValueError('input_fn must be defined.')
# If checkpoint_path is specified, use the specified checkpoint path.
checkpoint_path = (checkpoint_path or
tf_saver.latest_checkpoint(estimator._model_dir))
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
if use_deprecated_input_fn:
examples = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
features = input_fn(estimator, examples)
else:
features, _ = input_fn()
examples = None
if input_feature_key is not None:
examples = features.pop(input_feature_key)
if (not features) and (examples is None):
raise ValueError('Either features or examples must be defined.')
predictions = estimator._get_predict_ops(features).predictions
if prediction_key is not None:
predictions = predictions[prediction_key]
# Explicit signature_fn takes priority
if signature_fn:
default_signature, named_graph_signatures = signature_fn(examples,
features,
predictions)
else:
try:
# Some estimators provide a signature function.
# TODO(zakaria): check if the estimator has this function,
# raise helpful error if not
signature_fn = estimator._create_signature_fn()
default_signature, named_graph_signatures = (
signature_fn(examples, features, predictions))
except AttributeError:
logging.warn(
'Change warning: `signature_fn` will be required after'
'2016-08-01.\n'
'Using generic signatures for now. To maintain this behavior, '
'pass:\n'
' signature_fn=export.generic_signature_fn\n'
'Also consider passing a regression or classification signature; '
'see cl/126430915 for an example.')
default_signature, named_graph_signatures = generic_signature_fn(
examples, features, predictions)
if exports_to_keep is not None:
exports_to_keep = gc.largest_export_versions(exports_to_keep)
return _export_graph(
g,
_get_saver(),
checkpoint_path,
export_dir,
default_graph_signature=default_signature,
named_graph_signatures=named_graph_signatures,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
| apache-2.0 |
caseyrollins/osf.io | addons/base/generic_views.py | 9 | 4519 | """Generic add-on view factories"""
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.exceptions import HTTPError, PermissionsError
from framework.auth.decorators import must_be_logged_in
from osf.models import ExternalAccount
from osf.utils import permissions
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_valid_project
)
def import_auth(addon_short_name, Serializer):
@must_have_addon(addon_short_name, 'user')
@must_have_addon(addon_short_name, 'node')
@must_have_permission(permissions.WRITE)
def _import_auth(auth, node_addon, user_addon, **kwargs):
"""Import add-on credentials from the currently logged-in user to a node.
"""
external_account = ExternalAccount.load(
request.json['external_account_id']
)
if not user_addon.external_accounts.filter(id=external_account.id).exists():
raise HTTPError(http.FORBIDDEN)
try:
node_addon.set_auth(external_account, user_addon.owner)
except PermissionsError:
raise HTTPError(http.FORBIDDEN)
node_addon.save()
return {
'result': Serializer().serialize_settings(node_addon, auth.user),
'message': 'Successfully imported access token from profile.',
}
_import_auth.__name__ = '{0}_import_auth'.format(addon_short_name)
return _import_auth
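# Usage sketch (illustrative; the addon name and serializer are hypothetical):
# an addon's views module typically materializes the generic view once, e.g.
#
#   box_import_auth = import_auth('box', BoxSerializer)
#
# and wires it into its URL config; the '{addon}_import_auth' name assigned
# above keeps the generated views distinguishable in tracebacks and routing.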
def account_list(addon_short_name, Serializer):
@must_be_logged_in
def _account_list(auth):
user_settings = auth.user.get_addon(addon_short_name)
serializer = Serializer(user_settings=user_settings)
return serializer.serialized_user_settings
_account_list.__name__ = '{0}_account_list'.format(addon_short_name)
return _account_list
def folder_list(addon_short_name, addon_full_name, get_folders):
# TODO [OSF-6678]: Generalize this for API use after node settings have been refactored
@must_have_addon(addon_short_name, 'node')
@must_be_addon_authorizer(addon_short_name)
def _folder_list(node_addon, **kwargs):
"""Returns a list of folders"""
if not node_addon.has_auth:
raise HTTPError(http.FORBIDDEN)
folder_id = request.args.get('folderId')
return get_folders(node_addon, folder_id)
_folder_list.__name__ = '{0}_folder_list'.format(addon_short_name)
return _folder_list
def get_config(addon_short_name, Serializer):
@must_be_logged_in
@must_have_addon(addon_short_name, 'node')
@must_be_valid_project
@must_have_permission(permissions.WRITE)
def _get_config(node_addon, auth, **kwargs):
"""API that returns the serialized node settings."""
return {
'result': Serializer().serialize_settings(
node_addon,
auth.user
)
}
_get_config.__name__ = '{0}_get_config'.format(addon_short_name)
return _get_config
def set_config(addon_short_name, addon_full_name, Serializer, set_folder):
@must_not_be_registration
@must_have_addon(addon_short_name, 'user')
@must_have_addon(addon_short_name, 'node')
@must_be_addon_authorizer(addon_short_name)
@must_have_permission(permissions.WRITE)
def _set_config(node_addon, user_addon, auth, **kwargs):
"""View for changing a node's linked folder."""
folder = request.json.get('selected')
set_folder(node_addon, folder, auth)
path = node_addon.folder_path
return {
'result': {
'folder': {
'name': path.replace('All Files', '') if path != '/' else '/ (Full {0})'.format(
addon_full_name
),
'path': path,
},
'urls': Serializer(node_settings=node_addon).addon_serialized_urls,
},
'message': 'Successfully updated settings.',
}
_set_config.__name__ = '{0}_set_config'.format(addon_short_name)
return _set_config
def deauthorize_node(addon_short_name):
@must_not_be_registration
@must_have_addon(addon_short_name, 'node')
@must_have_permission(permissions.WRITE)
def _deauthorize_node(auth, node_addon, **kwargs):
node_addon.deauthorize(auth=auth)
node_addon.save()
_deauthorize_node.__name__ = '{0}_deauthorize_node'.format(addon_short_name)
return _deauthorize_node
| apache-2.0 |
nemesisdesign/django | django/contrib/sessions/backends/base.py | 27 | 12374 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class UpdateError(Exception):
"""
Occurs if Django tries to update a session that was deleted.
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
__not_given = object()
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=__not_given):
self.modified = self.modified or key in self._session
args = () if default is self.__not_given else (default,)
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
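    # Round-trip sketch (illustrative; assumes any concrete SessionBase
    # subclass, here called SomeBackend):
    #
    #   s = SomeBackend()
    #   s.decode(s.encode({'cart': [1, 2]}))   # -> {'cart': [1, 2]}
    #
    # A tampered payload fails the constant-time hash comparison in decode()
    # and yields an empty session dict instead.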
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
        Validate session key on assignment. Invalid values will be set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
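    # Worked example (hypothetical values): with '_session_expiry' set to a
    # datetime 90 seconds after `modification`, this returns
    # 0 * 86400 + 90 == 90; an integer expiry such as 3600 is returned as-is,
    # and a missing/zero expiry falls back to settings.SESSION_COOKIE_AGE.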
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, while retaining the current session data.
"""
try:
data = self._session_cache
except AttributeError:
data = {}
key = self.session_key
self.create()
self._session_cache = data
if key:
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() only updates an existing object and does not create one
(an UpdateError is raised).
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| bsd-3-clause |
SebasSBM/django | django/template/smartif.py | 275 | 6643 | """
Parser and utilities for the smart 'if' tag
"""
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
# Using a simple top down parser, as described here:
# http://effbot.org/zone/simple-top-down-parsing.htm.
# 'led' = left denotation
# 'nud' = null denotation
# 'bp' = binding power (left = lbp, right = rbp)
class TokenBase(object):
"""
Base class for operators and literals, mainly for debugging and for throwing
syntax errors.
"""
id = None # node/token type name
value = None # used by literals
first = second = None # used by tree nodes
def nud(self, parser):
# Null denotation - called in prefix context
raise parser.error_class(
"Not expecting '%s' in this position in if tag." % self.id
)
def led(self, left, parser):
# Left denotation - called in infix context
raise parser.error_class(
"Not expecting '%s' as infix operator in if tag." % self.id
)
def display(self):
"""
Returns what to display in error messages for this node
"""
return self.id
def __repr__(self):
out = [str(x) for x in [self.id, self.first, self.second] if x is not None]
return "(" + " ".join(out) + ")"
def infix(bp, func):
"""
Creates an infix operator, given a binding power and a function that
evaluates the node
"""
class Operator(TokenBase):
lbp = bp
def led(self, left, parser):
self.first = left
self.second = parser.expression(bp)
return self
def eval(self, context):
try:
return func(context, self.first, self.second)
except Exception:
# Templates shouldn't throw exceptions when rendering. We are
# most likely to get exceptions for things like {% if foo in bar
# %} where 'bar' does not support 'in', so default to False
return False
return Operator
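# Parsing sketch (illustrative): binding powers decide how far an operator
# reaches. With the OPERATORS table below, 'and' (bp 7) binds tighter than
# 'or' (bp 6), so a token stream like ["a", "or", "b", "and", "c"] parses as
# (or a (and b c)), mirroring Python's own precedence.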
def prefix(bp, func):
"""
Creates a prefix operator, given a binding power and a function that
evaluates the node.
"""
class Operator(TokenBase):
lbp = bp
def nud(self, parser):
self.first = parser.expression(bp)
self.second = None
return self
def eval(self, context):
try:
return func(context, self.first)
except Exception:
return False
return Operator
# Operator precedence follows Python.
# NB - we can get slightly more accurate syntax error messages by not using the
# same object for '==' and '='.
# We defer variable evaluation to the lambda to ensure that terms are
# lazily evaluated using Python's boolean parsing logic.
OPERATORS = {
'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),
'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),
'not': prefix(8, lambda context, x: not x.eval(context)),
'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),
'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),
# This should be removed in Django 1.10:
'=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
'==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
'!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),
'>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),
'>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),
'<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),
'<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),
}
# Assign 'id' to each:
for key, op in OPERATORS.items():
op.id = key
class Literal(TokenBase):
"""
A basic self-resolvable object similar to a Django template variable.
"""
# IfParser uses Literal in create_var, but TemplateIfParser overrides
# create_var so that a proper implementation that actually resolves
# variables, filters etc is used.
id = "literal"
lbp = 0
def __init__(self, value):
self.value = value
def display(self):
return repr(self.value)
def nud(self, parser):
return self
def eval(self, context):
return self.value
def __repr__(self):
return "(%s %r)" % (self.id, self.value)
class EndToken(TokenBase):
lbp = 0
def nud(self, parser):
raise parser.error_class("Unexpected end of expression in if tag.")
EndToken = EndToken()
class IfParser(object):
error_class = ValueError
def __init__(self, tokens):
        # pre-pass necessary to turn 'not', 'in' into a single 'not in' token
l = len(tokens)
mapped_tokens = []
i = 0
while i < l:
token = tokens[i]
if token == "not" and i + 1 < l and tokens[i + 1] == "in":
token = "not in"
i += 1 # skip 'in'
mapped_tokens.append(self.translate_token(token))
i += 1
self.tokens = mapped_tokens
self.pos = 0
self.current_token = self.next_token()
def translate_token(self, token):
try:
op = OPERATORS[token]
except (KeyError, TypeError):
return self.create_var(token)
else:
if token == '=':
warnings.warn(
"Operator '=' is deprecated and will be removed in Django 1.10. Use '==' instead.",
RemovedInDjango110Warning, stacklevel=2
)
return op()
def next_token(self):
if self.pos >= len(self.tokens):
return EndToken
else:
retval = self.tokens[self.pos]
self.pos += 1
return retval
def parse(self):
retval = self.expression()
# Check that we have exhausted all the tokens
if self.current_token is not EndToken:
raise self.error_class("Unused '%s' at end of if expression." %
self.current_token.display())
return retval
def expression(self, rbp=0):
t = self.current_token
self.current_token = self.next_token()
left = t.nud(self)
while rbp < self.current_token.lbp:
t = self.current_token
self.current_token = self.next_token()
left = t.led(left, self)
return left
def create_var(self, value):
return Literal(value)
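# Usage sketch (illustrative only; plain Python values stand in for template
# variables, which IfParser wraps in Literal via create_var):
#
#   tree = IfParser([True, 'and', 'not', False]).parse()
#   tree.eval({})   # -> True
#
# TemplateIfParser in django.template.defaulttags overrides create_var so
# real template variables and filters are resolved instead.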
| bsd-3-clause |
deepsrd/android_kernel_nx507j | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
Benoss/elastic_admin | elastic_utils/utils.py | 1 | 1080 | import time
def singleton(cls):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
class Timer(object):
def __init__(self, name='elapsed time', logger=None, print_result=False):
self.verbose = print_result
self.logger = logger
self.name = name
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print(self.get_formatted_string())
if self.logger:
self.logger(self.get_formatted_string())
def get_formatted_string(self):
return '{}: {:.1f} ms'.format(self.name, self.msecs)
def new_index_from_name(base_name):
"""
Return a new index name with a timestamp added at the end
:param base_name: str
:return: str
"""
return base_name + "." + str(int(time.time()))
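# Usage sketch (illustrative; the indexing workload is hypothetical):
#
#   with Timer('reindex', print_result=True):
#       do_reindex()                      # hypothetical workload
#   new_index_from_name('products')       # e.g. 'products.1491004800'
#
# Timer can also forward its formatted message to any callable passed as
# `logger`.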
| mit |
quantum13/hgh | hgh/urls.py | 1 | 1675 | # coding: utf-8
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'hgh.views.home', name='home'),
# url(r'^hgh/', include('hgh.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', 'apps.main.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page':'/'}, name='logout'),
url(r'^error_url/$', 'apps.main.views.login_error', name='login_error'),
url(r'^$', 'apps.main.views.home', name='home'),
url(r'^rating/experience/$', 'apps.main.views.rating', {'type':'experience'}, name='rating_exp'),
url(r'^rating/power/$', 'apps.main.views.rating', {'type':'power'}, name='rating_pow'),
url(r'^profile/$', 'apps.main.views.profile', name='profile'),
url(r'^fight/$', 'apps.main.views.prebattle', name='prebattle'),
url(r'^battle/$', 'apps.main.views.battle', name='battle'),
url(r'^battleresult/$', 'apps.main.views.postbattle', name='postbattle'),
url(r'^spellgettargets/(?P<id>\d+)/$', 'apps.main.views.get_target', name='spellgettargets'),
url(r'^battleinfo/(?P<id>\d+)/$', 'apps.main.views.battle_info', name='battle_info'),
url(r'^info/(?P<login>[^/]+)/$', 'apps.main.views.info', name='info'),
url(r'', include('social_auth.urls')),
)
| gpl-2.0 |
SophieBartmann/Faust-Bot | FaustBot/Modules/CustomUserModules/ICDObserver.py | 1 | 1273 | import csv
import re
from FaustBot.Communication.Connection import Connection
from FaustBot.Modules.PrivMsgObserverPrototype import PrivMsgObserverPrototype
class ICDObserver(PrivMsgObserverPrototype):
@staticmethod
def cmd():
return None
@staticmethod
def help():
return None
def get_icd(self, code):
if code == "C64" or code == "P20":
return ""
icd10_codes = open('care_icd10_de.csv', 'r',encoding='utf8')
icd10 = csv.reader(icd10_codes, delimiter=';', quotechar='"')
for row in icd10:
if row[0] == code:
                return code + ' - ' + row[1]
return 0
def update_on_priv_msg(self, data, connection: Connection):
if data['channel'] != connection.details.get_channel():
return
regex = r'\b(\w\d{2}\.?\d?)\b'
codes = re.findall(regex, data['message'])
for code in codes:
code = code.capitalize()
text = self.get_icd(code)
if text == 0:
if code.find('.') != -1:
code += '-'
else:
code += '.-'
text = self.get_icd(code)
if text != 0:
connection.send_back(text, data)
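# Matching sketch (illustrative): the regex above picks up ICD-10-style tokens
# such as 'J45', 'J45.9' or 'M54.5' from a channel message; get_icd() first
# tries the exact code and then retries with a '-' / '.-' suffix, which is
# presumably how non-terminal codes are listed in care_icd10_de.csv.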
| gpl-3.0 |
F1000Research/khmer | sandbox/count-kmers-single.py | 1 | 3273 | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) University of California, Davis, 2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
# pylint: disable=missing-docstring,invalid-name
"""
Produce k-mer counts for all the k-mers in the given sequence file,
using the given countgraph.
% python sandbox/count-kmers-single.py <fasta/fastq>
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import khmer
import argparse
import screed
import csv
from khmer.khmer_args import (build_counting_args, report_on_config, info,
add_threading_args)
from khmer.kfile import (check_input_files, check_space,
check_space_for_graph)
import threading
def get_parser():
parser = build_counting_args(
descr="Output abundances of the k-mers in the sequence file.")
add_threading_args(parser)
parser.add_argument('input_sequence_filename', help='The input'
' FAST[AQ] sequence file.')
parser.add_argument('-o', '--out', metavar="output_file",
dest='output_file',
type=argparse.FileType('w'),
default=None, help='output counts to this file')
return parser
def main():
info('count-kmers-single.py', ['counting'])
args = get_parser().parse_args()
check_input_files(args.input_sequence_filename, False)
print ('making k-mer countgraph', file=sys.stderr)
countgraph = khmer.Countgraph(args.ksize, args.max_tablesize,
args.n_tables)
# @CTB countgraph.set_use_bigcount(args.bigcount)
kmer_size = countgraph.ksize()
hashsizes = countgraph.hashsizes()
tracking = khmer._Nodegraph( # pylint: disable=protected-access
kmer_size, hashsizes)
print ('kmer_size: %s' % countgraph.ksize(), file=sys.stderr)
print ('k-mer countgraph sizes: %s' % (countgraph.hashsizes(),),
file=sys.stderr)
if args.output_file is None:
args.output_file = sys.stdout
writer = csv.writer(args.output_file)
# start loading
rparser = khmer.ReadParser(args.input_sequence_filename)
threads = []
print ('consuming input, round 1 -- %s' % (args.input_sequence_filename),
file=sys.stderr)
for _ in range(args.threads):
thread = \
threading.Thread(
target=countgraph.consume_fasta_with_reads_parser,
args=(rparser, )
)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
for record in screed.open(args.input_sequence_filename):
seq = record.sequence.replace('N', 'A')
for i in range(len(seq) - kmer_size + 1):
kmer = seq[i:i+kmer_size]
if not tracking.get(kmer):
tracking.count(kmer)
writer.writerow([kmer, str(countgraph.get(kmer))])
print ('Total number of unique k-mers: {0}'.format(
countgraph.n_unique_kmers()), file=sys.stderr)
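# Processing sketch (illustrative): the script makes two passes over the input
# -- a threaded consume pass that fills `countgraph`, then a single-threaded
# walk that emits each distinct k-mer once, using the `tracking` nodegraph as
# a seen-set. For a toy read 'ACGTA' with k=3, the emitted rows would be the
# counts for ACG, CGT and GTA.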
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
| bsd-3-clause |
Ken69267/config-stuff | .vim/eclim/autoload/eclim/python/rope/refactor/occurrences.py | 91 | 10704 | import re
import rope.base.pynames
from rope.base import pynames, pyobjects, codeanalyze, evaluate, exceptions, utils, worder
class Finder(object):
"""For finding occurrences of a name
The constructor takes a `filters` argument. It should be a list
of functions that take a single argument. For each possible
    occurrence, these functions are called in order with an
instance of `Occurrence`:
* If it returns `None` other filters are tried.
* If it returns `True`, the occurrence will be a match.
* If it returns `False`, the occurrence will be skipped.
* If all of the filters return `None`, it is skipped also.
"""
def __init__(self, pycore, name, filters=[lambda o: True], docs=False):
self.pycore = pycore
self.name = name
self.docs = docs
self.filters = filters
self._textual_finder = _TextualFinder(name, docs=docs)
def find_occurrences(self, resource=None, pymodule=None):
"""Generate `Occurrence` instances"""
tools = _OccurrenceToolsCreator(self.pycore, resource=resource,
pymodule=pymodule, docs=self.docs)
for offset in self._textual_finder.find_offsets(tools.source_code):
occurrence = Occurrence(tools, offset)
for filter in self.filters:
result = filter(occurrence)
if result is None:
continue
if result:
yield occurrence
break
def create_finder(pycore, name, pyname, only_calls=False, imports=True,
unsure=None, docs=False, instance=None, in_hierarchy=False):
"""A factory for `Finder`
Based on the arguments it creates a list of filters. `instance`
argument is needed only when you want implicit interfaces to be
considered.
"""
pynames = set([pyname])
filters = []
if only_calls:
filters.append(CallsFilter())
if not imports:
filters.append(NoImportsFilter())
if isinstance(instance, rope.base.pynames.ParameterName):
for pyobject in instance.get_objects():
try:
pynames.add(pyobject[name])
except exceptions.AttributeNotFoundError:
pass
for pyname in pynames:
filters.append(PyNameFilter(pyname))
if in_hierarchy:
filters.append(InHierarchyFilter(pyname))
if unsure:
filters.append(UnsureFilter(unsure))
return Finder(pycore, name, filters=filters, docs=docs)
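# Usage sketch (illustrative; `pycore`, `pyname` and the resource are assumed
# to come from an open rope project):
#
#   finder = create_finder(pycore, 'foo', pyname)
#   for occurrence in finder.find_occurrences(resource=some_resource):
#       print(occurrence.get_word_range())
#
# Extra filters (CallsFilter, UnsureFilter, ...) only narrow which textual
# hits are reported as occurrences.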
class Occurrence(object):
def __init__(self, tools, offset):
self.tools = tools
self.offset = offset
self.resource = tools.resource
@utils.saveit
def get_word_range(self):
return self.tools.word_finder.get_word_range(self.offset)
@utils.saveit
def get_primary_range(self):
return self.tools.word_finder.get_primary_range(self.offset)
@utils.saveit
def get_pyname(self):
try:
return self.tools.name_finder.get_pyname_at(self.offset)
except exceptions.BadIdentifierError:
pass
@utils.saveit
def get_primary_and_pyname(self):
try:
return self.tools.name_finder.get_primary_and_pyname_at(self.offset)
except exceptions.BadIdentifierError:
pass
@utils.saveit
def is_in_import_statement(self):
return (self.tools.word_finder.is_from_statement(self.offset) or
self.tools.word_finder.is_import_statement(self.offset))
def is_called(self):
return self.tools.word_finder.is_a_function_being_called(self.offset)
def is_defined(self):
return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset)
def is_a_fixed_primary(self):
return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset) or \
self.tools.word_finder.is_a_name_after_from_import(self.offset)
def is_written(self):
return self.tools.word_finder.is_assigned_here(self.offset)
def is_unsure(self):
return unsure_pyname(self.get_pyname())
@property
@utils.saveit
def lineno(self):
offset = self.get_word_range()[0]
return self.tools.pymodule.lines.get_line_number(offset)
def same_pyname(expected, pyname):
"""Check whether `expected` and `pyname` are the same"""
if expected is None or pyname is None:
return False
if expected == pyname:
return True
if type(expected) not in (pynames.ImportedModule, pynames.ImportedName) and \
type(pyname) not in (pynames.ImportedModule, pynames.ImportedName):
return False
return expected.get_definition_location() == pyname.get_definition_location() and \
expected.get_object() == pyname.get_object()
def unsure_pyname(pyname, unbound=True):
"""Return `True` if we don't know what this name references"""
if pyname is None:
return True
if unbound and not isinstance(pyname, pynames.UnboundName):
return False
if pyname.get_object() == pyobjects.get_unknown():
return True
class PyNameFilter(object):
"""For finding occurrences of a name"""
def __init__(self, pyname):
self.pyname = pyname
def __call__(self, occurrence):
if same_pyname(self.pyname, occurrence.get_pyname()):
return True
class InHierarchyFilter(object):
"""For finding occurrences of a name"""
def __init__(self, pyname, implementations_only=False):
self.pyname = pyname
self.impl_only = implementations_only
self.pyclass = self._get_containing_class(pyname)
if self.pyclass is not None:
self.name = pyname.get_object().get_name()
self.roots = self._get_root_classes(self.pyclass, self.name)
else:
self.roots = None
def __call__(self, occurrence):
if self.roots is None:
return
pyclass = self._get_containing_class(occurrence.get_pyname())
if pyclass is not None:
roots = self._get_root_classes(pyclass, self.name)
if self.roots.intersection(roots):
return True
def _get_containing_class(self, pyname):
if isinstance(pyname, pynames.DefinedName):
scope = pyname.get_object().get_scope()
parent = scope.parent
if parent is not None and parent.get_kind() == 'Class':
return parent.pyobject
def _get_root_classes(self, pyclass, name):
if self.impl_only and pyclass == self.pyclass:
return set([pyclass])
result = set()
for superclass in pyclass.get_superclasses():
if name in superclass:
result.update(self._get_root_classes(superclass, name))
if not result:
return set([pyclass])
return result
class UnsureFilter(object):
def __init__(self, unsure):
self.unsure = unsure
def __call__(self, occurrence):
if occurrence.is_unsure() and self.unsure(occurrence):
return True
class NoImportsFilter(object):
def __call__(self, occurrence):
if occurrence.is_in_import_statement():
return False
class CallsFilter(object):
def __call__(self, occurrence):
if not occurrence.is_called():
return False
class _TextualFinder(object):
def __init__(self, name, docs=False):
self.name = name
self.docs = docs
self.comment_pattern = _TextualFinder.any('comment', [r'#[^\n]*'])
self.string_pattern = _TextualFinder.any(
'string', [codeanalyze.get_string_pattern()])
self.pattern = self._get_occurrence_pattern(self.name)
def find_offsets(self, source):
if not self._fast_file_query(source):
return
if self.docs:
searcher = self._normal_search
else:
searcher = self._re_search
for matched in searcher(source):
yield matched
def _re_search(self, source):
for match in self.pattern.finditer(source):
for key, value in match.groupdict().items():
if value and key == 'occurrence':
yield match.start(key)
def _normal_search(self, source):
current = 0
while True:
try:
found = source.index(self.name, current)
current = found + len(self.name)
if (found == 0 or not self._is_id_char(source[found - 1])) and \
(current == len(source) or not self._is_id_char(source[current])):
yield found
except ValueError:
break
def _is_id_char(self, c):
return c.isalnum() or c == '_'
def _fast_file_query(self, source):
try:
source.index(self.name)
return True
except ValueError:
return False
def _get_source(self, resource, pymodule):
if resource is not None:
return resource.read()
else:
return pymodule.source_code
def _get_occurrence_pattern(self, name):
occurrence_pattern = _TextualFinder.any('occurrence',
['\\b' + name + '\\b'])
pattern = re.compile(occurrence_pattern + '|' + self.comment_pattern +
'|' + self.string_pattern)
return pattern
@staticmethod
def any(name, list_):
return '(?P<%s>' % name + '|'.join(list_) + ')'
class _OccurrenceToolsCreator(object):
def __init__(self, pycore, resource=None, pymodule=None, docs=False):
self.pycore = pycore
self.__resource = resource
self.__pymodule = pymodule
self.docs = docs
@property
@utils.saveit
def name_finder(self):
return evaluate.ScopeNameFinder(self.pymodule)
@property
@utils.saveit
def source_code(self):
if self.__resource is not None:
return self.resource.read()
else:
return self.pymodule.source_code
@property
@utils.saveit
def word_finder(self):
return worder.Worder(self.source_code, self.docs)
@property
@utils.saveit
def resource(self):
if self.__resource is not None:
return self.__resource
if self.__pymodule is not None:
return self.__pymodule.resource
@property
@utils.saveit
def pymodule(self):
if self.__pymodule is not None:
return self.__pymodule
return self.pycore.resource_to_pyobject(self.resource)
| mit |
Pexego/odoo | addons/hw_scanner/controllers/main.py | 51 | 7441 | # -*- coding: utf-8 -*-
import logging
import os
import time
from os import listdir
from os.path import join
from threading import Thread, Lock
from select import select
from Queue import Queue, Empty
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('OpenERP module hw_scanner depends on the evdev python module')
evdev = None
class Scanner(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/input/by-id/'
self.barcodes = Queue()
self.keymap = {
2: ("1","!"),
3: ("2","@"),
4: ("3","#"),
5: ("4","$"),
6: ("5","%"),
7: ("6","^"),
8: ("7","&"),
9: ("8","*"),
10:("9","("),
11:("0",")"),
12:("-","_"),
13:("=","+"),
# 14 BACKSPACE
# 15 TAB
16:("q","Q"),
17:("w","W"),
18:("e","E"),
19:("r","R"),
20:("t","T"),
21:("y","Y"),
22:("u","U"),
23:("i","I"),
24:("o","O"),
25:("p","P"),
26:("[","{"),
27:("]","}"),
# 28 ENTER
# 29 LEFT_CTRL
30:("a","A"),
31:("s","S"),
32:("d","D"),
33:("f","F"),
34:("g","G"),
35:("h","H"),
36:("j","J"),
37:("k","K"),
38:("l","L"),
39:(";",":"),
40:("'","\""),
41:("`","~"),
# 42 LEFT SHIFT
43:("\\","|"),
44:("z","Z"),
45:("x","X"),
46:("c","C"),
47:("v","V"),
48:("b","B"),
49:("n","N"),
50:("m","M"),
51:(",","<"),
52:(".",">"),
53:("/","?"),
# 54 RIGHT SHIFT
57:(" "," "),
}
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message = None):
if status == self.status['status']:
if message != None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Barcode Scanner Error: '+message)
elif status == 'disconnected' and message:
_logger.warning('Disconnected Barcode Scanner: '+message)
def get_device(self):
try:
if not evdev:
return None
devices = [ device for device in listdir(self.input_dir)]
keyboards = [ device for device in devices if ('kbd' in device) and ('keyboard' not in device.lower())]
scanners = [ device for device in devices if ('barcode' in device.lower()) or ('scanner' in device.lower())]
if len(scanners) > 0:
self.set_status('connected','Connected to '+scanners[0])
return evdev.InputDevice(join(self.input_dir,scanners[0]))
elif len(keyboards) > 0:
self.set_status('connected','Connected to '+keyboards[0])
return evdev.InputDevice(join(self.input_dir,keyboards[0]))
else:
self.set_status('disconnected','Barcode Scanner Not Found')
return None
except Exception as e:
self.set_status('error',str(e))
return None
def get_barcode(self):
""" Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcodes scanned in the past if they are not older than 5 seconds and have not
been returned before. This is necessary to catch barcodes scanned while the POS is
busy reading another barcode
"""
self.lockedstart()
while True:
try:
timestamp, barcode = self.barcodes.get(True, 5)
if timestamp > time.time() - 5:
return barcode
except Empty:
return ''
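    # Consumer sketch (illustrative): the POS polls this method through
    # ScannerDriver.scanner below; a hypothetical local caller would loop on
    # it in the same way:
    #
    #   while True:
    #       code = s.get_barcode()   # '' after a 5-second timeout
    #       if code:
    #           handle(code)         # hypothetical handler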
def get_status(self):
self.lockedstart()
return self.status
def run(self):
""" This will start a loop that catches all keyboard events, parse barcode
sequences and put them on a timestamped queue that can be consumed by
the point of sale's requests for barcode events
"""
self.barcodes = Queue()
barcode = []
shift = False
device = None
while True: # barcodes loop
if device: # ungrab device between barcodes and timeouts for plug & play
try:
device.ungrab()
except Exception as e:
self.set_status('error',str(e))
device = self.get_device()
if not device:
time.sleep(5) # wait until a suitable device is plugged
else:
try:
device.grab()
shift = False
barcode = []
while True: # keycode loop
r,w,x = select([device],[],[],5)
if len(r) == 0: # timeout
break
events = device.read()
for event in events:
if event.type == evdev.ecodes.EV_KEY:
#_logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
if event.value == 1: # keydown events
if event.code in self.keymap:
if shift:
barcode.append(self.keymap[event.code][1])
else:
barcode.append(self.keymap[event.code][0])
elif event.code == 42 or event.code == 54: # SHIFT
shift = True
elif event.code == 28: # ENTER, end of barcode
self.barcodes.put( (time.time(),''.join(barcode)) )
barcode = []
elif event.value == 0: #keyup events
if event.code == 42 or event.code == 54: # LEFT SHIFT
shift = False
except Exception as e:
self.set_status('error',str(e))
s = Scanner()
hw_proxy.drivers['scanner'] = s
class ScannerDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
def scanner(self):
return s.get_barcode()
| agpl-3.0 |
Hodorable/0602 | openstack_dashboard/dashboards/project/databases/tables.py | 30 | 13199 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.template import defaultfilters as d_filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.database_backups \
import tables as backup_tables
ACTIVE_STATES = ("ACTIVE",)
class TerminateInstance(tables.BatchAction):
help_text = _("Terminated instances are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate Instance",
u"Terminate Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled termination of Instance",
u"Scheduled termination of Instances",
count
)
name = "terminate"
classes = ("btn-danger", )
icon = "remove"
def action(self, request, obj_id):
api.trove.instance_delete(request, obj_id)
class RestartInstance(tables.BatchAction):
help_text = _("Restarted instances will lose any data not"
" saved in persistent storage.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Restart Instance",
u"Restart Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Restarted Instance",
u"Restarted Instances",
count
)
name = "restart"
classes = ('btn-danger', 'btn-reboot')
def allowed(self, request, instance=None):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTDOWN'))
def action(self, request, obj_id):
api.trove.instance_restart(request, obj_id)
class DetachReplica(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Detach Replica",
u"Detach Replicas",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Replica Detached",
u"Replicas Detached",
count
)
name = "detach_replica"
classes = ('btn-danger', 'btn-detach-replica')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES
and hasattr(instance, 'replica_of'))
def action(self, request, obj_id):
api.trove.instance_detach_replica(request, obj_id)
class DeleteUser(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete User",
u"Delete Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted User",
u"Deleted Users",
count
)
def delete(self, request, obj_id):
datum = self.table.get_object_by_id(obj_id)
try:
api.trove.user_delete(request, datum.instance.id, datum.name)
except Exception:
msg = _('Error deleting database user.')
exceptions.handle(request, msg)
class DeleteDatabase(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Database",
u"Delete Databases",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Database",
u"Deleted Databases",
count
)
def delete(self, request, obj_id):
datum = self.table.get_object_by_id(obj_id)
try:
api.trove.database_delete(request, datum.instance.id, datum.name)
except Exception:
msg = _('Error deleting database on instance.')
exceptions.handle(request, msg)
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:databases:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
class CreateBackup(tables.LinkAction):
name = "backup"
verbose_name = _("Create Backup")
url = "horizon:project:database_backups:create"
classes = ("ajax-modal",)
icon = "camera"
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
request.user.has_perm('openstack.services.object-store'))
def get_link_url(self, datam):
url = urlresolvers.reverse(self.url)
return url + "?instance=%s" % datam.id
class ResizeVolume(tables.LinkAction):
name = "resize_volume"
verbose_name = _("Resize Volume")
url = "horizon:project:databases:resize_volume"
classes = ("ajax-modal", "btn-resize")
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class ResizeInstance(tables.LinkAction):
name = "resize_instance"
verbose_name = _("Resize Instance")
url = "horizon:project:databases:resize_instance"
classes = ("ajax-modal", "btn-resize")
def allowed(self, request, instance=None):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.trove.instance_get(request, instance_id)
try:
flavor_id = instance.flavor['id']
instance.full_flavor = api.trove.flavor_get(request, flavor_id)
except Exception:
pass
instance.host = get_host(instance)
return instance
def get_datastore(instance):
if hasattr(instance, "datastore"):
return instance.datastore["type"]
return _("Not available")
def get_datastore_version(instance):
if hasattr(instance, "datastore"):
return instance.datastore["version"]
return _("Not available")
def get_host(instance):
if hasattr(instance, "hostname"):
return instance.hostname
elif hasattr(instance, "ip") and instance.ip:
return instance.ip[0]
return _("Not Assigned")
def get_size(instance):
if hasattr(instance, "full_flavor"):
size_string = _("%(name)s | %(RAM)s RAM")
vals = {'name': instance.full_flavor.name,
'RAM': sizeformat.mb_float_format(instance.full_flavor.ram)}
return size_string % vals
return _("Not available")
def get_volume_size(instance):
if hasattr(instance, "volume"):
return sizeformat.diskgbformat(instance.volume.get("size"))
return _("Not available")
def get_databases(user):
if hasattr(user, "access"):
databases = [db.name for db in user.access]
databases.sort()
return ', '.join(databases)
return _("-")
class InstancesTable(tables.DataTable):
STATUS_CHOICES = (
("ACTIVE", True),
("BLOCKED", True),
("BUILD", None),
("FAILED", False),
("REBOOT", None),
("RESIZE", None),
("BACKUP", None),
("SHUTDOWN", False),
("ERROR", False),
("RESTART_REQUIRED", None),
)
STATUS_DISPLAY_CHOICES = (
("ACTIVE", pgettext_lazy("Current status of a Database Instance",
u"Active")),
("BLOCKED", pgettext_lazy("Current status of a Database Instance",
u"Blocked")),
("BUILD", pgettext_lazy("Current status of a Database Instance",
u"Build")),
("FAILED", pgettext_lazy("Current status of a Database Instance",
u"Failed")),
("REBOOT", pgettext_lazy("Current status of a Database Instance",
u"Reboot")),
("RESIZE", pgettext_lazy("Current status of a Database Instance",
u"Resize")),
("BACKUP", pgettext_lazy("Current status of a Database Instance",
u"Backup")),
("SHUTDOWN", pgettext_lazy("Current status of a Database Instance",
u"Shutdown")),
("ERROR", pgettext_lazy("Current status of a Database Instance",
u"Error")),
("RESTART_REQUIRED",
pgettext_lazy("Current status of a Database Instance",
u"Restart Required")),
)
name = tables.Column("name",
link="horizon:project:databases:detail",
verbose_name=_("Instance Name"))
datastore = tables.Column(get_datastore,
verbose_name=_("Datastore"))
datastore_version = tables.Column(get_datastore_version,
verbose_name=_("Datastore Version"))
host = tables.Column(get_host, verbose_name=_("Host"))
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
volume = tables.Column(get_volume_size,
verbose_name=_("Volume Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "databases"
verbose_name = _("Instances")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (LaunchLink, TerminateInstance)
row_actions = (CreateBackup,
ResizeVolume,
ResizeInstance,
RestartInstance,
DetachReplica,
TerminateInstance)
class UsersTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("User Name"))
host = tables.Column("host", verbose_name=_("Allowed Host"))
databases = tables.Column(get_databases, verbose_name=_("Databases"))
class Meta(object):
name = "users"
verbose_name = _("Users")
table_actions = [DeleteUser]
row_actions = [DeleteUser]
def get_object_id(self, datum):
return datum.name
class DatabaseTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Database Name"))
class Meta(object):
name = "databases"
verbose_name = _("Databases")
table_actions = [DeleteDatabase]
row_actions = [DeleteDatabase]
def get_object_id(self, datum):
return datum.name
def is_incremental(obj):
return hasattr(obj, 'parent_id') and obj.parent_id is not None
class InstanceBackupsTable(tables.DataTable):
name = tables.Column("name",
link="horizon:project:database_backups:detail",
verbose_name=_("Name"))
created = tables.Column("created", verbose_name=_("Created"),
filters=[filters.parse_isotime])
location = tables.Column(lambda obj: _("Download"),
link=lambda obj: obj.locationRef,
verbose_name=_("Backup File"))
incremental = tables.Column(is_incremental,
verbose_name=_("Incremental"),
filters=(d_filters.yesno,
d_filters.capfirst))
status = tables.Column(
"status",
verbose_name=_("Status"),
status=True,
status_choices=backup_tables.STATUS_CHOICES,
display_choices=backup_tables.STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "backups"
verbose_name = _("Backups")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (backup_tables.LaunchLink, backup_tables.DeleteBackup)
row_actions = (backup_tables.RestoreLink, backup_tables.DeleteBackup)
| apache-2.0 |
louyihua/edx-platform | lms/djangoapps/branding/models.py | 63 | 1687 | """
Model used by Video module for Branding configuration.
Includes:
BrandingInfoConfig: A ConfigurationModel for managing how Video Module will
use Branding.
"""
import json
from django.db.models import TextField
from django.core.exceptions import ValidationError
from config_models.models import ConfigurationModel
class BrandingInfoConfig(ConfigurationModel):
"""
Configuration for Branding.
Example of configuration that must be stored:
{
"CN": {
"url": "http://www.xuetangx.com",
"logo_src": "http://www.xuetangx.com/static/images/logo.png",
"logo_tag": "Video hosted by XuetangX.com"
}
}
"""
class Meta(ConfigurationModel.Meta):
app_label = "branding"
configuration = TextField(
help_text="JSON data of Configuration for Video Branding."
)
def clean(self):
"""
Validates configuration text field.
"""
try:
json.loads(self.configuration)
except ValueError:
raise ValidationError('Must be valid JSON string.')
@classmethod
def get_config(cls):
"""
Get the Video Branding Configuration.
"""
info = cls.current()
return json.loads(info.configuration) if info.enabled else {}
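# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Assuming a BrandingInfoConfig row has been saved with the JSON shown in the
# class docstring and enabled=True, get_config() returns that JSON as a plain
# dict. The "CN" key and its fields below come from that docstring example.
def _example_branding_lookup(country_code="CN"):
    """Illustrative only: read one branding entry from the stored configuration."""
    config = BrandingInfoConfig.get_config()
    entry = config.get(country_code, {})
    return entry.get("url"), entry.get("logo_src"), entry.get("logo_tag")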
class BrandingApiConfig(ConfigurationModel):
"""Configure Branding api's
Enable or disable api's functionality.
When this flag is disabled, the api will return 404.
When the flag is enabled, the api will returns the valid reponse.
"""
class Meta(ConfigurationModel.Meta):
app_label = "branding"
| agpl-3.0 |
MRigal/django | tests/queryset_pickle/tests.py | 209 | 6081 | from __future__ import unicode_literals
import datetime
import pickle
import unittest
import warnings
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
@unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
        Test that a model not defined at module level is pickleable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
        Intentionally test the automatically created "through" model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type(str("DynamicEventSubclass"), (Event, ),
{'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(msg,
"Pickled queryset instance's Django version is not specified.")
def test_unsupported_unpickle(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled with a different Django version than the current
"""
qs = Group.previous_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(
msg,
"Pickled queryset instance's Django version 1.0 does not "
"match the current version %s." % get_version()
)
| bsd-3-clause |
marcusramberg/dotfiles | bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/modules/extras/system/at.py | 28 | 6404 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Richard Isaacson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: at
short_description: Schedule the execution of a command or script file via the at command.
description:
- Use this module to schedule a command or script file to run once in the future.
- All jobs are executed in the 'a' queue.
version_added: "1.5"
options:
command:
description:
- A command to be executed in the future.
required: false
default: null
script_file:
description:
- An existing script file to be executed in the future.
required: false
default: null
count:
description:
- The count of units in the future to execute the command or script file.
required: true
units:
description:
- The type of units in the future to execute the command or script file.
required: true
choices: ["minutes", "hours", "days", "weeks"]
state:
description:
- The state dictates if the command or script file should be evaluated as present(added) or absent(deleted).
required: false
choices: ["present", "absent"]
default: "present"
unique:
description:
- If a matching job is present a new job will not be added.
required: false
default: false
requirements:
- at
author: Richard Isaacson
'''
EXAMPLES = '''
# Schedule a command to execute in 20 minutes as root.
- at: command="ls -d / > /dev/null" count=20 units="minutes"
# Match a command to an existing job and delete the job.
- at: command="ls -d / > /dev/null" state="absent"
# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
'''
import os
import tempfile
def add_job(module, result, at_cmd, count, units, command, script_file):
at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
rc, out, err = module.run_command(at_command, check_rc=True)
if command:
os.unlink(script_file)
result['changed'] = True
def delete_job(module, result, at_cmd, command, script_file):
for matching_job in get_matching_jobs(module, at_cmd, script_file):
at_command = "%s -d %s" % (at_cmd, matching_job)
rc, out, err = module.run_command(at_command, check_rc=True)
result['changed'] = True
if command:
os.unlink(script_file)
module.exit_json(**result)
def get_matching_jobs(module, at_cmd, script_file):
matching_jobs = []
atq_cmd = module.get_bin_path('atq', True)
# Get list of job numbers for the user.
atq_command = "%s" % atq_cmd
rc, out, err = module.run_command(atq_command, check_rc=True)
current_jobs = out.splitlines()
if len(current_jobs) == 0:
return matching_jobs
# Read script_file into a string.
script_file_string = open(script_file).read().strip()
# Loop through the jobs.
# If the script text is contained in a job add job number to list.
for current_job in current_jobs:
split_current_job = current_job.split()
at_command = "%s -c %s" % (at_cmd, split_current_job[0])
rc, out, err = module.run_command(at_command, check_rc=True)
if script_file_string in out:
matching_jobs.append(split_current_job[0])
# Return the list.
return matching_jobs
def create_tempfile(command):
filed, script_file = tempfile.mkstemp(prefix='at')
fileh = os.fdopen(filed, 'w')
fileh.write(command)
fileh.close()
return script_file
def main():
module = AnsibleModule(
argument_spec = dict(
command=dict(required=False,
type='str'),
script_file=dict(required=False,
type='str'),
count=dict(required=False,
type='int'),
units=dict(required=False,
default=None,
choices=['minutes', 'hours', 'days', 'weeks'],
type='str'),
state=dict(required=False,
default='present',
choices=['present', 'absent'],
type='str'),
unique=dict(required=False,
default=False,
type='bool')
),
mutually_exclusive=[['command', 'script_file']],
required_one_of=[['command', 'script_file']],
supports_check_mode=False
)
at_cmd = module.get_bin_path('at', True)
command = module.params['command']
script_file = module.params['script_file']
count = module.params['count']
units = module.params['units']
state = module.params['state']
unique = module.params['unique']
if (state == 'present') and (not count or not units):
module.fail_json(msg="present state requires count and units")
result = {'state': state, 'changed': False}
# If command transform it into a script_file
if command:
script_file = create_tempfile(command)
# if absent remove existing and return
if state == 'absent':
delete_job(module, result, at_cmd, command, script_file)
# if unique if existing return unchanged
if unique:
if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
if command:
os.unlink(script_file)
module.exit_json(**result)
result['script_file'] = script_file
result['count'] = count
result['units'] = units
add_job(module, result, at_cmd, count, units, command, script_file)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| mit |
gorakhargosh/mom | mom/os/path.py | 1 | 5970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: mom.os.path
:synopsis: Directory walking, listing, and path sanitizing functions.
Functions
---------
.. autofunction:: get_dir_walker
.. autofunction:: walk
.. autofunction:: listdir
.. autofunction:: list_directories
.. autofunction:: list_files
.. autofunction:: absolute_path
.. autofunction:: real_absolute_path
.. autofunction:: parent_dir_path
"""
from __future__ import absolute_import
import functools
import os
from mom import builtins
__author__ = "[email protected] (Yesudeep Mangalapilly)"
__all__ = [
"absolute_path",
"get_dir_walker",
"list_directories",
"list_files",
"listdir",
"parent_dir_path",
"real_absolute_path",
"walk",
]
def get_dir_walker(recursive, topdown=True, followlinks=False):
"""
Returns a recursive or a non-recursive directory walker.
:param recursive:
``True`` produces a recursive walker; ``False`` produces a non-recursive
walker.
:returns:
A walker function.
"""
if recursive:
walker = functools.partial(os.walk,
topdown=topdown,
followlinks=followlinks)
else:
def walker(path, topdown=topdown, followlinks=followlinks):
"""Alternative walker."""
yield builtins.next(os.walk(path,
topdown=topdown,
followlinks=followlinks))
return walker
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):
"""
Walks a directory tree optionally recursively. Works exactly like
:func:`os.walk` only adding the `recursive` argument.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
walk_func = get_dir_walker(recursive, topdown, followlinks)
for root, dir_names, file_names in walk_func(dir_pathname):
yield (root, dir_names, file_names)
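# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# walk() mirrors os.walk() when recursive=True; with recursive=False the
# walker returned by get_dir_walker() yields only the top-level directory.
def _example_walk_usage(dir_pathname="."):
  """Illustrative only: count the files seen with and without recursion."""
  recursive_files = sum(len(files) for _, _, files in walk(dir_pathname, recursive=True))
  top_level_files = sum(len(files) for _, _, files in walk(dir_pathname, recursive=False))
  return recursive_files, top_level_files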
def listdir(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
  Lists all items in a directory using their absolute paths, optionally
  non-recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` (default) for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dir_names, file_names in walk(dir_pathname,
recursive, topdown, followlinks):
for dir_name in dir_names:
yield absolute_path(os.path.join(root, dir_name))
for file_name in file_names:
yield absolute_path(os.path.join(root, file_name))
def list_directories(dir_pathname, recursive=True, topdown=True,
followlinks=False):
"""
  Lists all the directories using their absolute paths within the
  specified directory, optionally non-recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` (default) for walking recursively through the directory
tree; ``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dir_names, _ in walk(dir_pathname, recursive, topdown, followlinks):
for dir_name in dir_names:
yield absolute_path(os.path.join(root, dir_name))
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
"""
  Lists all the files using their absolute paths within the
  specified directory, optionally non-recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, _, file_names in walk(dir_pathname,
recursive, topdown, followlinks):
for file_name in file_names:
yield absolute_path(os.path.join(root, file_name))
def absolute_path(path):
"""
Returns the absolute path for the given path and normalizes the
path.
:param path:
Path for which the absolute normalized path will be found.
:returns:
Absolute normalized path.
"""
return os.path.abspath(os.path.normpath(path))
def real_absolute_path(path):
"""
Returns the real absolute normalized path for the given path.
:param path:
Path for which the real absolute normalized path will be found.
:returns:
Real absolute normalized path.
"""
return os.path.realpath(absolute_path(path))
def parent_dir_path(path):
"""
Returns the parent directory path.
:param path:
Path for which the parent directory will be obtained.
:returns:
Parent directory path.
"""
return absolute_path(os.path.dirname(path))
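# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# absolute_path() normalizes and absolutizes a path, and parent_dir_path()
# composes it with os.path.dirname(); the sample path below is arbitrary.
def _example_path_helpers(path="some/dir/../file.txt"):
  """Illustrative only: show the normalized path and its parent directory."""
  normalized = absolute_path(path)
  return normalized, parent_dir_path(normalized)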
| apache-2.0 |
UnicornButter/external_chromium | chrome/common/extensions/docs/server/chromeextensionsdocs.py | 64 | 15204 | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cgi
import logging
import re
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import memcache
from google.appengine.api import urlfetch
# TODO(nickbaum): unit tests
# TODO(nickbaum): is this the right way to do constants?
class Channel():
def __init__(self, name, tag):
self.name = name
self.tag = tag
# TODO(nickbaum): unit test this
def matchPath(self, path):
match = "/" + self.name + "/"
    if path[0:len(match)] == match:
      return True
    else:
      return False
Channel.DEV = Channel("dev", "2.0-dev")
Channel.BETA = Channel("beta", "1.1-beta")
Channel.STABLE = Channel("stable", "")
Channel.CHANNELS = [Channel.DEV, Channel.BETA, Channel.STABLE]
Channel.TRUNK = Channel("trunk", "")
Channel.DEFAULT = Channel.STABLE
DEFAULT_CACHE_TIME = 300
class MainPage(webapp.RequestHandler):
# get page from memcache, or else fetch it from src
def get(self):
path = os.path.realpath(os.path.join('/', self.request.path))
# special path to invoke the unit tests
# TODO(nickbaum): is there a less ghetto way to invoke the unit test?
if path == "/test":
self.unitTest()
return
# if root, redirect to index.html
# TODO(nickbaum): this doesn't handle /chrome/extensions/trunk, etc
if (path == "/chrome/extensions") or (path == "chrome/extensions/"):
self.redirect("/chrome/extensions/index.html")
return
# else remove prefix
if(path[:18] == "/chrome/extensions"):
path = path[18:]
# TODO(nickbaum): there's a subtle bug here: if there are two instances of the app,
# their default caches will override each other. This is bad!
result = memcache.get(path)
if result is None:
logging.info("Cache miss: " + path)
url = self.getSrcUrl(path)
if (url[1] is not Channel.TRUNK) and (url[0] != "http://src.chromium.org/favicon.ico"):
branch = self.getBranch(url[1])
url = url[0] % branch
else:
url = url[0]
logging.info("Path: " + self.request.path)
logging.info("Url: " + url)
try:
result = urlfetch.fetch(url + self.request.query_string)
if result.status_code != 200:
logging.error("urlfetch failed: " + url)
# TODO(nickbaum): what should we do when the urlfetch fails?
except:
logging.error("urlfetch failed: " + url)
# TODO(nickbaum): what should we do when the urlfetch fails?
try:
if not memcache.add(path, result, DEFAULT_CACHE_TIME):
logging.error("Memcache set failed.")
except:
logging.error("Memcache set failed.")
for key in result.headers:
self.response.headers[key] = result.headers[key]
self.response.out.write(result.content)
def head(self):
self.get()
# get the src url corresponding to the request
# returns a tuple of the url and the branch
# this function is the only part that is unit tested
def getSrcUrl(self, path):
# from the path they provided, figure out which channel they requested
# TODO(nickbaum) clean this logic up
# find the first subdirectory of the path
path = path.split('/', 2)
url = "http://src.chromium.org/viewvc/chrome/"
channel = None
# if there's no subdirectory, choose the default channel
# otherwise, figure out if the subdirectory corresponds to a channel
if len(path) == 2:
path.append("")
if path[1] == "":
channel = Channel.DEFAULT
if(Channel.DEFAULT == Channel.TRUNK):
url = url + "trunk/src/chrome/"
else:
url = url + "branches/%s/src/chrome/"
path = ""
elif path[1] == Channel.TRUNK.name:
url = url + "trunk/src/chrome/"
channel = Channel.TRUNK
path = path[2]
else:
# otherwise, run through the different channel options
for c in Channel.CHANNELS:
if(path[1] == c.name):
channel = c
url = url + "branches/%s/src/chrome/"
path = path[2]
break
# if the subdirectory doesn't correspond to a channel, use the default
if channel is None:
channel = Channel.DEFAULT
if(Channel.DEFAULT == Channel.TRUNK):
url = url + "trunk/src/chrome/"
else:
url = url + "branches/%s/src/chrome/"
if path[2] != "":
path = path[1] + "/" + path[2]
else:
path = path[1]
# special cases
# TODO(nickbaum): this is super cumbersome to maintain
if path == "third_party/jstemplate/jstemplate_compiled.js":
url = url + path
elif path == "api/extension_api.json":
url = url + "common/extensions/" + path
elif path == "favicon.ico":
url = "http://src.chromium.org/favicon.ico"
else:
if path == "":
path = "index.html"
url = url + "common/extensions/docs/" + path
return [url, channel]
# get the current version number for the channel requested (dev, beta or stable)
# TODO(nickbaum): move to Channel object
def getBranch(self, channel):
branch = memcache.get(channel.name)
if branch is None:
# query Omaha to figure out which version corresponds to this channel
postdata = """<?xml version="1.0" encoding="UTF-8"?>
<o:gupdate xmlns:o="http://www.google.com/update2/request" protocol="2.0" testsource="crxdocs">
<o:app appid="{8A69D345-D564-463C-AFF1-A69D9E530F96}" version="0.0.0.0" lang="">
<o:updatecheck tag="%s" installsource="ondemandcheckforupdates" />
</o:app>
</o:gupdate>
""" % channel.tag
result = urlfetch.fetch(url="https://tools.google.com/service/update2",
payload=postdata,
method=urlfetch.POST,
headers={'Content-Type': 'application/x-www-form-urlencoded',
'X-USER-IP': '72.1.1.1'})
if result.status_code != 200:
logging.error("urlfetch failed.")
# TODO(nickbaum): what should we do when the urlfetch fails?
# find branch in response
match = re.search(r'<updatecheck Version="\d+\.\d+\.(\d+)\.\d+"', result.content)
if match is None:
logging.error("Version number not found: " + result.content)
#TODO(nickbaum): should we fall back on trunk in this case?
branch = match.group(1)
# TODO(nickbaum): make cache time a constant
if not memcache.add(channel.name, branch, DEFAULT_CACHE_TIME):
logging.error("Memcache set failed.")
return branch
# TODO(nickbaum): is there a more elegant way to write this unit test?
# I deliberately kept it dumb to avoid errors sneaking in, but it's so verbose...
# TODO(nickbaum): should I break this up into multiple files?
def unitTest(self):
self.response.out.write("Testing TRUNK<br/>")
self.check("/trunk/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/trunk/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/trunk/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
self.check("/trunk/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.TRUNK)
self.response.out.write("<br/>Testing DEV<br/>")
self.check("/dev/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/dev/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/dev/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
self.check("/dev/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.DEV)
self.response.out.write("<br/>Testing BETA<br/>")
self.check("/beta/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
self.check("/beta/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
self.check("/beta/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.BETA)
self.check("/beta/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.BETA)
self.response.out.write("<br/>Testing STABLE<br/>")
self.check("/stable/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
self.check("/stable/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
self.check("/stable/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.STABLE)
self.check("/stable/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.STABLE)
self.response.out.write("<br/>Testing jstemplate_compiled.js<br/>")
self.check("/trunk/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
self.check("/dev/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
self.check("/beta/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.BETA)
self.check("/stable/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.STABLE)
self.response.out.write("<br/>Testing extension_api.json<br/>")
self.check("/trunk/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
self.check("/dev/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
self.check("/beta/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.BETA)
self.check("/stable/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.STABLE)
self.response.out.write("<br/>Testing favicon.ico<br/>")
self.check("/trunk/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
self.check("/dev/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
self.check("/beta/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.BETA)
self.check("/stable/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.STABLE)
self.response.out.write("<br/>Testing DEFAULT<br/>")
temp = Channel.DEFAULT
Channel.DEFAULT = Channel.DEV
self.check("/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
self.check("/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.DEV)
self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.DEV)
self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
self.response.out.write("<br/>Testing DEFAULT (trunk)<br/>")
Channel.DEFAULT = Channel.TRUNK
self.check("/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
self.check("/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.TRUNK)
self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.TRUNK)
self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
Channel.DEFAULT = temp
return
# utility function for my unit test
# checks that getSrcUrl(path) returns the expected values
# TODO(nickbaum): can this be replaced by assert or something similar?
def check(self, path, expectedUrl, expectedChannel):
actual = self.getSrcUrl(path)
if (actual[0] != expectedUrl):
self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave url " + actual[0] + "<br/>")
elif (actual[1] != expectedChannel):
self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave branch " + actual[1].name + "<br/>")
else:
self.response.out.write("Path " + path + ' <span style="color:#0f0;">OK</span><br/>')
return
application = webapp.WSGIApplication([
('/.*', MainPage),
], debug=False)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| bsd-3-clause |
dhuppenkothen/stingray | stingray/conftest.py | 31 | 1240 | # this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions
# enable_deprecations_as_exceptions()
## Uncomment and customize the following lines to add/remove entries
## from the list of packages for which version numbers are displayed
## when running the tests
# try:
# PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
# PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
# del PYTEST_HEADER_MODULES['h5py']
# except NameError: # needed to support Astropy < 1.0
# pass
## Uncomment the following lines to display the version number of the
## package rather than the version number of Astropy in the top line when
## running the tests.
# import os
#
## This is to figure out the affiliated package version, rather than
## using Astropy's
# from . import version
#
# try:
# packagename = os.path.basename(os.path.dirname(__file__))
# TESTED_VERSIONS[packagename] = version.version
# except NameError: # Needed to support Astropy <= 1.0.0
# pass
| mit |
dongjoon-hyun/DIGITS | digits/scheduler.py | 3 | 19330 | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict
import os
import re
import shutil
import signal
import time
import traceback
import flask
import gevent
import gevent.event
import gevent.queue
from . import utils
from .config import config_value
from .dataset import DatasetJob
from .job import Job
from .log import logger
from .model import ModelJob
from .status import Status
from digits.utils import errors
"""
This constant configures how long to wait before automatically
deleting completed non-persistent jobs
"""
NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS = 3600
class Resource(object):
"""
Stores information about which tasks are using a resource
"""
class ResourceAllocation(object):
"""
Marks that a task is using [part of] a resource
"""
def __init__(self, task, value):
"""
Arguments:
task -- which task is using the resource
value -- how much of the resource is being used
"""
self.task = task
self.value = value
def __init__(self, identifier=None, max_value=1):
"""
Keyword arguments:
identifier -- some way to identify this resource
max_value -- a numeric representation of the capacity of this resource
"""
if identifier is None:
self.identifier = id(self)
else:
self.identifier = identifier
self.max_value = max_value
self.allocations = []
def remaining(self):
"""
Returns the amount of this resource that is not being used
"""
return self.max_value - sum(a.value for a in self.allocations)
def allocate(self, task, value):
"""
A task is requesting to use this resource
"""
if self.remaining() - value < 0:
raise RuntimeError('Resource is already maxed out at %s/%s' % (
self.remaining(),
self.max_value)
)
self.allocations.append(self.ResourceAllocation(task, value))
def deallocate(self, task):
"""
The task has finished using this resource
"""
for i, a in enumerate(self.allocations):
if id(task) == id(a.task):
self.allocations.pop(i)
return True
return False
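# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# The Scheduler below reserves Resource capacity per task: allocate() consumes
# part of max_value, remaining() reports what is left, and deallocate() frees
# it again. A plain object() stands in here for a real task instance.
def _example_resource_lifecycle():
    """Illustrative only: one allocate/deallocate cycle on a single-slot resource."""
    gpu = Resource(identifier=0, max_value=1)
    fake_task = object()
    gpu.allocate(fake_task, 1)
    assert gpu.remaining() == 0
    gpu.deallocate(fake_task)
    assert gpu.remaining() == 1
    return gpu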
class Scheduler:
"""
Coordinates execution of Jobs
"""
def __init__(self, gpu_list=None, verbose=False):
"""
Keyword arguments:
gpu_list -- a comma-separated string which is a list of GPU id's
verbose -- if True, print more errors
"""
self.jobs = OrderedDict()
self.verbose = verbose
# Keeps track of resource usage
self.resources = {
# TODO: break this into CPU cores, memory usage, IO usage, etc.
'parse_folder_task_pool': [Resource()],
'create_db_task_pool': [Resource(max_value=2)],
'analyze_db_task_pool': [Resource(max_value=4)],
'inference_task_pool': [Resource(max_value=4)],
'gpus': [Resource(identifier=index)
for index in gpu_list.split(',')] if gpu_list else [],
}
self.running = False
self.shutdown = gevent.event.Event()
def load_past_jobs(self):
"""
Look in the jobs directory and load all valid jobs
"""
loaded_jobs = []
failed_jobs = []
for dir_name in sorted(os.listdir(config_value('jobs_dir'))):
if os.path.isdir(os.path.join(config_value('jobs_dir'), dir_name)):
# Make sure it hasn't already been loaded
if dir_name in self.jobs:
continue
try:
job = Job.load(dir_name)
# The server might have crashed
if job.status.is_running():
job.status = Status.ABORT
for task in job.tasks:
if task.status.is_running():
task.status = Status.ABORT
# We might have changed some attributes here or in __setstate__
job.save()
loaded_jobs.append(job)
except Exception as e:
failed_jobs.append((dir_name, e))
# add DatasetJobs
for job in loaded_jobs:
if isinstance(job, DatasetJob):
self.jobs[job.id()] = job
# add ModelJobs
for job in loaded_jobs:
if isinstance(job, ModelJob):
try:
# load the DatasetJob
job.load_dataset()
self.jobs[job.id()] = job
except Exception as e:
failed_jobs.append((dir_name, e))
logger.info('Loaded %d jobs.' % len(self.jobs))
if len(failed_jobs):
logger.warning('Failed to load %d jobs.' % len(failed_jobs))
if self.verbose:
for job_id, e in failed_jobs:
logger.debug('%s - %s: %s' % (job_id, type(e).__name__, str(e)))
def add_job(self, job):
"""
Add a job to self.jobs
"""
if not self.running:
logger.error('Scheduler not running. Cannot add job.')
return False
else:
self.jobs[job.id()] = job
# Need to fix this properly
# if True or flask._app_ctx_stack.top is not None:
from digits.webapp import app, socketio
with app.app_context():
# send message to job_management room that the job is added
socketio.emit('job update',
{
'update': 'added',
'job_id': job.id(),
},
namespace='/jobs',
room='job_management',
)
if 'DIGITS_MODE_TEST' not in os.environ:
# Let the scheduler do a little work before returning
time.sleep(utils.wait_time())
return True
def get_job(self, job_id):
"""
Look through self.jobs to try to find the Job
Returns None if not found
"""
if job_id is None:
return None
return self.jobs.get(job_id, None)
def get_related_jobs(self, job):
"""
Look through self.jobs to try to find the Jobs
whose parent contains job
"""
related_jobs = []
if isinstance(job, ModelJob):
datajob = job.dataset
related_jobs.append(datajob)
elif isinstance(job, DatasetJob):
datajob = job
else:
raise ValueError("Unhandled job type %s" % job.job_type())
for j in self.jobs.values():
# Any model that shares (this/the same) dataset should be added too:
if isinstance(j, ModelJob):
if datajob == j.train_task().dataset and j.id() != job.id():
related_jobs.append(j)
return related_jobs
def abort_job(self, job_id):
"""
Aborts a running Job
Returns True if the job was found and aborted
"""
job = self.get_job(job_id)
if job is None or not job.status.is_running():
return False
job.abort()
logger.info('Job aborted.', job_id=job_id)
return True
def delete_job(self, job):
"""
Deletes an entire job folder from disk
Returns True if the Job was found and deleted
"""
if isinstance(job, str) or isinstance(job, unicode):
job_id = str(job)
elif isinstance(job, Job):
job_id = job.id()
else:
raise ValueError('called delete_job with a %s' % type(job))
dependent_jobs = []
# try to find the job
job = self.jobs.get(job_id, None)
if job:
if isinstance(job, DatasetJob):
# check for dependencies
for j in self.jobs.values():
if isinstance(j, ModelJob) and j.dataset_id == job.id():
logger.error('Cannot delete "%s" (%s) because "%s" (%s) depends on it.' % (job.name(), job.id(), j.name(), j.id()))
dependent_jobs.append(j.name())
if len(dependent_jobs)>0:
error_message = 'Cannot delete "%s" because %d model%s depend%s on it: %s' % (
job.name(),
len(dependent_jobs),
('s' if len(dependent_jobs) != 1 else ''),
('s' if len(dependent_jobs) == 1 else ''),
', '.join(['"%s"' % j for j in dependent_jobs]))
raise errors.DeleteError(error_message)
self.jobs.pop(job_id, None)
job.abort()
if os.path.exists(job.dir()):
shutil.rmtree(job.dir())
logger.info('Job deleted.', job_id=job_id)
from digits.webapp import socketio
socketio.emit('job update',
{
'update': 'deleted',
'job_id': job.id()
},
namespace='/jobs',
room='job_management',
)
return True
# see if the folder exists on disk
path = os.path.join(config_value('jobs_dir'), job_id)
path = os.path.normpath(path)
if os.path.dirname(path) == config_value('jobs_dir') and os.path.exists(path):
shutil.rmtree(path)
return True
return False
def running_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def completed_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and not j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def running_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def completed_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and not j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def start(self):
"""
Start the Scheduler
Returns True on success
"""
if self.running:
return True
gevent.spawn(self.main_thread)
self.running = True
return True
def stop(self):
"""
Stop the Scheduler
Returns True if the shutdown was graceful
"""
self.shutdown.set()
wait_limit = 5
start = time.time()
while self.running:
if time.time() - start > wait_limit:
return False
time.sleep(0.1)
return True
def main_thread(self):
"""
Monitors the jobs in current_jobs, updates their statuses,
and puts their tasks in queues to be processed by other threads
"""
signal.signal(signal.SIGTERM, self.sigterm_handler)
try:
last_saved = None
while not self.shutdown.is_set():
# Iterate backwards so we can delete jobs
for job in self.jobs.values():
if job.status == Status.INIT:
def start_this_job(job):
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.WAIT
else:
job.status = Status.RUN
if 'DIGITS_MODE_TEST' in os.environ:
start_this_job(job)
else:
# Delay start by one second for initial page load
gevent.spawn_later(1, start_this_job, job)
if job.status == Status.WAIT:
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.RUN
if job.status == Status.RUN:
alldone = True
for task in job.tasks:
if task.status in [Status.INIT, Status.WAIT]:
alldone = False
# try to start the task
if task.ready_to_queue():
requested_resources = task.offer_resources(self.resources)
if requested_resources is None:
task.status = Status.WAIT
else:
if self.reserve_resources(task, requested_resources):
gevent.spawn(self.run_task,
task, requested_resources)
elif task.status == Status.RUN:
# job is not done
alldone = False
elif task.status in [Status.DONE, Status.ABORT]:
# job is done
pass
elif task.status == Status.ERROR:
# propagate error status up to job
job.status = Status.ERROR
alldone = False
break
else:
logger.warning('Unrecognized task status: "%s"', task.status, job_id=job.id())
if alldone:
job.status = Status.DONE
logger.info('Job complete.', job_id=job.id())
job.save()
# save running jobs every 15 seconds
if not last_saved or time.time()-last_saved > 15:
for job in self.jobs.values():
if job.status.is_running():
if job.is_persistent():
job.save()
elif (not job.is_persistent()) and (time.time() - job.status_history[-1][1] > NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS):
# job has been unclaimed for far too long => proceed to garbage collection
self.delete_job(job)
last_saved = time.time()
if 'DIGITS_MODE_TEST' not in os.environ:
time.sleep(utils.wait_time())
else:
time.sleep(0.05)
except KeyboardInterrupt:
pass
# Shutdown
for job in self.jobs.values():
job.abort()
job.save()
self.running = False
def sigterm_handler(self, signal, frame):
"""
Gunicorn shuts down workers with SIGTERM, not SIGKILL
"""
self.shutdown.set()
def task_error(self, task, error):
"""
Handle an error while executing a task
"""
logger.error('%s: %s' % (type(error).__name__, error), job_id=task.job_id)
task.exception = error
task.traceback = traceback.format_exc()
task.status = Status.ERROR
def reserve_resources(self, task, resources):
"""
Reserve resources for a task
"""
try:
# reserve resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
found = False
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.allocate(task, value)
self.emit_gpus_available()
found = True
break
if not found:
raise RuntimeError('Resource "%s" with identifier="%s" not found' % (
resource_type, identifier))
task.current_resources = resources
return True
except Exception as e:
self.task_error(task, e)
self.release_resources(task, resources)
return False
def release_resources(self, task, resources):
"""
Release resources previously reserved for a task
"""
# release resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.deallocate(task)
self.emit_gpus_available()
task.current_resources = None
def run_task(self, task, resources):
"""
Executes a task
Arguments:
task -- the task to run
resources -- the resources allocated for this task
a dict mapping resource_type to lists of (identifier, value) tuples
"""
try:
task.run(resources)
except Exception as e:
self.task_error(task, e)
finally:
self.release_resources(task, resources)
def emit_gpus_available(self):
"""
Call socketio.emit gpu availability
"""
from digits.webapp import scheduler, socketio
socketio.emit('server update',
{
'update': 'gpus_available',
'total_gpu_count': len(self.resources['gpus']),
'remaining_gpu_count': sum(r.remaining() for r in scheduler.resources['gpus']),
},
namespace='/jobs',
room='job_management'
)
| bsd-3-clause |
dreadsci/forget-me-not | test_structures.py | 2 | 13775 | import unittest
from datasets import *
class TestSimplest(Structure):
_fields = [Parsable('base', required=True, positional=True, keyword=False),
Parsable('myParam', required=True, positional=False, keyword=True)]
def test_simplest():
t = TestSimplest(base='a', myParam='b')
assert t.get_dir_string() == 'a/myParam-b'
def test_simpler_strings():
t = TestSimplest(base='baseInst', myParam='paramInst')
assert t.get_arg_string() == '--base baseInst --myParam paramInst'
assert t.get_dir_string() == 'baseInst/myParam-paramInst'
def test_simple_assert_matches():
t = TestSimplest(base='baseInst', myParam='paramInst')
assert t.matches(assert_vals={'base': ['baseInst', 'other']}) == True
assert t.matches(assert_vals={'myParam': ['baseInst', 'other']}) == False
def test_simple_negative_matches():
t = TestSimplest(base='baseInst', myParam='paramInst')
assert t.matches(drop_vals={'base': ['baseInst', 'other']}) == False
assert t.matches(drop_vals={'myParam': ['baseInst', 'other']}) == True
def test_extra_matches():
t = TestSimplest(base='baseInst', myParam='paramInst')
assert t.matches(assert_vals={'base2': ['baseInst', 'other']}) == False
t = TestSimplest(base='baseInst', myParam='paramInst')
assert t.matches(assert_vals={'base': []}) == False
assert t.matches(drop_vals={'base2': ['baseInst', 'other']}) == True
def test_simple_arg_parse():
t = TestSimplest(base='a', myParam='b')
assert t.get_arg_string() == '--base a --myParam b'
t2 = TestSimplest.from_args(['--base', 'a', '--myParam', 'b'])
assert t.get_dir_string() == t2.get_dir_string()
def test_simple_none():
t = TestSimplest(base='a', myParam=None)
assert t.get_dir_string() == 'a/myParam-None'
assert t.myParam is None
class TestNonList(Structure):
_fields = [Parsable('mine', keyword=False, positional=True, default='sub-1'),
Listable('mylist', keyword=False, positional=True, default='0-1-2'),
Keyed('mykey', keyword=False, positional=True, default='a-0_b-1')]
def test_nonlist():
t = TestNonList()
assert t.mine == 'sub-1'
assert t.mylist == [0, 1, 2]
assert t.mykey['a'] == 0
assert t.mykey['b'] == 1
def test_nonlist_string():
t = TestNonList()
ds = t.get_dir_string()
print(ds)
assert ds == 'sub-1/0-1-2/a-0_b-1'
def test_funky_matches():
t = TestNonList()
assert t.matches(assert_vals={'mine': ['sub-1', 'sub-2'],
'mylist':['0-1-2', '1-2-3'],
'mykey': ['a-0_b-0', 'a-0_b-1']}) == True
assert t.matches(assert_vals={'mine': ['sub-1', 'sub-2'],
'mylist':['0-1-2', '1-2-3'],
'mykey': ['a-0_b-0', 'a-1_b-1']}) == False
assert t.matches(assert_vals={'mine': ['sub-1', 'sub-2'],
'mylist':['0-1-2', '1-2-3'],
'mykey': ['a-0_b-0', 'a-0_b-1'],
'other': ['a', 'b']}) == False
class TestTypes(Structure):
_fields = [Parsable('RPK', required=True, positional=True, keyword=True),
Parsable('RPnoK', required=True, positional=True, keyword=False),
Parsable('RnoPK', required=True, positional=False, keyword=True),
Parsable('RnoPnoK', required=True, positional=False, keyword=False),
Parsable('noRPK', required=False, positional=True, keyword=True),
Parsable('noRnoPK', required=False, positional=False, keyword=True),
#Parsable('noRnoPnoK', required=False, positional=False, keyword=False),
#Parsable('noRPnoK', required=False, positional=True, keyword=False),
# can't have optional without a keyword, too hard to parse
]
def test_all_type_config():
t = TestTypes(RPK="overkill",
RPnoK="arrogant",
RnoPK="simple",
RnoPnoK="pushy",
noRPK="verbose",
noRnoPK="simpleopt"
)
print(t.get_dir_string())
assert t.get_dir_string() == "RPK-overkill/arrogant/noRPK-verbose/pushy_RnoPK-simple_noRnoPK-simpleopt"
assert hasattr(t, 'noRPK')
def test_nonreq_type():
t = TestTypes(RPK="overkill",
RPnoK="arrogant",
RnoPK="simple",
RnoPnoK="pushy"
)
assert t.get_dir_string() == "RPK-overkill/arrogant/pushy_RnoPK-simple"
assert not hasattr(t, 'noRPK')
def test_reverse_types():
d = "RPK-overkill/arrogant/noRPK-verbose/pushy_RnoPK-simple_noRnoPK-simpleopt"
dp = TestTypes.params_from_dir_string(d)
print(dp)
assert dp['RPK'] == 'overkill'
assert dp['RPnoK'] == "arrogant"
assert dp["RnoPK"] == "simple"
assert dp["noRPK"] == "verbose"
assert dp["noRnoPK"] == "simpleopt"
def test_missing_opt():
d = "RPK-overkill/arrogant/noRPK-verbose/pushy_RnoPK-simple"
dp = TestTypes.params_from_dir_string(d)
print(dp)
assert dp['RPK'] == 'overkill'
assert dp['RPnoK'] == "arrogant"
assert dp["RnoPK"] == "simple"
assert dp["noRPK"] == "verbose"
assert 'noRnoPK' not in dp
def test_missing_pos():
d = "RPK-overkill/arrogant/pushy_RnoPK-simple_noRnoPK-simpleopt"
dp = TestTypes.params_from_dir_string(d)
print(dp)
assert dp['RPK'] == 'overkill'
assert dp['RPnoK'] == "arrogant"
assert dp["RnoPK"] == "simple"
assert dp["noRnoPK"] == "simpleopt"
assert 'noRPK' not in dp
class TestBools(Structure):
_fields = [Boolean('safety', default=True),
Boolean('verbose', default=False),
Parsable('hello', default='world')]
def test_falses():
t = TestBools.from_args(["--safety_off", "--verbose_off"])
assert t.safety == False
assert t.verbose == False
print(t.get_arg_string())
assert t.get_arg_string() == "--safety_off --verbose_off --hello world"
def test_trues():
t = TestBools.from_args(["--safety", "--verbose", "--hello", "universe"])
assert t.safety
assert t.verbose
assert t.get_arg_string() == "--safety --verbose --hello universe"
class TestImpossibleParsable(Structure):
_fields = [Parsable('opt', required=True, positional=False),
Parsable('helpful', required=True, positional=False, default='nada')]
def test_impossible_string():
t = TestImpossibleParsable(opt='hello')
print(t.get_dir_string())
assert t.get_dir_string() == 'helpful-nada_opt-hello'
def test_gottaGiveSomething():
t = TestImpossibleParsable(opt='hello')
    try:
        t = TestImpossibleParsable()
        assert False
    except TypeError:
        assert True
class TestKeywordlessKeyed(Structure):
_fields = [Keyed('myReq', required=True, keyword=False, positional=True),
Parsable('hello', required=True, default='world')]
def test_keywordless():
t = TestKeywordlessKeyed(myReq='got-it')
print(t.get_dir_string())
assert t.get_dir_string() == 'got-it/hello-world'
class TestKeywordedKeyed(Structure):
_fields = [Keyed('myKeyReq', required=True, keyword=True),
Parsable('hello', required=True, default='world', positional=True)]
def test_keyworded():
t = TestKeywordedKeyed(myKeyReq='got-it')
print(t.get_dir_string())
assert t.get_dir_string() == 'hello-world/myKeyReq-got-it'
class TestDefaults(Structure):
"""
If empty is used as the default, then it's only an attribute if it's been set.
If None is used, it's assumed to be a valid value
"""
_fields = [Parsable('must', required=True),
Parsable('default', required=False, default=None),
Parsable('conditional', required=False, default=empty)]
def test_defaults_set():
t = TestDefaults(must='hello', default='world', conditional='hi')
assert t.get_dir_string() == 'conditional-hi_default-world_must-hello'
tas = t.get_arg_string()
assert tas == '--must hello --default world --conditional hi'
t2 = TestDefaults.from_args(tas.split())
assert t2.get_dir_string() == t2.get_dir_string()
def test_defaults_not_set():
t = TestDefaults(must='hello')
assert t.default == None
assert t.get_dir_string() == 'default-None_must-hello'
tas = t.get_arg_string()
assert tas == '--must hello --default None '
t2 = TestDefaults.from_args(tas.split())
assert t2.get_dir_string() == t2.get_dir_string()
class TestListables(Structure):
_fields = [Listable('myPrimeList', required=True, keyword=False),
Listable('myOtherList', required=True, keyword=True),
]
def test_list_params():
t = TestListables(myPrimeList='a-b-c', myOtherList='0-1-2')
ds = t.get_dir_string()
print(ds)
print("npnk key: ", t._has_npnk)
assert ds == 'a-b-c_myOtherList-0-1-2'
dp = t.params_from_dir_string(ds)
print(dp)
assert dp['base_dir'] == ''
assert dp['myPrimeList'] == 'a-b-c'
assert dp['myOtherList'] == '0-1-2'
def test_number_list():
t = TestListables(myPrimeList='0-1-2', myOtherList='0.99')
assert t.myPrimeList == [0, 1, 2]
assert t.myOtherList == [0.99]
class TestStructs(Structure):
_fields = [Parsable('nom', default='hello', required=True),
Struct('child', dtype=TestSimplest),
Struct('problem_child', dtype=TestDefaults),
]
def test_simple_inherit():
ts = TestStructs(nom='hi', base='a', myParam='b', must='hello')
assert ts.child.base == 'a'
assert ts.nom == 'hi'
class TestChoices(Structure):
_fields = [Parsable('pick1', choices=['a', 'b', 'c'])]
def test_choices():
tc = TestChoices(pick1='a')
assert tc.pick1 == 'a'
def test_bad_choices():
try:
tc = TestChoices(pick1='d')
assert False
except TypeError:
assert True
class TestNargs(Structure):
_fields = [Parsable('must', nargs=1),
Parsable('may', nargs='+'),
Parsable('might', nargs='*')]
def test_simple_nargs():
tn = TestNargs.from_args('--must hello --may be --might somewhere'.split())
assert tn.must == ['hello']
assert tn.may == ['be']
assert tn.might == ['somewhere']
tn = TestNargs.from_args('--must be there --may be here --might be somewhere'.split())
assert tn.must == ['be']
assert tn.may == ['be', 'here']
assert tn.might == ['be', 'somewhere']
def test_nargs_direct():
tn = TestNargs(must='hello', may='be', might='somewhere')
assert tn.must == ['hello']
assert tn.may == ['be']
assert tn.might == ['somewhere']
tn = TestNargs(must=['hello'], may=['be'], might=['somewhere'])
assert tn.must == ['hello']
assert tn.may == ['be']
assert tn.might == ['somewhere']
tn = TestNargs(must=['be', 'there'], may=['be', 'here'],
might=['be', 'somewhere'])
assert tn.must == ['be']
assert tn.may == ['be', 'here']
assert tn.might == ['be', 'somewhere']
def test_missing_narg():
try:
tn = TestNargs.from_args('--must --may --might'.split())
assert False
except SystemExit:
assert True
tn = TestNargs.from_args('--must too many --may one --might two'.split())
assert tn.must == ['too']
tn = TestNargs.from_args('--must just --may one --might'.split())
assert tn.must == ['just']
assert tn.may == ['one']
assert tn.might == []
def test_missing_narg_keyword():
try:
tn = TestNargs()
assert False
except TypeError:
assert True
tn = TestNargs(must=['too', 'many'], may='one', might='two')
assert tn.must == ['too']
assert tn.may == ['one']
assert tn.might == ['two']
tn = TestNargs(must=['just'], may='one')
assert tn.must == ['just']
assert tn.may == ['one']
assert not hasattr(tn, 'might') #TODO: is this difference going to be problematic?
class TestAllNargs(Structure):
_fields = [Parsable('strict', nargs=2),
Integer('binary', nargs='?'),
Parsable('flexible', nargs='*'),
Parsable('something', nargs='+')]
def test_all_nargs_given():
tn = TestAllNargs(strict=['a', 'b'], binary='0', flexible=['1', '2', '3'], something=['d', 'e'])
assert tn.strict == ['a', 'b']
assert tn.binary == [0]
assert tn.flexible == ['1', '2', '3']
assert tn.something == ['d', 'e']
def test_mandatory_nargs_given():
tn = TestAllNargs(strict=['a', 'b'], something='a')
assert tn.strict == ['a', 'b']
assert tn.something == ['a']
def test_all_nargs_args():
tn = TestAllNargs.from_args('--strict a b --binary 0 --flexible 1 2 3 --something d e'.split())
assert tn.strict == ['a', 'b']
assert tn.binary == [0]
assert tn.flexible == ['1', '2', '3']
assert tn.something == ['d', 'e']
class TestChoiceNargs(Structure):
_fields = [Parsable('select', choices=['a', 'b','c']),
Parsable('letters', nargs='+'),
Integer('numbers', choices=[0, 1, 2], nargs='+')]
def test_chosen_nargs():
test = TestChoiceNargs(select='a', letters=['d', 'e', 'f'], numbers=[0, 1])
assert test.select == 'a'
assert test.letters == ['d', 'e', 'f']
assert test.numbers == [0, 1]
def test_invalid_narg_choices():
test = TestChoiceNargs(select='a', letters='a', numbers=0)
assert test.numbers == [0]
try:
test = TestChoiceNargs(select='a', letters='a', numbers=99)
assert False
except TypeError:
assert True
if __name__ == '__main__':
unittest.main() | unlicense |
40223143/2015_0505 | static/Brython3.1.1-20150328-091302/Lib/ui/slider.py | 603 | 2394 | from . import widget
from browser import doc,html
class Slider(widget.Widget):
def __init__(self, id=None, label=False):
self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
widget.Widget.__init__(self, self._div_shell, 'slider', id)
self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
Href='#', style={'left': '0px'})
self._value=0
self._isMouseDown=False
self.m0 = [None, None]
def startSlide(ev):
self._isMouseDown=True
self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
pos = widget.getMousePosition(ev)
self._startMouseX=pos['x']
print('left', self._handle.style.left,'ev.x',ev.x)
self._lastElementLeft = int(self._handle.left)
print('left', self._lastElementLeft)
updatePosition(ev)
def updatePosition(ev):
#pos = widget.getMousePosition(ev)
#print('mose pos',pos)
_newPos = self._lastElementLeft + ev.x - self._startMouseX
_newPos = max(0, _newPos)
_newPos = min(_newPos, self._upperBound)
self._handle.left = _newPos
print('new position',self._handle.style.left)
self._lastElementLeft = _newPos
def moving(e):
if self._isMouseDown:
updatePosition(e)
def dropCallback(e):
self._isMouseDown=False
self._handle.unbind('mousemove', moving)
self._handle.bind('mousemove', moving)
self._handle.bind('mouseup', dropCallback)
#self._handle.bind('mouseout', dropCallback)
self._handle.bind('mousedown', startSlide)
def mouseover(e):
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
def mouseout(e):
self._isMouseDown=False
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
self._handle.bind('mouseover', mouseover)
self._handle.bind('mouseout', mouseout)
self._div_shell <= self._handle
def get_value(self):
return self._value
#def set_value(self, value):
# self._value=value
# self._handle.style.left='%spx' % value
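# Illustrative usage sketch (not part of the original module; assumes the
# surrounding `ui` package and a Brython page context):
#
#   from browser import doc
#   from ui.slider import Slider
#
#   s = Slider(id='volume')
#   doc <= s._div_shell      # attach the slider's DOM shell to the page
#   print(s.get_value())     # returns the stored value (0 by default)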
| agpl-3.0 |
reversefold/mysql-connector-python | lib/mysql/connector/catch23.py | 26 | 3574 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Python v2 to v3 migration module"""
from decimal import Decimal
import struct
import sys
from .custom_types import HexLiteral
# pylint: disable=E0602,E1103
PY2 = sys.version_info[0] == 2
if PY2:
NUMERIC_TYPES = (int, float, Decimal, HexLiteral, long)
INT_TYPES = (int, long)
UNICODE_TYPES = (unicode,)
STRING_TYPES = (str, unicode)
BYTE_TYPES = (bytearray,)
else:
NUMERIC_TYPES = (int, float, Decimal, HexLiteral)
INT_TYPES = (int,)
UNICODE_TYPES = (str,)
STRING_TYPES = (str,)
BYTE_TYPES = (bytearray, bytes)
def init_bytearray(payload=b'', encoding='utf-8'):
"""Initializes a bytearray from the payload"""
if isinstance(payload, bytearray):
return payload
if PY2:
return bytearray(payload)
if isinstance(payload, int):
return bytearray(payload)
elif not isinstance(payload, bytes):
try:
return bytearray(payload.encode(encoding=encoding))
except AttributeError:
raise ValueError("payload must be a str or bytes")
return bytearray(payload)
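# Illustrative behaviour of init_bytearray (comments only, not part of the
# original module):
#
#   init_bytearray('abc')    # -> bytearray(b'abc'), encoded as UTF-8
#   init_bytearray(4)        # -> bytearray of four zero bytes
#   init_bytearray(b'xyz')   # -> bytearray(b'xyz')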
def isstr(obj):
"""Returns whether a variable is a string"""
if PY2:
return isinstance(obj, basestring)
else:
return isinstance(obj, str)
def isunicode(obj):
"""Returns whether a variable is a of unicode type"""
if PY2:
return isinstance(obj, unicode)
else:
return isinstance(obj, str)
if PY2:
def struct_unpack(fmt, buf):
"""Wrapper around struct.unpack handling buffer as bytes and strings"""
if isinstance(buf, (bytearray, bytes)):
return struct.unpack_from(fmt, buffer(buf))
return struct.unpack_from(fmt, buf)
else:
struct_unpack = struct.unpack # pylint: disable=C0103
def make_abc(base_class):
"""Decorator used to create a abstract base class
We use this decorator to create abstract base classes instead of
using the abc-module. The decorator makes it possible to do the
same in both Python v2 and v3 code.
"""
def wrapper(class_):
"""Wrapper"""
attrs = class_.__dict__.copy()
for attr in '__dict__', '__weakref__':
attrs.pop(attr, None) # ignore missing attributes
bases = class_.__bases__
if PY2:
attrs['__metaclass__'] = class_
else:
bases = (class_,) + bases
return base_class(class_.__name__, bases, attrs)
return wrapper
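# Illustrative usage sketch (an assumed call site, not part of the original
# module): declare an abstract base class that behaves the same way under
# Python 2 and Python 3.
#
#   from abc import ABCMeta
#
#   @make_abc(ABCMeta)
#   class ConnectionAbstract(object):
#       """Abstract base class for connection objects"""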
| gpl-2.0 |
Shekharrajak/password-alert | server/admin.py | 5 | 3396 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Administrative frontend for viewing reports and setting status of hosts."""
__author__ = '[email protected] (Drew Hintz)'
import json
import logging
import os
import auth
import datastore
import jinja2
import password_change
import webapp2
import xsrf
from google.appengine.ext import db
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
JINJA_ENVIRONMENT.globals['xsrf_token'] = xsrf.xsrf_token
class MainHandler(webapp2.RequestHandler):
"""Displays the list of recent reports from users."""
@auth.admin_authorization_required
def get(self):
query = datastore.Report.all().order('-date')
query.filter('domain =', datastore.CURRENT_DOMAIN)
reports = query.fetch(100)
if not reports:
reports = None # Conversion for templating.
template_values = {
'reports': reports,
'current_domain': datastore.CURRENT_DOMAIN,
'xsrf_token': xsrf.xsrf_token()
}
template = JINJA_ENVIRONMENT.get_template('templates/admin.html')
self.response.write(template.render(template_values))
class HostsHandler(webapp2.RequestHandler):
"""Display the list of allowed hosts."""
@auth.admin_authorization_required
def get(self):
query = datastore.Host.all()
query.filter('domain =', datastore.CURRENT_DOMAIN)
query.filter('status = ', datastore.ALLOWED)
query.order('host')
hosts = query.fetch(100)
template_values = {
'hosts': hosts,
'current_domain': datastore.CURRENT_DOMAIN
}
template = JINJA_ENVIRONMENT.get_template('templates/hosts.html')
self.response.write(template.render(template_values))
@xsrf.xsrf_protect
@auth.admin_authorization_required
def post(self):
host = datastore.Host(
key=db.Key.from_path(
'Host',
datastore.CURRENT_DOMAIN + ':' + self.request.get('host')))
host.domain = datastore.CURRENT_DOMAIN
host.host = datastore.NormalizeUrl(self.request.get('host'))
host.status = datastore.GetStatus(self.request.get('updatedHostStatusName'))
host.put()
self.response.write('{}') # core-ajax library expects a JSON response.
class PasswordHandler(webapp2.RequestHandler):
"""Expires user passwords."""
@xsrf.xsrf_protect
@auth.admin_authorization_required
def post(self):
email = self.request.get('email')
logging.info('Expiring password for: %s', email)
result = password_change.ChangePasswordAtNextLogin(email)
self.response.headers['Content-Type'] = 'application/json'
return self.response.out.write(json.dumps(result))
application = webapp2.WSGIApplication([
('/', MainHandler),
('/hosts', HostsHandler),
('/password', PasswordHandler)
])
| apache-2.0 |
samthetechie/pyFolia | venv/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/command/bdist_rpm.py | 470 | 2025 | # This is just a kludge so that bdist_rpm doesn't guess wrong about the
# distribution name and version, if the egg_info command is going to alter
# them, another kludge to allow you to build old-style non-egg RPMs, and
# finally, a kludge to track .rpm files for uploading when run on Python <2.5.
from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm
import sys, os
class bdist_rpm(_bdist_rpm):
def initialize_options(self):
_bdist_rpm.initialize_options(self)
self.no_egg = None
if sys.version<"2.5":
# Track for uploading any .rpm file(s) moved to self.dist_dir
def move_file(self, src, dst, level=1):
_bdist_rpm.move_file(self, src, dst, level)
if dst==self.dist_dir and src.endswith('.rpm'):
getattr(self.distribution,'dist_files',[]).append(
('bdist_rpm',
src.endswith('.src.rpm') and 'any' or sys.version[:3],
os.path.join(dst, os.path.basename(src)))
)
def run(self):
self.run_command('egg_info') # ensure distro name is up-to-date
_bdist_rpm.run(self)
def _make_spec_file(self):
version = self.distribution.get_version()
rpmversion = version.replace('-','_')
spec = _bdist_rpm._make_spec_file(self)
line23 = '%define version '+version
line24 = '%define version '+rpmversion
spec = [
line.replace(
"Source0: %{name}-%{version}.tar",
"Source0: %{name}-%{unmangled_version}.tar"
).replace(
"setup.py install ",
"setup.py install --single-version-externally-managed "
).replace(
"%setup",
"%setup -n %{name}-%{unmangled_version}"
).replace(line23,line24)
for line in spec
]
spec.insert(spec.index(line24)+1, "%define unmangled_version "+version)
return spec
| gpl-3.0 |
fengjiang96/tushare | tushare/stock/cons.py | 10 | 8788 | # -*- coding:utf-8 -*-
"""
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
VERSION = '0.3.6'
K_LABELS = ['D', 'W', 'M']
K_MIN_LABELS = ['5', '15', '30', '60']
K_TYPE = {'D': 'akdaily', 'W': 'akweekly', 'M': 'akmonthly'}
INDEX_LABELS = ['sh', 'sz', 'hs300', 'sz50', 'cyb', 'zxb']
INDEX_LIST = {'sh': 'sh000001', 'sz': 'sz399001', 'hs300': 'sz399300',
'sz50': 'sh000016', 'zxb': 'sz399005', 'cyb': 'sz399006'}
P_TYPE = {'http': 'http://', 'ftp': 'ftp://'}
PAGE_NUM = [38, 60, 80, 100]
FORMAT = lambda x: '%.2f' % x
DOMAINS = {'sina': 'sina.com.cn', 'sinahq': 'sinajs.cn',
'ifeng': 'ifeng.com', 'sf': 'finance.sina.com.cn',
'vsf': 'vip.stock.finance.sina.com.cn',
'idx': 'www.csindex.com.cn', '163': 'money.163.com',
'em': 'eastmoney.com', 'sseq': 'query.sse.com.cn',
'sse': 'www.sse.com.cn', 'szse': 'www.szse.cn',
'oss': '218.244.146.57',
'shibor': 'www.shibor.org'}
PAGES = {'fd': 'index.phtml', 'dl': 'downxls.php', 'jv': 'json_v2.php',
'cpt': 'newFLJK.php', 'ids': 'newSinaHy.php', 'lnews':'rollnews_ch_out_interface.php',
'ntinfo':'vCB_BulletinGather.php', 'hs300b':'000300cons.xls',
'hs300w':'000300closeweight.xls','sz50b':'000016cons.xls',
'dp':'all_fpya.php', '163dp':'fpyg.html',
'emxsg':'JS.aspx', '163fh':'jjcgph.php',
'newstock':'vRPD_NewStockIssue.php', 'zz500b':'000905cons.xls',
't_ticks':'vMS_tradedetail.php', 'dw': 'downLoad.html',
'qmd':'queryMargin.do', 'szsefc':'FrontController.szse',
'ssecq':'commonQuery.do'}
TICK_COLUMNS = ['time', 'price', 'change', 'volume', 'amount', 'type']
TODAY_TICK_COLUMNS = ['time', 'price', 'pchange', 'change', 'volume', 'amount', 'type']
DAY_TRADING_COLUMNS = ['code', 'symbol', 'name', 'changepercent',
'trade', 'open', 'high', 'low', 'settlement', 'volume', 'turnoverratio']
REPORT_COLS = ['code', 'name', 'eps', 'eps_yoy', 'bvps', 'roe',
'epcf', 'net_profits', 'profits_yoy', 'distrib', 'report_date']
FORECAST_COLS = ['code', 'name', 'type', 'report_date', 'pre_eps', 'range']
PROFIT_COLS = ['code', 'name', 'roe', 'net_profit_ratio',
'gross_profit_rate', 'net_profits', 'eps', 'business_income', 'bips']
OPERATION_COLS = ['code', 'name', 'arturnover', 'arturndays', 'inventory_turnover',
'inventory_days', 'currentasset_turnover', 'currentasset_days']
GROWTH_COLS = ['code', 'name', 'mbrg', 'nprg', 'nav', 'targ', 'epsg', 'seg']
DEBTPAYING_COLS = ['code', 'name', 'currentratio',
'quickratio', 'cashratio', 'icratio', 'sheqratio', 'adratio']
CASHFLOW_COLS = ['code', 'name', 'cf_sales', 'rateofreturn',
'cf_nm', 'cf_liabilities', 'cashflowratio']
DAY_PRICE_COLUMNS = ['date', 'open', 'high', 'close', 'low', 'volume', 'price_change', 'p_change',
'ma5', 'ma10', 'ma20', 'v_ma5', 'v_ma10', 'v_ma20', 'turnover']
INX_DAY_PRICE_COLUMNS = ['date', 'open', 'high', 'close', 'low', 'volume', 'price_change', 'p_change',
'ma5', 'ma10', 'ma20', 'v_ma5', 'v_ma10', 'v_ma20']
LIVE_DATA_COLS = ['name', 'open', 'pre_close', 'price', 'high', 'low', 'bid', 'ask', 'volume', 'amount',
'b1_v', 'b1_p', 'b2_v', 'b2_p', 'b3_v', 'b3_p', 'b4_v', 'b4_p', 'b5_v', 'b5_p',
'a1_v', 'a1_p', 'a2_v', 'a2_p', 'a3_v', 'a3_p', 'a4_v', 'a4_p', 'a5_v', 'a5_p', 'date', 'time', 's']
FOR_CLASSIFY_B_COLS = ['code','name']
FOR_CLASSIFY_W_COLS = ['date','code','weight']
THE_FIELDS = ['code','symbol','name','changepercent','trade','open','high','low','settlement','volume','turnoverratio']
TICK_PRICE_URL = '%smarket.%s/%s?date=%s&symbol=%s'
TODAY_TICKS_PAGE_URL = '%s%s/quotes_service/api/%s/CN_Transactions.getAllPageTime?date=%s&symbol=%s'
TODAY_TICKS_URL = '%s%s/quotes_service/view/%s?symbol=%s&date=%s&page=%s'
DAY_PRICE_URL = '%sapi.finance.%s/%s/?code=%s&type=last'
LIVE_DATA_URL = '%shq.%s/rn=%s&list=%s'
DAY_PRICE_MIN_URL = '%sapi.finance.%s/akmin?scode=%s&type=%s'
SINA_DAY_PRICE_URL = '%s%s/quotes_service/api/%s/Market_Center.getHQNodeData?num=80&sort=changepercent&asc=0&node=hs_a&symbol=&_s_r_a=page&page=%s'
REPORT_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/mainindex/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
FORECAST_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/performance/%s?s_i=&s_a=&s_c=&s_type=&reportdate=%s&quarter=%s&p=%s&num=%s'
PROFIT_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/profit/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
OPERATION_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/operation/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
GROWTH_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/grow/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
DEBTPAYING_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/debtpaying/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
CASHFLOW_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/cashflow/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
SHIBOR_TYPE ={'Shibor': 'Shibor数据', 'Quote': '报价数据', 'Tendency': 'Shibor均值数据',
'LPR': 'LPR数据', 'LPR_Tendency': 'LPR均值数据'}
SHIBOR_DATA_URL = '%s%s/shibor/web/html/%s?nameNew=Historical_%s_Data_%s.xls&downLoadPath=data&nameOld=%s%s.xls&shiborSrc=http://www.shibor.org/shibor/'
ALL_STOCK_BASICS_FILE = '%s%s/static/all.csv'%(P_TYPE['http'], DOMAINS['oss'])
SINA_CONCEPTS_INDEX_URL = '%smoney.%s/q/view/%s?param=class'
SINA_INDUSTRY_INDEX_URL = '%s%s/q/view/%s'
SINA_DATA_DETAIL_URL = '%s%s/quotes_service/api/%s/Market_Center.getHQNodeData?page=1&num=400&sort=symbol&asc=1&node=%s&symbol=&_s_r_a=page'
INDEX_C_COMM = 'sseportal/ps/zhs/hqjt/csi'
HS300_CLASSIFY_URL = '%s%s/%s/%s'
HIST_FQ_URL = '%s%s/corp/go.php/vMS_FuQuanMarketHistory/stockid/%s.phtml?year=%s&jidu=%s'
HIST_INDEX_URL = '%s%s/corp/go.php/vMS_MarketHistory/stockid/%s/type/S.phtml?year=%s&jidu=%s'
HIST_FQ_FACTOR_URL = '%s%s/api/json.php/BasicStockSrv.getStockFuQuanData?symbol=%s&type=hfq'
INDEX_HQ_URL = '''%shq.%s/rn=xppzh&list=sh000001,sh000002,sh000003,sh000008,sh000009,sh000010,sh000011,sh000012,sh000016,sh000017,sh000300,sz399001,sz399002,sz399003,sz399004,sz399005,sz399006,sz399100,sz399101,sz399106,sz399107,sz399108,sz399333,sz399606'''
SSEQ_CQ_REF_URL = '%s%s/assortment/stock/list/name'
ALL_STK_URL = '%s%s/all.csv'
SHIBOR_COLS = ['date', 'ON', '1W', '2W', '1M', '3M', '6M', '9M', '1Y']
QUOTE_COLS = ['date', 'bank', 'ON_B', 'ON_A', '1W_B', '1W_A', '2W_B', '2W_A', '1M_B', '1M_A',
'3M_B', '3M_A', '6M_B', '6M_A', '9M_B', '9M_A', '1Y_B', '1Y_A']
SHIBOR_MA_COLS = ['date', 'ON_5', 'ON_10', 'ON_20', '1W_5', '1W_10', '1W_20','2W_5', '2W_10', '2W_20',
'1M_5', '1M_10', '1M_20', '3M_5', '3M_10', '3M_20', '6M_5', '6M_10', '6M_20',
'9M_5', '9M_10', '9M_20','1Y_5', '1Y_10', '1Y_20']
LPR_COLS = ['date', '1Y']
LPR_MA_COLS = ['date', '1Y_5', '1Y_10', '1Y_20']
INDEX_HEADER = 'code,name,open,preclose,close,high,low,0,0,volume,amount,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,d,c,3\n'
INDEX_COLS = ['code', 'name', 'change', 'open', 'preclose', 'close', 'high', 'low', 'volume', 'amount']
HIST_FQ_COLS = ['date', 'open', 'high', 'close', 'low', 'volume', 'amount', 'factor']
HIST_FQ_FACTOR_COLS = ['code','value']
DATA_GETTING_TIPS = '[Getting data:]'
DATA_GETTING_FLAG = '#'
DATA_ROWS_TIPS = '%s rows data found.Please wait for a moment.'
DATA_INPUT_ERROR_MSG = 'date input error.'
NETWORK_URL_ERROR_MSG = '获取失败,请检查网络和URL'
DATE_CHK_MSG = '年度输入错误:请输入1989年以后的年份数字,格式:YYYY'
DATE_CHK_Q_MSG = '季度输入错误:请输入1、2、3或4数字'
TOP_PARAS_MSG = 'top有误,请输入整数或all.'
LHB_MSG = '周期输入有误,请输入数字5、10、30或60'
import sys
PY3 = (sys.version_info[0] >= 3)
def _write_head():
sys.stdout.write(DATA_GETTING_TIPS)
sys.stdout.flush()
def _write_console():
sys.stdout.write(DATA_GETTING_FLAG)
sys.stdout.flush()
def _write_tips(tip):
sys.stdout.write(DATA_ROWS_TIPS%tip)
sys.stdout.flush()
def _write_msg(msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _check_input(year, quarter):
if isinstance(year, str) or year < 1989 :
raise TypeError(DATE_CHK_MSG)
elif quarter is None or isinstance(quarter, str) or quarter not in [1, 2, 3, 4]:
raise TypeError(DATE_CHK_Q_MSG)
else:
return True
def _check_lhb_input(last):
if last not in [5, 10, 30, 60]:
raise TypeError(LHB_MSG)
else:
return True | bsd-3-clause |
f3r/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 38 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
if l is not "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
if l is not "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
    title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause |
Serag8/Bachelor | google_appengine/google/appengine/ext/ndb/django_middleware.py | 20 | 2158 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Django middleware for NDB."""
__author__ = 'James A. Morrison'
from . import eventloop, tasklets
class NdbDjangoMiddleware(object):
"""Django middleware for NDB.
To use NDB with django, add
'ndb.NdbDjangoMiddleware',
to the MIDDLEWARE_CLASSES entry in your Django settings.py file.
Or, if you are using the ndb version from the SDK, use
'google.appengine.ext.ndb.NdbDjangoMiddleware',
It's best to insert it in front of any other middleware classes,
since some other middleware may make datastore calls and those won't be
handled properly if that middleware is invoked before this middleware.
See http://docs.djangoproject.com/en/dev/topics/http/middleware/.
"""
def process_request(self, unused_request):
"""Called by Django before deciding which view to execute."""
# Compare to the first half of toplevel() in context.py.
tasklets._state.clear_all_pending()
# Create and install a new context.
ctx = tasklets.make_default_context()
tasklets.set_context(ctx)
@staticmethod
def _finish():
# Compare to the finally clause in toplevel() in context.py.
ctx = tasklets.get_context()
tasklets.set_context(None)
ctx.flush().check_success()
eventloop.run() # Ensure writes are flushed, etc.
def process_response(self, request, response):
"""Called by Django just before returning a response."""
self._finish()
return response
def process_exception(self, unused_request, unused_exception):
"""Called by Django when a view raises an exception."""
self._finish()
return None
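# Illustrative settings.py fragment (a sketch mirroring the class docstring
# above; the exact import path depends on how ndb is installed):
#
#   MIDDLEWARE_CLASSES = (
#       'google.appengine.ext.ndb.NdbDjangoMiddleware',
#       # ... any other middleware classes come after it ...
#   )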
| mit |
achang97/YouTunes | lib/python2.7/site-packages/youtube_dl/extractor/ard.py | 8 | 12072 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
qualities,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
update_url_query,
)
from ..compat import compat_etree_fromstring
class ARDMediathekIE(InfoExtractor):
IE_NAME = 'ARD:mediathek'
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.(?:daserste|rbb-online)\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
'info_dict': {
'id': '29582122',
'ext': 'mp4',
'title': 'Ich liebe das Leben trotzdem',
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
'duration': 4557,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
'info_dict': {
'id': '29522730',
'ext': 'mp4',
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
'duration': 5252,
},
'skip': 'HTTP Error 404: Not Found',
}, {
# audio
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
'info_dict': {
'id': '28488308',
'ext': 'mp3',
'title': 'Tod eines Fußballers',
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
'duration': 3240,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
'only_matching': True,
}, {
# audio
'url': 'http://mediathek.rbb-online.de/radio/Hörspiel/Vor-dem-Fest/kulturradio/Audio?documentId=30796318&topRessort=radio&bcastId=9839158',
'md5': '4e8f00631aac0395fee17368ac0e9867',
'info_dict': {
'id': '30796318',
'ext': 'mp3',
'title': 'Vor dem Fest',
'description': 'md5:c0c1c8048514deaed2a73b3a60eecacb',
'duration': 3287,
},
'skip': 'Video is no longer available',
}]
def _extract_media_info(self, media_info_url, webpage, video_id):
media_info = self._download_json(
media_info_url, video_id, 'Downloading media JSON')
formats = self._extract_formats(media_info, video_id)
if not formats:
if '"fsk"' in webpage:
raise ExtractorError(
'This video is only available after 20:00', expected=True)
elif media_info.get('_geoblocked'):
raise ExtractorError('This video is not available due to geo restriction', expected=True)
self._sort_formats(formats)
duration = int_or_none(media_info.get('_duration'))
thumbnail = media_info.get('_previewImage')
is_live = media_info.get('_isLive') is True
subtitles = {}
subtitle_url = media_info.get('_subtitleUrl')
if subtitle_url:
subtitles['de'] = [{
'ext': 'ttml',
'url': subtitle_url,
}]
return {
'id': video_id,
'duration': duration,
'thumbnail': thumbnail,
'is_live': is_live,
'formats': formats,
'subtitles': subtitles,
}
def _extract_formats(self, media_info, video_id):
type_ = media_info.get('_type')
media_array = media_info.get('_mediaArray', [])
formats = []
for num, media in enumerate(media_array):
for stream in media.get('_mediaStreamArray', []):
stream_urls = stream.get('_stream')
if not stream_urls:
continue
if not isinstance(stream_urls, list):
stream_urls = [stream_urls]
quality = stream.get('_quality')
server = stream.get('_server')
for stream_url in stream_urls:
ext = determine_ext(stream_url)
if quality != 'auto' and ext in ('f4m', 'm3u8'):
continue
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(stream_url, {
'hdcore': '3.1.1',
'plugin': 'aasp-3.1.1.69.124'
}),
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
if server and server.startswith('rtmp'):
f = {
'url': server,
'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality),
}
elif stream_url.startswith('http'):
f = {
'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality)
}
else:
continue
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
if type_ == 'audio':
f['vcodec'] = 'none'
formats.append(f)
return formats
def _real_extract(self, url):
# determine video id from url
m = re.match(self._VALID_URL, url)
document_id = None
numid = re.search(r'documentId=([0-9]+)', url)
if numid:
document_id = video_id = numid.group(1)
else:
video_id = m.group('video_id')
webpage = self._download_webpage(url, video_id)
ERRORS = (
('>Leider liegt eine Störung vor.', 'Video %s is unavailable'),
('>Der gewünschte Beitrag ist nicht mehr verfügbar.<',
'Video %s is no longer available'),
)
for pattern, message in ERRORS:
if pattern in webpage:
raise ExtractorError(message % video_id, expected=True)
if re.search(r'[\?&]rss($|[=&])', url):
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)
title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
r'<h4 class="headline">(.*?)</h4>'],
webpage, 'title')
description = self._html_search_meta(
'dcterms.abstract', webpage, 'description', default=None)
if description is None:
description = self._html_search_meta(
'description', webpage, 'meta description')
# Thumbnail is sometimes not present.
# It is in the mobile version, but that seems to use a different URL
# structure altogether.
thumbnail = self._og_search_thumbnail(webpage, default=None)
media_streams = re.findall(r'''(?x)
mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s*
"([^"]+)"''', webpage)
if media_streams:
QUALITIES = qualities(['lo', 'hi', 'hq'])
formats = []
for furl in set(media_streams):
if furl.endswith('.f4m'):
fid = 'f4m'
else:
fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl)
fid = fid_m.group(1) if fid_m else None
formats.append({
'quality': QUALITIES(fid),
'format_id': fid,
'url': furl,
})
self._sort_formats(formats)
info = {
'formats': formats,
}
else: # request JSON file
if not document_id:
video_id = self._search_regex(
r'/play/(?:config|media)/(\d+)', webpage, 'media id')
info = self._extract_media_info(
'http://www.ardmediathek.de/play/media/%s' % video_id,
webpage, video_id)
info.update({
'id': video_id,
'title': self._live_title(title) if info.get('is_live') else title,
'description': description,
'thumbnail': thumbnail,
})
return info
class ARDIE(InfoExtractor):
_VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
_TEST = {
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
'md5': 'd216c3a86493f9322545e045ddc3eb35',
'info_dict': {
'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
'id': '100',
'ext': 'mp4',
'duration': 2600,
'title': 'Die Story im Ersten: Mission unter falscher Flagge',
'upload_date': '20140804',
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'HTTP Error 404: Not Found',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
player_url = mobj.group('mainurl') + '~playerXml.xml'
doc = self._download_xml(player_url, display_id)
video_node = doc.find('./video')
upload_date = unified_strdate(xpath_text(
video_node, './broadcastDate'))
thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
formats = []
for a in video_node.findall('.//asset'):
f = {
'format_id': a.attrib['type'],
'width': int_or_none(a.find('./frameWidth').text),
'height': int_or_none(a.find('./frameHeight').text),
'vbr': int_or_none(a.find('./bitrateVideo').text),
'abr': int_or_none(a.find('./bitrateAudio').text),
'vcodec': a.find('./codecVideo').text,
'tbr': int_or_none(a.find('./totalBitrate').text),
}
if a.find('./serverPrefix').text:
f['url'] = a.find('./serverPrefix').text
f['playpath'] = a.find('./fileName').text
else:
f['url'] = a.find('./fileName').text
formats.append(f)
self._sort_formats(formats)
return {
'id': mobj.group('id'),
'formats': formats,
'display_id': display_id,
'title': video_node.find('./title').text,
'duration': parse_duration(video_node.find('./duration').text),
'upload_date': upload_date,
'thumbnail': thumbnail,
}
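# Illustrative embedding sketch (not part of the extractor; assumes the public
# youtube-dl API and reuses a URL from the tests above):
#
#   import youtube_dl
#
#   with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video'
#           '/Das-Erste/Video?documentId=29522730&bcastId=602916',
#           download=False)
#       print(info['title'])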
| mit |
mwv/scikit-learn | sklearn/preprocessing/_function_transformer.py | 163 | 2407 | from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
def _identity(X):
"""The identity function.
"""
return X
class FunctionTransformer(BaseEstimator, TransformerMixin):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
A FunctionTransformer will not do any checks on its function's output.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
Parameters
----------
func : callable, optional default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
validate : bool, optional default=True
Indicate that the input X array should be checked before calling
func. If validate is false, there will be no input validation.
If it is true, then X will be converted to a 2-dimensional NumPy
array or sparse matrix. If this conversion is not possible or X
contains NaN or infinity, an exception is raised.
accept_sparse : boolean, optional
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
    pass_y : bool, optional default=False
Indicate that transform should forward the y argument to the
inner callable.
"""
def __init__(self, func=None, validate=True,
accept_sparse=False, pass_y=False):
self.func = func
self.validate = validate
self.accept_sparse = accept_sparse
self.pass_y = pass_y
def fit(self, X, y=None):
if self.validate:
check_array(X, self.accept_sparse)
return self
def transform(self, X, y=None):
if self.validate:
X = check_array(X, self.accept_sparse)
func = self.func if self.func is not None else _identity
return func(X, *((y,) if self.pass_y else ()))
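# Illustrative usage sketch (not part of the original module): a stateless
# log1p transform for count features.
#
#   import numpy as np
#   from sklearn.preprocessing import FunctionTransformer
#
#   transformer = FunctionTransformer(np.log1p)
#   X = np.array([[0, 1], [2, 3]])
#   transformer.fit_transform(X)   # log1p applied element-wise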
| bsd-3-clause |
jtyr/ansible-modules-core | utilities/logic/pause.py | 10 | 2331 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pause
short_description: Pause playbook execution
description:
- Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt.
- "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)."
- "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts."
version_added: "0.8"
options:
minutes:
description:
- A positive number of minutes to pause for.
required: false
default: null
seconds:
description:
- A positive number of seconds to pause for.
required: false
default: null
prompt:
description:
- Optional text to use for the prompt message.
required: false
default: null
author: "Tim Bielawa (@tbielawa)"
notes:
- Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second, previously it would wait indefinitely.
'''
EXAMPLES = '''
# Pause for 5 minutes to build app cache.
- pause:
minutes: 5
# Pause until you can verify updates to an application were successful.
- pause:
# A helpful reminder of what to look out for post-update.
- pause:
prompt: "Make sure org.foo.FooOverload exception is not present"
'''
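# Another illustrative playbook fragment (a sketch using the documented
# `seconds` option):
#
# - pause:
#     seconds: 30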
| gpl-3.0 |
guerrerocarlos/odoo | addons/subscription/__openerp__.py | 261 | 1885 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Recurring Documents',
'version': '1.0',
'category': 'Tools',
'description': """
Create recurring documents.
===========================
This module allows you to create new documents and add subscriptions to those documents.
e.g. To have an invoice generated automatically periodically:
-------------------------------------------------------------
* Define a document type based on Invoice object
* Define a subscription whose source document is the document defined as
      above. Specify the interval information and the partner to be invoiced.
""",
'author': 'OpenERP SA',
'depends': ['base'],
'data': ['security/subcription_security.xml', 'security/ir.model.access.csv', 'subscription_view.xml'],
'demo': ['subscription_demo.xml',],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tenaciousjzh/titan-solr-cloud-test | zookeeper-3.3.5/contrib/zkpython/src/python/zk.py | 61 | 2528 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zookeeper, time, threading
f = open("out.log","w")
zookeeper.set_log_stream(f)
connected = False
conn_cv = threading.Condition( )
def my_connection_watcher(handle,type,state,path):
global connected, conn_cv
print "Connected, handle is ", handle
conn_cv.acquire()
connected = True
conn_cv.notifyAll()
conn_cv.release()
conn_cv.acquire()
print "Connecting to localhost:2181 -- "
handle = zookeeper.init("localhost:2181", my_connection_watcher, 10000, 0)
while not connected:
conn_cv.wait()
conn_cv.release()
def my_getc_watch( handle, type, state, path ):
print "Watch fired -- "
print type, state, path
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"};
try:
zookeeper.create(handle, "/zk-python", "data", [ZOO_OPEN_ACL_UNSAFE], 0)
zookeeper.get_children(handle, "/zk-python", my_getc_watch)
for i in xrange(5):
print "Creating sequence node ", i, " ", zookeeper.create(handle, "/zk-python/sequencenode", "data", [ZOO_OPEN_ACL_UNSAFE], zookeeper.SEQUENCE )
except:
pass
def pp_zk(handle,root, indent = 0):
"""Pretty print a zookeeper tree, starting at root"""
def make_path(child):
if root == "/":
return "/" + child
return root + "/" + child
children = zookeeper.get_children(handle, root, None)
out = ""
for i in xrange(indent):
out += "\t"
out += "|---"+root + " :: " + zookeeper.get(handle, root, None)[0]
print out
for child in children:
pp_zk(handle,make_path(child),indent+1)
print "ZNode tree -- "
pp_zk(handle,"/")
print "Getting ACL / Stat for /zk-python --"
(stat, acl) = zookeeper.get_acl(handle, "/zk-python")
print "Stat:: ", stat
print "Acl:: ", acl
| apache-2.0 |
pidydx/grr | grr/lib/aff4_objects/reports_test.py | 2 | 1849 | #!/usr/bin/env python
"""Reporting tests."""
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.aff4_objects import reports
from grr.lib.rdfvalues import client as rdf_client
class ReportsTest(test_lib.AFF4ObjectTest):
"""Test the timeline implementation."""
def testClientListReport(self):
"""Check that we can create and run a ClientList Report."""
# Create some clients.
client_ids = self.SetupClients(10)
with aff4.FACTORY.Open(
client_ids[0], token=self.token, mode="rw") as client:
interfaces = client.Schema.INTERFACES()
interfaces.Append(
addresses=[
rdf_client.NetworkAddress(
human_readable="1.1.1.1", address_type="INET")
],
mac_address="11:11:11:11:11:11",
ifname="eth0")
client.Set(interfaces)
client.Set(client.Schema.HOSTNAME("lawman"))
# Also initialize a broken client with no hostname.
with aff4.FACTORY.Open(
client_ids[1], token=self.token, mode="rw") as client:
client.Set(client.Schema.CLIENT_INFO())
# Create a report for all clients.
report = reports.ClientListReport(token=self.token)
report.Run()
self.assertEqual(len(report.results), 10)
hostnames = [x.get("Host") for x in report.results]
self.assertTrue("lawman" in hostnames)
report.SortResults("Host")
self.assertEqual(len(report.AsDict()), 10)
self.assertEqual(len(report.AsCsv().getvalue().splitlines()), 11)
self.assertEqual(len(report.AsText().getvalue().splitlines()), 10)
self.assertEqual(report.results[-1]["Interfaces"], "1.1.1.1")
self.assertEqual(len(report.broken_clients), 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
ptoraskar/django | tests/template_tests/syntax_tests/test_invalid_string.py | 440 | 2310 | from django.test import SimpleTestCase
from ..utils import setup
class InvalidStringTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'invalidstr01': '{{ var|default:"Foo" }}'})
def test_invalidstr01(self):
output = self.engine.render_to_string('invalidstr01')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, 'Foo')
@setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
def test_invalidstr02(self):
output = self.engine.render_to_string('invalidstr02')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
def test_invalidstr03(self):
output = self.engine.render_to_string('invalidstr03')
self.assertEqual(output, '')
@setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
def test_invalidstr04(self):
output = self.engine.render_to_string('invalidstr04')
self.assertEqual(output, 'No')
@setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
def test_invalidstr04_2(self):
output = self.engine.render_to_string('invalidstr04_2')
self.assertEqual(output, 'Yes')
@setup({'invalidstr05': '{{ var }}'})
def test_invalidstr05(self):
output = self.engine.render_to_string('invalidstr05')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr06': '{{ var.prop }}'})
def test_invalidstr06(self):
output = self.engine.render_to_string('invalidstr06')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
def test_invalidstr07(self):
output = self.engine.render_to_string('invalidstr07')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
| bsd-3-clause |
phlax/pootle | pootle/apps/pootle_translationproject/migrations/0006_relink_or_drop_orphan_translationprojects.py | 7 | 1197 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-08 21:27
from __future__ import unicode_literals
from django.db import migrations
def relink_or_drop_orphan_translationprojects(apps, schema_editor):
"""Relink or drop TPs with no project."""
Project = apps.get_model("pootle_project.Project")
TP = apps.get_model("pootle_translationproject.TranslationProject")
for proj_key in set(TP.objects.values_list("project_id", flat=True)):
if not Project.objects.filter(pk=proj_key).exists():
for tp in TP.objects.filter(project_id=proj_key):
proj_code = tp.pootle_path.split("/")[2]
projects = Project.objects.filter(code=proj_code)
if projects.exists():
tp.project = projects.first()
tp.save()
else:
tp.delete()
class Migration(migrations.Migration):
dependencies = [
('pootle_translationproject', '0005_remove_empty_translationprojects'),
('pootle_project', '0014_just_rename_label_for_choice'),
]
operations = [
migrations.RunPython(relink_or_drop_orphan_translationprojects),
]
| gpl-3.0 |
sertac/django | django/contrib/gis/gdal/__init__.py | 327 | 2635 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import ( # NOQA
GDALException, OGRException, OGRIndexError, SRSException, check_err,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDALRaster', 'GDAL_VERSION', 'SpatialReference', 'CoordTransform',
'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
| bsd-3-clause |
shakilkanji/rmc | data/processor.py | 3 | 23484 | import rmc.shared.constants as c
import rmc.shared.util as rmc_util
import rmc.models as m
import argparse
from datetime import datetime
import dateutil.parser
import glob
import json
import mongoengine as me
import os
import time
import re
import sys
def import_departments():
def clean_opendata_department(department):
return {
'id': department['subject'].lower(),
'name': department['name'],
'faculty_id': department['faculty_id'].lower(),
'url': 'http://ugradcalendar.uwaterloo.ca/courses/{0}'.format(
department['subject'].lower()),
}
file_name = os.path.join(os.path.dirname(__file__),
c.DEPARTMENTS_DATA_DIR, 'opendata2_departments.json')
with open(file_name, 'r') as f:
data = json.load(f)
for department in data:
department = clean_opendata_department(department)
if m.Department.objects.with_id(department['id']):
continue
m.Department(**department).save()
print 'imported departments:', m.Department.objects.count()
def import_courses():
def get_department_name_from_file_path(file_path):
return re.findall(r'([^/]*).json$', file_path)[0].lower()
def build_keywords(department, number, course_title):
department = department.lower()
number = str(number)
course_title = course_title.lower()
course_title = re.sub(r'\s+', ' ', course_title)
# Separate on hypens in title for keywords list
course_title = re.sub(r'-', ' ', course_title)
keywords = [department, number, department + number]
keywords.extend(course_title.split(' '))
return keywords
def clean_description(des):
return des or "No description"
def clean_opendata_course(dep, course):
number = course['catalog_number'].lower()
return {
'id': '%s%s' % (dep, number),
'department_id': dep,
'number': number,
'name': course['title'],
'description': clean_description(course['description']),
'_keywords': build_keywords(
dep, number, course['title']),
'antireqs': course['antirequisites'],
'coreqs': course['corequisites'],
'prereqs': course['prerequisites'],
}
added = 0
updated = 0
for file_name in glob.glob(os.path.join(os.path.dirname(__file__),
c.OPENDATA2_COURSES_DATA_DIR, '*.json')):
with open(file_name, 'r') as f:
courses = json.load(f)
dep_name = get_department_name_from_file_path(file_name)
if not m.Department.objects.with_id(dep_name):
print 'could not find department %s' % dep_name
continue
# The input data can be a list or dict (with course number as key)
if isinstance(courses, dict):
courses = courses.values()
# For each course, update it if it already exists, else insert it
for course in courses:
if not course:
continue
course = clean_opendata_course(dep_name, course)
old_course = m.Course.objects.with_id(course['id'])
if old_course:
for key, value in course.iteritems():
if key == 'id':
continue
old_course[key] = value
old_course.save()
updated += 1
else:
m.Course(**course).save()
added += 1
# Update courses with terms offered data
with open(os.path.join(os.path.dirname(__file__),
c.TERMS_OFFERED_DATA_DIR, 'terms_offered.txt')) as f:
def map_term(term):
return {
'W': '01',
'S': '05',
'F': '09',
}[term]
terms_offered_by_course = json.load(f)
for course_id, terms_offered in terms_offered_by_course.items():
course = m.Course.objects.with_id(course_id)
if not course:
continue
course.terms_offered = map(map_term, terms_offered)
course.save()
for course in m.Course.objects:
if course.prereqs:
course.prereqs = normalize_reqs_str(course.prereqs)
if course.coreqs:
course.coreqs = normalize_reqs_str(course.coreqs)
if course.antireqs:
course.antireqs = normalize_reqs_str(course.antireqs)
course.save()
print 'OpenData courses, added: %d, updated: %d' % (added, updated)
print 'Total courses:', m.Course.objects.count()
def normalize_reqs_str(str_):
"""Normalize the prereq string of a course
TODO(mack): handle the following special cases:
1) "CS/ECE 121"
"""
# Split on non-alphanumeric characters (includes chars we split on)
    old_splits = re.compile(r'(\W+)').split(str_)
# Newly normalized splits
new_splits = []
# Last department id encountered as we traverse prereq str
last_dep_id = None
# Traverse the splits
for split in old_splits:
new_split = split
if last_dep_id and re.findall(r'^[0-9]{3}[a-z]?$', split.lower()):
# If there's a previous dep id and this matches the number portion
# of a course, check if this is a valid course
# NOTE: we're not validating whether the course exists since
# we should still normalize to make the output to look consistent,
# even when the course does not exist
new_split = last_dep_id.upper() + split
elif (re.findall('^[A-Z]+', split) and
m.Department.objects.with_id(split.lower())):
# We check it's uppercase, so we don't have false positives like
# "Earth" that was part of "Earth Science student"
last_dep_id = split.lower()
# Do not include the department id since it will be included
# with the course we find
new_split = ''
new_splits.append(new_split)
# We're here if this split matches a department id
# Increment idx by 1 more to skip the next non-alphanum character
new_str = ''.join(new_splits)
# While removing department ids, we could have left redundant spaces
# (e.g. "CS 247" => " CS247", so remove them now.
return re.sub('\s+', ' ', new_str).strip()
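# Illustrative example of the normalization above (assumes a department with
# id "cs" exists in the database):
#   normalize_reqs_str("CS 230 or CS 234")  =>  "CS230 or CS234"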
# TODO(mack): should return (first_name, last_name)
def get_prof_name(prof_name_menlo):
matches = re.findall(r'^(.+?), (.+)$', prof_name_menlo)[0]
return {
'first_name': matches[1],
'last_name': matches[0],
}
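# Illustrative example (the name is made up):
#   get_prof_name("Smith, John") => {'first_name': 'John', 'last_name': 'Smith'}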
def import_professors():
# NOTE: not safe to drop table anymore since users can add their own
# professors now
def clean_professor(professor):
def clean_name(name):
return re.sub(r'\s+', ' ', name.strip())
prof_name = get_prof_name(professor['prof_name'])
return {
'first_name': clean_name(prof_name['first_name']),
'last_name': clean_name(prof_name['last_name']),
}
file_names = glob.glob(os.path.join(os.path.dirname(__file__),
c.REVIEWS_DATA_DIR, '*.txt'))
for file_name in file_names:
with open(file_name, 'r') as f:
data = json.load(f)
professor = clean_professor(data)
# Since user's can now add professors, gotta first check
# that the professor does not aleady exist
if not m.Professor.objects(**professor):
m.Professor(**professor).save()
print 'imported professors:', m.Professor.objects.count()
def import_reviews():
m.MenloCourse.objects._collection.drop()
def clean_review(review):
course = review['class']
if course is None:
return {}
course = course.lower()
matches = re.findall(r'([a-z]+).*?([0-9]{3}[a-z]?)(?:[^0-9]|$)',
course)
# TODO(mack): investigate if we are missing any good courses with
# this regex
if len(matches) != 1 or len(matches[0]) != 2:
return {}
department_id = matches[0][0].lower()
course_number = matches[0][1].lower()
course_id = department_id + course_number
prof_name = get_prof_name(data['prof_name'])
prof_id = m.Professor.get_id_from_name(
prof_name['first_name'], prof_name['last_name'])
clean_review = {
'professor_id': prof_id,
'course_id': course_id,
'course_review': m.CourseReview(),
'professor_review': m.ProfessorReview(),
}
def normalize_rating(menlo_rating):
# normalize 1..5 to Yes/No:
# 1,2 => No, 3 => None, 4,5 => Yes
try:
menlo_rating = int(menlo_rating)
if menlo_rating <= 2:
return 0
elif menlo_rating >= 4:
return 1
else:
return None
            except (TypeError, ValueError):
                # Treat missing or non-numeric ratings as "no answer"
                return None
# TODO(mack): include 'r_helpful'?
if 'r_clarity' in review:
clean_review['professor_review'].clarity = \
normalize_rating(review['r_clarity'])
if 'r_easy' in review:
clean_review['course_review'].easiness = \
normalize_rating(review['r_easy'])
if 'r_interest' in review:
clean_review['course_review'].interest = \
normalize_rating(review['r_interest'])
clean_review['professor_review'].comment = review['comment']
clean_review['professor_review'].comment_date = datetime.strptime(
review['date'], '%m/%d/%y')
return clean_review
file_names = glob.glob(os.path.join(os.path.dirname(__file__),
c.REVIEWS_DATA_DIR, '*.txt'))
for file_name in file_names:
with open(file_name, 'r') as f:
data = json.load(f)
for review in data['ratings']:
review = clean_review(review)
                if ('course_id' not in review
                        or not m.Course.objects.with_id(review['course_id'])):
                    # Skip ratings that do not map to a known course
                    continue
try:
m.MenloCourse(**review).save()
except:
print 'failed on review', review
print 'imported reviews:', m.MenloCourse.objects.count()
def group_similar_exam_sections(exam_sections):
"""Groups together exam sections that have the same date, time,
and location.
Args:
exam_sections: A list of sections for an exam as returned by OpenData's
examschedule.json endpoint.
Returns a consolidated list of sections in the same format, where each item
has a unique date/time/location.
"""
def order_sections(sections):
sections_list = sorted(sections.split(', '))
return ', '.join(sections_list)
def is_similar(first, second):
return (first.get('start_time') == second.get('start_time') and
first.get('end_time') == second.get('end_time') and
first.get('date') == second.get('date') and
first.get('location') == second.get('location'))
different_sections = []
for section in exam_sections:
similar_exams = [s for s in different_sections if
is_similar(s, section)]
if similar_exams:
similar_exams[0]['section'] += ', ' + section.get('section')
else:
different_sections.append(section)
for section in different_sections:
section['section'] = order_sections(section.get('section'))
return different_sections
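# Illustrative example (hypothetical sections sharing the same sitting):
#   group_similar_exam_sections([
#       {'section': '002', 'date': '2014-04-17', 'start_time': '7:30 PM',
#        'end_time': '10:00 PM', 'location': 'DC 1350'},
#       {'section': '001', 'date': '2014-04-17', 'start_time': '7:30 PM',
#        'end_time': '10:00 PM', 'location': 'DC 1350'},
#   ])
#   => a single entry whose 'section' is '001, 002'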
def import_opendata_exam_schedules():
"""Import exam schedules data from the OpenData API"""
today = datetime.today()
file_name = os.path.join(
os.path.dirname(__file__),
'%s/uw_exams_%s.txt' % (c.EXAMS_DATA_DIR,
today.strftime('%Y_%m_%d')))
processed_exams = []
errors = []
with open(file_name, 'r') as f:
data = json.load(f)
# Data will contain something like this:
#
# [{
# "course": "AFM 131",
# "sections": [
# {
# "date": "2014-04-17",
# "day": "Thursday",
# "end_time": "10:00 PM",
# "location": "DC 1350",
# "notes": "",
# "section": "001",
# "start_time": "7:30 PM"
# },
# {
# "date": "",
# "day": "",
# "end_time": "",
# "location": "",
# "notes": "See blah blah blah",
# "section": "081 Online",
# "start_time": ""
# }
# ]
# }, ...]
#
# TODO(jlfwong): Refactor this to separate concerns of file IO, db
# storage, and data processing so that the data processing step can be
# tested, and this example can be moved into tests.
for exam_data in data:
course_id = m.Course.code_to_id(exam_data.get('course'))
grouped_sections = group_similar_exam_sections(
exam_data.get('sections', []))
for section_data in grouped_sections:
section = section_data.get('section')
day = section_data.get('day')
# Catch these to be more detailed in our errors
if section.endswith('Online'):
errors.append("Skipping online course: %s %s"
% (course_id, section))
continue
if 'Exam removed' in day:
errors.append("Skipping removed course: %s" % (course_id))
continue
if 'See http:' in day:
errors.append("Skipping url for course: %s" % (course_id))
continue
# E.g. 2014-04-17
date = section_data.get('date')
# E.g. 11:30 AM
start_time = section_data.get('start_time')
end_time = section_data.get('end_time')
# E.g. 2014-04-17 7:30 PM
# 2014-04-17 10:00 PM
date_format = "%Y-%m-%d %I:%M %p"
start_date_string = "%s %s" % (date, start_time)
end_date_string = "%s %s" % (date, end_time)
try:
start_date = rmc_util.eastern_to_utc(
datetime.fromtimestamp(
time.mktime(
time.strptime(start_date_string,
date_format))))
end_date = rmc_util.eastern_to_utc(
datetime.fromtimestamp(
time.mktime(
time.strptime(end_date_string, date_format))))
except Exception as exp:
errors.append("Could not get date (%s)\n%s" %
(section_data, exp))
continue
exam = m.Exam(
course_id=course_id,
sections=section,
start_date=start_date,
end_date=end_date,
location=section_data.get('location'),
info_known=bool(start_date and end_date),
)
processed_exams.append(exam)
# Do some sanity checks to make sure OpenData is being reasonable.
# This number is arbitrary and just reminds us to double-check
# TODO(Sandy): This ranges from 775 (Fall & Winter) to 325 (Spring)
season = m.Term.get_season_from_id(m.Term.get_current_term_id())
EXAM_ITEMS_THRESHOLD = 325 if season == 'Spring' else 775
if len(processed_exams) < EXAM_ITEMS_THRESHOLD:
raise ValueError("processor.py: too few exam items %d (< %d)"
% (len(processed_exams), EXAM_ITEMS_THRESHOLD))
# Everything should be fine by here, drop the old exams collection
m.Exam.objects.delete()
for exam in processed_exams:
exam.save()
return errors
def _opendata_to_section_meeting(data, term_year):
"""Converts OpenData class section info to a SectionMeeting instance.
Args:
data: An object from the `classes` field returned by OpenData.
term_year: The year this term is in.
"""
date = data['date']
days = []
if date['weekdays']:
days = re.findall(r'[A-Z][a-z]?',
date['weekdays'].replace('U', 'Su'))
# TODO(david): Actually use the term begin/end dates when we get nulls
date_format = '%m/%d'
start_date = datetime.strptime(date['start_date'], date_format).replace(
year=term_year) if date['start_date'] else None
end_date = datetime.strptime(date['end_date'], date_format).replace(
year=term_year) if date['end_date'] else None
time_format = '%H:%M'
# TODO(david): DRY-up
start_seconds = None
if date['start_time']:
start_time = datetime.strptime(date['start_time'], time_format)
start_seconds = (start_time -
start_time.replace(hour=0, minute=0, second=0)).seconds
end_seconds = None
if date['end_time']:
end_time = datetime.strptime(date['end_time'], time_format)
end_seconds = (end_time -
end_time.replace(hour=0, minute=0, second=0)).seconds
meeting = m.SectionMeeting(
start_seconds=start_seconds,
end_seconds=end_seconds,
days=days,
start_date=start_date,
end_date=end_date,
building=data['location']['building'],
room=data['location']['room'],
is_tba=date['is_tba'],
is_cancelled=date['is_cancelled'],
is_closed=date['is_closed'],
)
if data['instructors']:
last_name, first_name = data['instructors'][0].split(',')
prof_id = m.Professor.get_id_from_name(first_name, last_name)
if not m.Professor.objects.with_id(prof_id):
m.Professor(id=prof_id, first_name=first_name,
last_name=last_name).save()
meeting.prof_id = prof_id
return meeting
def _clean_section(data):
"""Converts OpenData section info to a dict that can be consumed by
Section.
"""
course_id = m.Course.code_to_id(data['subject'] + data['catalog_number'])
term_id = m.Term.get_term_id_from_quest_id(data['term'])
section_type, section_num = data['section'].split(' ')
last_updated = dateutil.parser.parse(data['last_updated'])
year = m.Term.get_year_from_id(term_id)
meetings = map(lambda klass: _opendata_to_section_meeting(klass, year),
data['classes'])
return {
'course_id': course_id,
'term_id': term_id,
'section_type': section_type.upper(),
'section_num': section_num,
'campus': data['campus'],
'enrollment_capacity': data['enrollment_capacity'],
'enrollment_total': data['enrollment_total'],
'waiting_capacity': data['waiting_capacity'],
'waiting_total': data['waiting_total'],
'meetings': meetings,
'class_num': str(data['class_number']),
'units': data['units'],
'note': data['note'],
'last_updated': last_updated,
}
def _clean_scholarship(data):
"""Converts OpenData scholarship data in to a dict that can be used by
Scholarship
"""
return {
'id': str(data['id']),
'title': data['title'],
'description': data['description'],
'citizenship': data['citizenship'],
'programs': data['programs'],
'eligibility': data['application']['eligibility'],
'instructions': data['application']['instructions'],
'enrollment_year': data['application']['enrollment_year'],
'contact': data['contact'],
'link': data['link'],
}
def import_opendata_sections():
num_added = 0
num_updated = 0
filenames = glob.glob(os.path.join(os.path.dirname(__file__),
c.SECTIONS_DATA_DIR, '*.json'))
for filename in filenames:
with open(filename, 'r') as f:
data = json.load(f)
for section_data in data:
section_dict = _clean_section(section_data)
# TODO(david): Is there a more natural way of doing an
# upsert with MongoEngine?
existing_section = m.Section.objects(
course_id=section_dict['course_id'],
term_id=section_dict['term_id'],
section_type=section_dict['section_type'],
section_num=section_dict['section_num'],
).first()
if existing_section:
for key, val in section_dict.iteritems():
existing_section[key] = val
existing_section.save()
num_updated += 1
else:
m.Section(**section_dict).save()
num_added += 1
print 'Added %s sections and updated %s sections' % (
num_added, num_updated)
def import_scholarships():
num_added = 0
num_updated = 0
filenames = glob.glob(os.path.join(os.path.dirname(__file__),
c.SCHOLARSHIPS_DATA_DIR, '*.json'))
for filename in filenames:
with open(filename, 'r') as f:
data = json.load(f).get('data')
for scholarship_data in data:
scholarship_dict = _clean_scholarship(scholarship_data)
existing_scholarship = m.Scholarship.objects(
id=scholarship_dict['id']
).first()
if existing_scholarship:
for key, val in scholarship_dict.iteritems():
if key != 'id':
existing_scholarship[key] = val
existing_scholarship.save()
num_updated += 1
else:
m.Scholarship(**scholarship_dict).save()
num_added += 1
print 'Added %s scholarships and updated %s scholarships' % (
num_added, num_updated)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    supported_modes = ['professors', 'departments', 'courses', 'reviews',
                       'exams', 'sections', 'scholarships']
parser.add_argument('mode', help='one of %s' % ','.join(supported_modes))
args = parser.parse_args()
me.connect(c.MONGO_DB_RMC, host=c.MONGO_HOST, port=c.MONGO_PORT)
if args.mode == 'professors':
import_professors()
elif args.mode == 'departments':
import_departments()
elif args.mode == 'courses':
import_courses()
elif args.mode == 'reviews':
import_reviews()
elif args.mode == 'exams':
import_opendata_exam_schedules()
elif args.mode == 'sections':
import_opendata_sections()
elif args.mode == 'scholarships':
import_scholarships()
else:
sys.exit('The mode %s is not supported' % args.mode)
| mit |
bdrung/audacity | lib-src/lv2/lv2/plugins/eg-metro.lv2/waflib/Tools/d.py | 278 | 2076 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Errors
from waflib.TaskGen import taskgen_method,feature,extension
from waflib.Tools import d_scan,d_config
from waflib.Tools.ccroot import link_task,stlink_task
class d(Task.Task):
color='GREEN'
run_str='${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}'
scan=d_scan.scan
class d_with_header(d):
run_str='${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}'
class d_header(Task.Task):
color='BLUE'
run_str='${D} ${D_HEADER} ${SRC}'
class dprogram(link_task):
run_str='${D_LINKER} ${LINKFLAGS} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${DLIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB}'
inst_to='${BINDIR}'
class dshlib(dprogram):
inst_to='${LIBDIR}'
class dstlib(stlink_task):
pass
@extension('.d','.di','.D')
def d_hook(self,node):
ext=Utils.destos_to_binfmt(self.env.DEST_OS)=='pe'and'obj'or'o'
out='%s.%d.%s'%(node.name,self.idx,ext)
def create_compiled_task(self,name,node):
task=self.create_task(name,node,node.parent.find_or_declare(out))
try:
self.compiled_tasks.append(task)
except AttributeError:
self.compiled_tasks=[task]
return task
if getattr(self,'generate_headers',None):
tsk=create_compiled_task(self,'d_with_header',node)
tsk.outputs.append(node.change_ext(self.env['DHEADER_ext']))
else:
tsk=create_compiled_task(self,'d',node)
return tsk
@taskgen_method
def generate_header(self,filename):
try:
self.header_lst.append([filename,self.install_path])
except AttributeError:
self.header_lst=[[filename,self.install_path]]
@feature('d')
def process_header(self):
for i in getattr(self,'header_lst',[]):
node=self.path.find_resource(i[0])
if not node:
raise Errors.WafError('file %r not found on d obj'%i[0])
self.create_task('d_header',node,node.change_ext('.di'))
| gpl-2.0 |
frankbp/robotframework-selenium2library | test/run_tests.py | 42 | 3301 | #!/usr/bin/env python
import env
import os
import sys
from subprocess import Popen, call
from tempfile import TemporaryFile
from run_unit_tests import run_unit_tests
ROBOT_ARGS = [
'--doc', 'SeleniumSPacceptanceSPtestsSPwithSP%(browser)s',
'--outputdir', '%(outdir)s',
'--variable', 'browser:%(browser)s',
'--escape', 'space:SP',
'--report', 'none',
'--log', 'none',
#'--suite', 'Acceptance.Keywords.Textfields',
'--loglevel', 'DEBUG',
'--pythonpath', '%(pythonpath)s',
]
REBOT_ARGS = [
'--outputdir', '%(outdir)s',
'--name', '%(browser)sSPAcceptanceSPTests',
'--escape', 'space:SP',
'--critical', 'regression',
'--noncritical', 'inprogress',
]
ARG_VALUES = {'outdir': env.RESULTS_DIR, 'pythonpath': env.SRC_DIR}
def acceptance_tests(interpreter, browser, args):
ARG_VALUES['browser'] = browser.replace('*', '')
start_http_server()
runner = {'python': 'pybot', 'jython': 'jybot', 'ipy': 'ipybot'}[interpreter]
if os.sep == '\\':
runner += '.bat'
execute_tests(runner, args)
stop_http_server()
return process_output()
def start_http_server():
server_output = TemporaryFile()
Popen(['python', env.HTTP_SERVER_FILE ,'start'],
stdout=server_output, stderr=server_output)
def execute_tests(runner, args):
if not os.path.exists(env.RESULTS_DIR):
os.mkdir(env.RESULTS_DIR)
command = [runner] + [arg % ARG_VALUES for arg in ROBOT_ARGS] + args + [env.ACCEPTANCE_TEST_DIR]
print ''
print 'Starting test execution with command:\n' + ' '.join(command)
syslog = os.path.join(env.RESULTS_DIR, 'syslog.txt')
call(command, shell=os.sep=='\\', env=dict(os.environ, ROBOT_SYSLOG_FILE=syslog))
def stop_http_server():
call(['python', env.HTTP_SERVER_FILE, 'stop'])
def process_output():
print
if _has_robot_27():
call(['python', os.path.join(env.RESOURCES_DIR, 'statuschecker.py'),
os.path.join(env.RESULTS_DIR, 'output.xml')])
rebot = 'rebot' if os.sep == '/' else 'rebot.bat'
rebot_cmd = [rebot] + [ arg % ARG_VALUES for arg in REBOT_ARGS ] + \
[os.path.join(ARG_VALUES['outdir'], 'output.xml') ]
rc = call(rebot_cmd, env=os.environ)
if rc == 0:
print 'All critical tests passed'
else:
print '%d critical test%s failed' % (rc, 's' if rc != 1 else '')
return rc
def _has_robot_27():
try:
from robot.result import ExecutionResult
    except ImportError:
        return False
return True
def _exit(rc):
sys.exit(rc)
def _help():
print 'Usage: python run_tests.py python|jython browser [options]'
print
print 'See README.txt for details.'
return 255
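# Example invocation (interpreter/browser values are illustrative; see
# README.txt for the supported options):
#   python run_tests.py python firefox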
def _run_unit_tests():
print 'Running unit tests'
failures = run_unit_tests()
if failures != 0:
print '\n%d unit tests failed - not running acceptance tests!' % failures
else:
print 'All unit tests passed'
return failures
if __name__ == '__main__':
if not len(sys.argv) > 2:
_exit(_help())
unit_failures = _run_unit_tests()
if unit_failures:
_exit(unit_failures)
interpreter = sys.argv[1]
browser = sys.argv[2].lower()
args = sys.argv[3:]
if browser != 'unit':
_exit(acceptance_tests(interpreter, browser, args))
| apache-2.0 |
DataMarket/multilingual | multilingual/query.py | 1 | 28615 | """
Django-multilingual: a QuerySet subclass for models with translatable
fields.
This file contains the implementation for QSRF Django.
"""
import datetime
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query import QuerySet, Q, ValuesQuerySet
from django.db.models.sql.query import Query
from django.db.models.sql.datastructures import (
EmptyResultSet,
Empty,
MultiJoin)
from django.db.models.sql.constants import *
from django.db.models.sql.where import WhereNode, EverythingNode, AND, OR
try:
# handle internal API changes in Django rev. 9700
from django.db.models.sql.where import Constraint
def constraint_tuple(alias, col, field, lookup_type, value):
return (Constraint(alias, col, field), lookup_type, value)
except ImportError:
# backwards compatibility, for Django versions 1.0 to rev. 9699
def constraint_tuple(alias, col, field, lookup_type, value):
return (alias, col, field, lookup_type, value)
from multilingual.languages import (
get_translation_table_alias,
get_language_id_list,
get_default_language,
get_translated_field_alias,
get_language_id_from_id_or_code)
__ALL__ = ['MultilingualModelQuerySet']
class MultilingualQuery(Query):
def __init__(self, model, connection, where=WhereNode):
self.extra_join = {}
self.include_translation_data = True
extra_select = {}
super(MultilingualQuery, self).__init__(model, connection, where=where)
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
master_table_name = opts.db_table
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
if hasattr(opts, 'translation_model'):
master_table_name = opts.db_table
for language_id in get_language_id_list():
for fname in [f.attname for f in translation_opts.fields]:
table_alias = get_translation_table_alias(trans_table_name,
language_id)
field_alias = get_translated_field_alias(fname,
language_id)
extra_select[field_alias] = qn2(table_alias) + '.' + qn2(fname)
self.add_extra(extra_select, None, None, None, None, None)
self._trans_extra_select_count = len(self.extra_select)
def clone(self, klass=None, **kwargs):
defaults = {
'extra_join': self.extra_join,
'include_translation_data': self.include_translation_data,
}
defaults.update(kwargs)
return super(MultilingualQuery, self).clone(klass=klass, **defaults)
def pre_sql_setup(self):
"""Adds the JOINS and SELECTS for fetching multilingual data.
"""
super(MultilingualQuery, self).pre_sql_setup()
if not self.include_translation_data:
return
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if hasattr(opts, 'translation_model'):
master_table_name = opts.db_table
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
for language_id in get_language_id_list():
table_alias = get_translation_table_alias(trans_table_name,
language_id)
trans_join = ('LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_id = %s))'
% (qn2(translation_opts.db_table),
qn2(table_alias),
qn2(table_alias),
qn(master_table_name),
qn2(self.model._meta.pk.column),
qn2(table_alias),
language_id))
self.extra_join[table_alias] = trans_join
def get_from_clause(self):
"""Add the JOINS for related multilingual fields filtering.
"""
result = super(MultilingualQuery, self).get_from_clause()
if not self.include_translation_data:
return result
from_ = result[0]
for join in self.extra_join.values():
from_.append(join)
return (from_, result[1])
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None, process_extras=True):
"""Copied from add_filter to generate WHERES for translation fields.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from 'parts', if necessary.
if len(parts) == 1 or parts[-1] not in self.query_terms:
lookup_type = 'exact'
else:
lookup_type = parts.pop()
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif (value == '' and lookup_type == 'exact' and
connection.features.interprets_empty_strings_as_nulls):
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = trim or not negate
try:
field, target, opts, join_list, last, extra_filters = self.setup_joins(
parts, opts, alias, True, allow_many, can_reuse=can_reuse,
negate=negate, process_extras=process_extras)
except MultiJoin, e:
self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse)
return
#NOTE: here comes Django Multilingual
if hasattr(opts, 'translation_model'):
field_name = parts[-1]
if field_name == 'pk':
field_name = opts.pk.name
translation_opts = opts.translation_model._meta
if field_name in translation_opts.translated_fields.keys():
field, model, direct, m2m = opts.get_field_by_name(field_name)
if model == opts.translation_model:
language_id = translation_opts.translated_fields[field_name][1]
if language_id is None:
language_id = get_default_language()
master_table_name = opts.db_table
trans_table_alias = get_translation_table_alias(
model._meta.db_table, language_id)
new_table = (master_table_name + "__" + trans_table_alias)
self.where.add(constraint_tuple(new_table, field.column, field, lookup_type, value), connector)
return
final = len(join_list)
penultimate = last.pop()
if penultimate == final:
penultimate = last.pop()
if trim and len(join_list) > 1:
extra = join_list[penultimate:]
join_list = join_list[:penultimate]
final = penultimate
penultimate = last.pop()
col = self.alias_map[extra[0]][LHS_JOIN_COL]
for alias in extra:
self.unref_alias(alias)
else:
col = target.column
alias = join_list[-1]
while final > 1:
# An optimization: if the final join is against the same column as
# we are comparing against, we can go back one step in the join
# chain and compare against the lhs of the join instead (and then
# repeat the optimization). The result, potentially, involves less
# table joins.
join = self.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
join_list = join_list[:-1]
final -= 1
if final == penultimate:
penultimate = last.pop()
if (lookup_type == 'isnull' and value is True and not negate and
final > 1):
# If the comparison is against NULL, we need to use a left outer
# join when connecting to the previous model. We make that
# adjustment here. We don't do this unless needed as it's less
# efficient at the database level.
self.promote_alias(join_list[penultimate])
if connector == OR:
# Some joins may need to be promoted when adding a new filter to a
# disjunction. We walk the list of new joins and where it diverges
# from any previous joins (ref count is 1 in the table list), we
# make the new additions (and any existing ones not used in the new
# join list) an outer join.
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
table_promote = False
join_promote = False
for join in join_it:
table = table_it.next()
if join == table and self.alias_refcount[join] > 1:
continue
join_promote = self.promote_alias(join)
if table != join:
table_promote = self.promote_alias(table)
break
self.promote_alias_chain(join_it, join_promote)
self.promote_alias_chain(table_it, table_promote)
self.where.add(constraint_tuple(alias, col, field, lookup_type, value), connector)
if negate:
self.promote_alias_chain(join_list)
if lookup_type != 'isnull':
if final > 1:
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = self.where_class()
entry.add(constraint_tuple(alias, j_col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
break
elif not (lookup_type == 'in' and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
entry = self.where_class()
entry.add(constraint_tuple(alias, col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
if can_reuse is not None:
can_reuse.update(join_list)
if process_extras:
for filter in extra_filters:
self.add_filter(filter, negate=negate, can_reuse=can_reuse,
process_extras=False)
def _setup_joins_with_translation(self, names, opts, alias,
dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None,
negate=False, process_extras=True):
"""
This is based on a full copy of Query.setup_joins because
currently I see no way to handle it differently.
TO DO: there might actually be a way, by splitting a single
multi-name setup_joins call into separate calls. Check it.
-- [email protected]
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters).
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except NameError:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
#NOTE: Start Django Multilingual specific code
if hasattr(opts, 'translation_model'):
translation_opts = opts.translation_model._meta
if model == opts.translation_model:
language_id = translation_opts.translated_fields[name][1]
if language_id is None:
language_id = get_default_language()
#TODO: check alias
master_table_name = opts.db_table
trans_table_alias = get_translation_table_alias(
model._meta.db_table, language_id)
new_table = (master_table_name + "__" + trans_table_alias)
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
trans_join = ('LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_id = %s))'
% (qn2(model._meta.db_table),
qn2(new_table),
qn2(new_table),
qn(master_table_name),
qn2(model._meta.pk.column),
qn2(new_table),
language_id))
self.extra_join[new_table] = trans_join
target = field
continue
#NOTE: End Django Multilingual specific code
elif model:
# The field lives on a base class of the current model.
for int_model in opts.get_base_chain(model):
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
if not self.include_translation_data:
return super(MultilingualQuery, self).setup_joins(names, opts, alias,
dupe_multis, allow_many,
allow_explicit_fk,
can_reuse, negate,
process_extras)
else:
return self._setup_joins_with_translation(names, opts, alias, dupe_multis,
allow_many, allow_explicit_fk,
can_reuse, negate, process_extras)
def get_count(self):
# optimize for the common special case: count without any
# filters
if ((not (self.select or self.where))
and self.include_translation_data):
obj = self.clone(extra_select = {},
extra_join = {},
include_translation_data = False)
return obj.get_count()
else:
return super(MultilingualQuery, self).get_count()
class MultilingualModelQuerySet(QuerySet):
"""
A specialized QuerySet that knows how to handle translatable
fields in ordering and filtering methods.
"""
def __init__(self, model=None, query=None):
query = query or MultilingualQuery(model, connection)
super(MultilingualModelQuerySet, self).__init__(model, query)
def for_language(self, language_id_or_code):
"""
Set the default language for all objects returned with this
query.
"""
clone = self._clone()
clone._default_language = get_language_id_from_id_or_code(language_id_or_code)
return clone
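    # Illustrative usage (the model name is hypothetical):
    #   Article.objects.all().for_language('en')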
def iterator(self):
"""
Add the default language information to all returned objects.
"""
default_language = getattr(self, '_default_language', None)
for obj in super(MultilingualModelQuerySet, self).iterator():
obj._default_language = default_language
yield obj
def _clone(self, klass=None, **kwargs):
"""
Override _clone to preserve additional information needed by
MultilingualModelQuerySet.
"""
clone = super(MultilingualModelQuerySet, self)._clone(klass, **kwargs)
clone._default_language = getattr(self, '_default_language', None)
return clone
def order_by(self, *field_names):
if hasattr(self.model._meta, 'translation_model'):
trans_opts = self.model._meta.translation_model._meta
new_field_names = []
for field_name in field_names:
prefix = ''
if field_name[0] == '-':
prefix = '-'
field_name = field_name[1:]
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
real_name = get_translated_field_alias(field.attname,
language_id)
new_field_names.append(prefix + real_name)
else:
new_field_names.append(prefix + field_name)
return super(MultilingualModelQuerySet, self).extra(order_by=new_field_names)
else:
return super(MultilingualModelQuerySet, self).order_by(*field_names)
def values(self, *fields):
if hasattr(self.model._meta, 'translation_model'):
extra_select = {}
trans_opts = self.model._meta.translation_model._meta
trans_table_name = trans_opts.db_table
qn2 = self.query.connection.ops.quote_name
for field_name in fields:
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
table_alias = get_translation_table_alias(trans_table_name,
language_id)
extra_select[field_name] = qn2(table_alias) + '.' + qn2(field.attname)
# this maps columns to required field_names
result = self.extra(select = extra_select)
# and it returns MultilingualModelQuerySet instance, so we have to super it
return super(MultilingualModelQuerySet, result).values(*fields)
else:
return super(MultilingualModelQuerySet, self).values(*fields)
def values_list(self, *fields, **kwargs):
if hasattr(self.model._meta, 'translation_model'):
extra_select = {}
trans_opts = self.model._meta.translation_model._meta
trans_table_name = trans_opts.db_table
qn2 = self.query.connection.ops.quote_name
for field_name in fields:
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
table_alias = get_translation_table_alias(trans_table_name,
language_id)
extra_select[field_name] = qn2(table_alias) + '.' + qn2(field.attname)
# this maps columns to required field_names
result = self.extra(select = extra_select)
            # and it returns a MultilingualModelQuerySet instance, so we have to super it
return super(MultilingualModelQuerySet, result).values_list(*fields, **kwargs)
else:
return super(MultilingualModelQuerySet, self).values_list(*fields, **kwargs)
| mit |
scifiswapnil/Project-LoCatr | lib/python2.7/site-packages/django/db/models/base.py | 16 | 75879 | from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection,
connections, router, transaction,
)
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.signals import (
class_prepared, post_init, post_save, pre_init, pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
@python_2_unicode_compatible
class Deferred(object):
def __repr__(self):
return str('<Deferred field>')
def __str__(self):
return str('<Deferred field>')
DEFERRED = Deferred()
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
classcell = attrs.pop('__classcell__', None)
if classcell is not None:
new_attrs['__classcell__'] = classcell
new_class = super_new(cls, name, bases, new_attrs)
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes |= set(base.__dict__.keys())
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
new_class.add_to_class(field.name, copy.deepcopy(field))
if base_meta and base_meta.abstract and not abstract:
new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes]
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added
# to the model at that point.
for index in new_class._meta.indexes:
if not index.name:
index.set_name_with_model(new_class)
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers or cls._requires_legacy_default_manager():
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
class_prepared.send(sender=cls)
def _requires_legacy_default_manager(cls): # RemovedInDjango20Warning
opts = cls._meta
if opts.manager_inheritance_from_future:
return False
future_default_manager = opts.default_manager
# Step 1: Locate a manager that would have been promoted
# to default manager with the legacy system.
for manager in opts.managers:
originating_model = manager._originating_model
if (cls is originating_model or cls._meta.proxy or
originating_model._meta.abstract):
if manager is not cls._default_manager and not opts.default_manager_name:
warnings.warn(
"Managers from concrete parents will soon qualify as default "
"managers if they appear before any other managers in the "
"MRO. As a result, '{legacy_default_manager}' declared on "
"'{legacy_default_manager_model}' will no longer be the "
"default manager for '{model}' in favor of "
"'{future_default_manager}' declared on "
"'{future_default_manager_model}'. "
"You can redeclare '{legacy_default_manager}' on '{cls}' "
"to keep things the way they are or you can switch to the new "
"behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
legacy_default_manager=manager.name,
legacy_default_manager_model=manager._originating_model._meta.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
opts.default_manager_name = manager.name
opts._expire_cache()
break
# Step 2: Since there are managers but none of them qualified as
# default managers under the legacy system (meaning that there are
# managers from concrete parents that would be promoted under the
# new system), we need to create a new Manager instance for the
# 'objects' attribute as a deprecation shim.
else:
# If the "future" default manager was auto created there is no
# point warning the user since it's basically the same manager.
if not future_default_manager.auto_created:
warnings.warn(
"Managers from concrete parents will soon qualify as "
"default managers. As a result, the 'objects' manager "
"won't be created (or recreated) automatically "
"anymore on '{model}' and '{future_default_manager}' "
"declared on '{future_default_manager_model}' will be "
"promoted to default manager. You can declare "
"explicitly `objects = models.Manager()` on '{cls}' "
"to keep things the way they are or you can switch "
"to the new behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
return True
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes
        # values in by args, and instantiation for iteration is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
for prop in tuple(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if prop in property_names or opts.get_field(prop):
if kwargs[prop] is not _DEFERRED:
_setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [values.pop() if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
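    # Illustrative sketch (not part of the original source; 'Book' and 'summary'
    # are assumed names): deferring a field leaves its attname out of __dict__,
    # so it is reported here:
    #   obj = Book.objects.defer('summary').first()
    #   obj.get_deferred_fields()   # -> {'summary'}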
def refresh_from_db(self, using=None, fields=None):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
        The 'raw' argument tells save_base not to save any parent
        models and not to make any changes to the values before saving. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, we set
                # the attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
update_pk = meta.auto_field and not pk_set
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
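    # Sketch (names assumed): for a field declared with choices, Django exposes
    # this as get_<field>_display(), e.g. a 'shirt_size' field with the choice
    # ('L', 'Large') gives person.get_shirt_size_display() -> 'Large'.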
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
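    # Sketch (names assumed): for a DateTimeField 'pub_date', Django surfaces
    # this as entry.get_next_by_pub_date() / entry.get_previous_by_pub_date(),
    # optionally with extra filter kwargs, e.g. entry.get_next_by_pub_date(status='live').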
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
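    # Illustrative shape of the return value (hypothetical 'Booking' model with
    # unique_together = ('room', 'date') and a 'title' field declared
    # unique_for_date='start'):
    #   unique_checks -> [(Booking, ('room', 'date')), (Booking, ('id',))]
    #   date_checks   -> [(Booking, 'date', 'title', 'start')]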
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if (lookup_value is None or
(lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = (
cls._check_id_field() +
cls._check_field_name_clashes() +
cls._check_model_name_db_lookup_clashes()
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect a clash between a user-defined non-unique
            # field "id" and the automatically added unique field "id", both
            # defined on the same model. This special case is considered in
            # _check_id_field, and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith('_') or model_name.endswith('_'):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E023'
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E024'
)
)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if LOOKUP_SEP not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or non-existent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES.keys():
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, six.string_types):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
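# Sketch of how these helpers surface (names assumed): if a model 'Answer' sets
# order_with_respect_to='question', instances of the related 'Question' model
# get question.get_answer_order() and question.set_answer_order([id1, id2, ...])
# via make_foreign_order_accessors() below.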
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
curry(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
curry(method_set_order, model)
)
########
# MISC #
########
def model_unpickle(model_id):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
| mit |
SgfPythonDevs/tchristell-raspi | scripts/Gmail.py | 2 | 4325 | #!/usr/bin/env python
# Gmail.py
# Checks for new mail using IMAPclient and gmail account
# Uses callback to react to push button to send text message
from imapclient import IMAPClient
import time
import RPi.GPIO as GPIO
# Flag to enable debugging statements
DEBUG = True
# Used for IMAP mail retrieval
HOSTNAME = 'imap.gmail.com'
USERNAME = '[email protected]'
PASSWORD = 'password'
MAILBOX = 'Inbox'
#Loop timer for mail check
MAIL_CHECK_FREQUENCY = 60
# SMTPLIB uses this info for sending text
global EMAIL_USER
EMAIL_USER = "tlcruns"
global EMAIL_PASSWORD
EMAIL_PASSWORD = "password"
global FROM_EMAIL_ADDRESS
FROM_EMAIL_ADDRESS = "[email protected]"
global EMAIL_TO_ADDRESS
EMAIL_TO_ADDRESS = "[email protected]"
global CELL_TO_ADDRESS
CELL_TO_ADDRESS = "[email protected]"
# Flag to set number of emails
FIRST_TIME = True
# Only define one button to trigger text message
buttonOne = 17 #Connected to 3.3 volts
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Only using GREEN_LED to make things simple. RED left in for
# example
GREEN_LED = 4
RED_LED = 23
# set both LED pins to output
GPIO.setup(RED_LED, GPIO.OUT)
GPIO.setup(GREEN_LED, GPIO.OUT)
# When we push buttonOne it connects +3.3 volts to input pin 17
# GPIO.PUD_DOWN "pulls" the pin low (ground) so it can detect
# the "high" that the button sends when pushed
GPIO.setup(buttonOne, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# Create callback function for button one
def text_Function(Channel):
send_email("Test", " Button One Pressed", CELL_TO_ADDRESS)
# Add callback function to GPIO.Rising event on buttonOne (add bouncetime=300)
GPIO.add_event_detect(buttonOne, GPIO.RISING, callback=text_Function, bouncetime=300)
# ----------------------------------------------------------------------------
# send_email()
# this uses the smtplib library to generate emails, using vtext to send text
# messages for this demo
# ----------------------------------------------------------------------------
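# Example call (a sketch reusing the constants defined above, not taken from the
# original script):
#   send_email("Test", "Hello from the Pi", CELL_TO_ADDRESS)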
def send_email(sub, text, to):
import smtplib
user = EMAIL_USER
pwd = EMAIL_PASSWORD
FROM = FROM_EMAIL_ADDRESS
TO = [to]
SUBJECT = sub
TEXT = text
# Prepare actual message
message = """\From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server = smtplib.SMTP("smtp.gmail.com:587")
server.ehlo()
server.starttls()
server.login(user, pwd)
server.sendmail(FROM, TO, message)
server.close()
print "successfully sent the mail to: {}".format(to)
except:
print("Can't send emailto: {}".format(to))
# ----------------------------------------------------------------------------
# loop()
# loop() logs into the gmail account using IMAPClient and checks the number of
# unread email messages. If the count is greater than last time, it lights the
# green LED (GPIO 4) for 60 seconds, which is the loop timer.
# ----------------------------------------------------------------------------
def loop():
global FIRST_TIME
global NEWMAIL_OFFSET
server = IMAPClient(HOSTNAME, use_uid=True, ssl=True)
server.login(USERNAME, PASSWORD)
if DEBUG:
        print('Logging in as ' + USERNAME)
select_info = server.select_folder(MAILBOX)
print('%d messages in INBOX' % select_info['EXISTS'])
folder_status = server.folder_status(MAILBOX, 'UNSEEN')
newmails = int(folder_status['UNSEEN'])
if FIRST_TIME:
FIRST_TIME = False
NEWMAIL_OFFSET = newmails
print('first time and newmail_offset is ', NEWMAIL_OFFSET)
if newmails > NEWMAIL_OFFSET:
print('newmails is ', newmails, ' and newmailoffset is ', NEWMAIL_OFFSET)
NEWMAIL_OFFSET = newmails
GPIO.output(GREEN_LED, True)
GPIO.output(RED_LED, False)
if DEBUG:
print "You have", newmails, "New emails"
else:
print('in else and newmail_offset is ', NEWMAIL_OFFSET)
GPIO.output(GREEN_LED, False)
GPIO.output(RED_LED, True)
server.logout()
time.sleep(MAIL_CHECK_FREQUENCY)
if __name__ == '__main__':
try:
print 'Press Ctrl-C to quit.'
while True:
loop()
finally:
GPIO.cleanup()
| mit |
skarphed/skarphed | admin/src/skarphedadmin/gui/YesNoPage.py | 1 | 2349 | #!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import pygtk
pygtk.require("2.0")
import gtk
from skarphedadmin.glue.lng import _
class YesNoPage(gtk.Frame):
def __init__(self, par, message, callback):
gtk.Frame.__init__(self, _("Yes/No"))
self.par = par
self.hbox = gtk.HBox()
self.vbox = gtk.VBox()
self.dummy = gtk.Label("")
self.label = gtk.Label(message)
        self.yes = gtk.Button(stock=gtk.STOCK_YES)
self.no = gtk.Button(stock=gtk.STOCK_NO)
self.hbox.pack_start(self.yes)
self.hbox.pack_start(self.no)
self.vbox.pack_start(self.label,False)
self.vbox.pack_start(self.hbox,False)
self.vbox.pack_start(self.dummy,True)
self.vbox.set_spacing(30)
self.alignment = gtk.Alignment(0.5,0.5,0.5,0.5)
self.alignment.add(self.vbox)
self.add(self.alignment)
self.callback = callback
self.yes.connect('clicked', self.yes_callback)
self.no.connect('clicked', self.no_callback)
self.getApplication().getMainWindow().openDialogPane(self)
def no_callback(self, button, data=None):
self.getApplication().getMainWindow().closeDialogPane()
def yes_callback(self, button, data=None):
if self.callback:
self.callback()
self.getApplication().getMainWindow().closeDialogPane()
def getPar(self):
return self.par
def getApplication(self):
return self.par.getApplication()
| agpl-3.0 |
mcflugen/dakota-experiments | dakota_utils/models/tests/test_hydrotrend.py | 2 | 4020 | #!/usr/bin/env python
#
# Tests for dakota_utils.models.hydrotrend.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper ([email protected])
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.models.hydrotrend import HydroTrend
def setup_module():
print('HydroTrend tests:')
os.environ['_test_hydrotrend_dir'] = tempfile.mkdtemp()
os.chdir(os.environ['_test_hydrotrend_dir'])
global h
h = HydroTrend()
def teardown_module():
shutil.rmtree(os.environ['_test_hydrotrend_dir'])
def test_HydroTrend_no_arguments():
'''
    Tests that initializing with no arguments creates the input and output directories.
'''
assert_true(os.path.exists(h.input_dir))
assert_true(os.path.exists(h.output_dir))
def test_HydroTrend_set_input_dir():
'''
Tests setting the input directory on init.
'''
os.chdir(os.environ['_test_hydrotrend_dir'])
input_dir = '__hydro_in'
h = HydroTrend(input_dir)
assert_equal(h.input_dir, input_dir)
def test_HydroTrend_get_input_dir():
'''
Tests getting the input directory.
'''
input_dir = 'HYDRO_IN' # the default
assert_equal(os.path.basename(h.input_dir), input_dir)
def test_HydroTrend_set_output_dir():
'''
Tests setting the output directory on init.
'''
os.chdir(os.environ['_test_hydrotrend_dir'])
output_dir = '__hydro_out'
h = HydroTrend(None, output_dir)
assert_equal(h.output_dir, output_dir)
def test_HydroTrend_get_output_dir():
'''
Tests getting the output directory.
'''
output_dir = 'HYDRO_OUTPUT' # the default
assert_equal(os.path.basename(h.output_dir), output_dir)
def test_HydroTrend_get_input_file():
'''
Tests getting the input file name.
'''
input_file = 'HYDRO.IN' # the default
assert_equal(h.input_file, input_file)
def test_HydroTrend_set_input_file():
'''
Tests setting the input file name.
'''
input_file = '__hydro.in'
h.input_file = input_file
assert_equal(h.input_file, input_file)
def test_HydroTrend_get_input_template():
'''
Tests getting the input template name.
'''
input_template = 'HYDRO.IN.template' # the default
assert_equal(h.input_template, input_template)
def test_HydroTrend_set_input_template():
'''
Tests setting the input template name.
'''
input_template = '__hydro.in.template'
h.input_template = input_template
assert_equal(h.input_template, input_template)
def test_HydroTrend_get_hypsometry_file():
'''
Tests getting the hypsometry file name.
'''
hypsometry_file = 'HYDRO0.HYPS' # the default
assert_equal(h.hypsometry_file, hypsometry_file)
def test_HydroTrend_set_hypsometry_file():
'''
Tests setting the hypsometry file name.
'''
hypsometry_file = '__hydro0.hyps'
h.hypsometry_file = hypsometry_file
assert_equal(h.hypsometry_file, hypsometry_file)
def test_HydroTrend_get_output_files():
'''
Tests getting the tuple of output file names.
'''
output_files = ('HYDROASCII.QS') # the default
assert_equal(h.output_files, output_files)
def test_HydroTrend_set_output_files():
'''
Tests setting the tuple of output file names.
'''
output_files = ('foo', 'bar', 'baz')
h.output_files = output_files
assert_equal(h.output_files, output_files)
def test_get_response_statistic():
'''
Tests getting the current response_statistic.
'''
rstat = 'mean' # the default
assert_equal(h.response_statistic, rstat)
def test_set_response_statistic():
'''
Tests setting the response_statistic.
'''
rstat = 'sum'
h.response_statistic = rstat
assert_equal(h.response_statistic, rstat)
@raises(TypeError)
def test_load_zero_arguments():
'''
Tests load() when no argument is passed.
'''
r = h.load()
def test_load_does_not_exist():
'''
Tests load() when a nonexistent output file is defined.
'''
r = h.load('vfnqeubnuen.f')
assert_is_none(r)
| mit |
jakevdp/megaman | megaman/embedding/tests/test_embeddings.py | 4 | 1798 | """General tests for embeddings"""
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from itertools import product
import numpy as np
from numpy.testing import assert_raises, assert_allclose
from megaman.embedding import (Isomap, LocallyLinearEmbedding,
LTSA, SpectralEmbedding)
from megaman.geometry.geometry import Geometry
EMBEDDINGS = [Isomap, LocallyLinearEmbedding, LTSA, SpectralEmbedding]
# # TODO: make estimator_checks pass!
# def test_estimator_checks():
# from sklearn.utils.estimator_checks import check_estimator
# for Embedding in EMBEDDINGS:
# yield check_estimator, Embedding
def test_embeddings_fit_vs_transform():
rand = np.random.RandomState(42)
X = rand.rand(100, 5)
geom = Geometry(adjacency_kwds = {'radius':1.0},
affinity_kwds = {'radius':1.0})
def check_embedding(Embedding, n_components):
model = Embedding(n_components=n_components,
geom=geom, random_state=rand)
embedding = model.fit_transform(X)
assert model.embedding_.shape == (X.shape[0], n_components)
assert_allclose(embedding, model.embedding_)
for Embedding in EMBEDDINGS:
for n_components in [1, 2, 3]:
yield check_embedding, Embedding, n_components
def test_embeddings_bad_arguments():
rand = np.random.RandomState(32)
X = rand.rand(100, 3)
def check_bad_args(Embedding):
# no radius set
embedding = Embedding()
assert_raises(ValueError, embedding.fit, X)
# unrecognized geometry
embedding = Embedding(radius=2, geom='blah')
assert_raises(ValueError, embedding.fit, X)
for Embedding in EMBEDDINGS:
yield check_bad_args, Embedding
| bsd-2-clause |
m0ppers/arangodb | 3rdParty/boost/1.61.0/tools/build/src/build_system.py | 11 | 33823 | # Status: mostly ported. Missing is --out-xml support, 'configure' integration
# and some FIXME.
# Base revision: 64351
# Copyright 2003, 2005 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2003, 2004, 2005, 2006, 2007 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from b2.build.engine import Engine
from b2.manager import Manager
from b2.util.path import glob
from b2.build import feature, property_set
import b2.build.virtual_target
from b2.build.targets import ProjectTarget
from b2.util.sequence import unique
import b2.build.build_request
from b2.build.errors import ExceptionWithUserContext
import b2.tools.common
from b2.build.toolset import using
import b2.build.project as project
import b2.build.virtual_target as virtual_target
import b2.build.build_request as build_request
import b2.util.regex
from b2.manager import get_manager
from b2.util import cached
from b2.util import option
import bjam
import os
import sys
import re
################################################################################
#
# Module global data.
#
################################################################################
# Flag indicating we should display additional debugging information related to
# locating and loading Boost Build configuration files.
debug_config = False
# The cleaning is tricky. Say, if user says 'bjam --clean foo' where 'foo' is a
# directory, then we want to clean targets which are in 'foo' as well as those
# in any children Jamfiles under foo but not in any unrelated Jamfiles. To
# achieve this we collect a list of projects under which cleaning is allowed.
project_targets = []
# Virtual targets obtained when building main targets references on the command
# line. When running 'bjam --clean main_target' we want to clean only files
# belonging to that main target so we need to record which targets are produced
# for it.
results_of_main_targets = []
# Was an XML dump requested?
out_xml = False
# Default toolset & version to be used in case no other toolset has been used
# explicitly by either the loaded configuration files, the loaded project build
# scripts or an explicit toolset request on the command line. If not specified,
# an arbitrary default will be used based on the current host OS. This value,
# while not strictly necessary, has been added to allow testing Boost-Build's
# default toolset usage functionality.
default_toolset = None
default_toolset_version = None
################################################################################
#
# Public rules.
#
################################################################################
# Returns the property set with the free features from the currently processed
# build request.
#
def command_line_free_features():
    # FIXME: this looks like an incomplete port -- it returns the function
    # object itself rather than the free-feature property set recorded for the
    # currently processed build request.
    return command_line_free_features
# Sets the default toolset & version to be used in case no other toolset has
# been used explicitly by either the loaded configuration files, the loaded
# project build scripts or an explicit toolset request on the command line. For
# more detailed information see the comment related to used global variables.
#
def set_default_toolset(toolset, version=None):
    # Without 'global', these assignments would only create locals and the
    # module-level defaults would never actually change.
    global default_toolset, default_toolset_version
    default_toolset = toolset
    default_toolset_version = version
pre_build_hook = []
def add_pre_build_hook(callable):
pre_build_hook.append(callable)
post_build_hook = None
def set_post_build_hook(callable):
    global post_build_hook
    post_build_hook = callable
################################################################################
#
# Local rules.
#
################################################################################
# Returns actual Jam targets to be used for executing a clean request.
#
def actual_clean_targets(targets):
# Construct a list of projects explicitly detected as targets on this build
# system run. These are the projects under which cleaning is allowed.
for t in targets:
if isinstance(t, b2.build.targets.ProjectTarget):
project_targets.append(t.project_module())
# Construct a list of targets explicitly detected on this build system run
# as a result of building main targets.
targets_to_clean = set()
for t in results_of_main_targets:
# Do not include roots or sources.
targets_to_clean.update(virtual_target.traverse(t))
to_clean = []
for t in get_manager().virtual_targets().all_targets():
# Remove only derived targets.
if t.action():
p = t.project()
if t in targets_to_clean or should_clean_project(p.project_module()):
to_clean.append(t)
return [t.actualize() for t in to_clean]
_target_id_split = re.compile("(.*)//(.*)")
# Given a target id, try to find and return the corresponding target. This is
# only invoked when there is no Jamfile in ".". This code somewhat duplicates
# code in project-target.find but we can not reuse that code without a
# project-targets instance.
#
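# Illustration (hypothetical ids): "libs/regex//boost_regex" is split on "//"
# into a project part ("libs/regex") and a target part ("boost_regex"), while a
# plain id such as "install" is looked up directly as a project in ".".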
def find_target(target_id):
projects = get_manager().projects()
m = _target_id_split.match(target_id)
if m:
pm = projects.find(m.group(1), ".")
else:
pm = projects.find(target_id, ".")
if pm:
result = projects.target(pm)
if m:
result = result.find(m.group(2))
return result
def initialize_config_module(module_name, location=None):
get_manager().projects().initialize(module_name, location)
# Helper rule used to load configuration files. Loads the first configuration
# file with the given 'filename' at 'path' into module with name 'module-name'.
# Not finding the requested file may or may not be treated as an error depending
# on the must-find parameter. Returns a normalized path to the loaded
# configuration file or nothing if no file was loaded.
#
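# Illustrative call (assumed arguments, not part of the real flow):
#   load_config('user-config', 'user-config.jam', [os.path.expanduser("~")])
# would look for ~/user-config.jam and, if found, load it into the
# 'user-config' project module and return the file's real path.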
def load_config(module_name, filename, paths, must_find=False):
if debug_config:
print "notice: Searching '%s' for '%s' configuration file '%s." \
% (paths, module_name, filename)
where = None
for path in paths:
t = os.path.join(path, filename)
if os.path.exists(t):
where = t
break
if where:
where = os.path.realpath(where)
if debug_config:
print "notice: Loading '%s' configuration file '%s' from '%s'." \
% (module_name, filename, where)
# Set source location so that path-constant in config files
# with relative paths work. This is of most importance
# for project-config.jam, but may be used in other
# config files as well.
        attributes = get_manager().projects().attributes(module_name)
attributes.set('source-location', os.path.dirname(where), True)
get_manager().projects().load_standalone(module_name, where)
else:
msg = "Configuration file '%s' not found in '%s'." % (filename, path)
if must_find:
get_manager().errors()(msg)
elif debug_config:
print msg
return where
# Loads all the configuration files used by Boost Build in the following order:
#
# -- test-config --
# Loaded only if specified on the command-line using the --test-config
# command-line parameter. It is ok for this file not to exist even if
# specified. If this configuration file is loaded, regular site and user
# configuration files will not be. If a relative path is specified, file is
# searched for in the current folder.
#
# -- site-config --
# Always named site-config.jam. Will only be found if located on the system
# root path (Windows), /etc (non-Windows), user's home folder or the Boost
# Build path, in that order. Not loaded in case the test-config configuration
# file is loaded or the --ignore-site-config command-line option is specified.
#
# -- user-config --
# Named user-config.jam by default or may be named explicitly using the
# --user-config command-line option or the BOOST_BUILD_USER_CONFIG environment
# variable. If named explicitly the file is looked for from the current working
# directory and if the default one is used then it is searched for in the
# user's home directory and the Boost Build path, in that order. Not loaded in
# case either the test-config configuration file is loaded or an empty file
# name is explicitly specified. If the file name has been given explicitly then
# the file must exist.
#
# Test configurations have been added primarily for use by Boost Build's
# internal unit testing system but may be used freely in other places as well.
#
def load_configuration_files():
# Flag indicating that site configuration should not be loaded.
ignore_site_config = "--ignore-site-config" in sys.argv
initialize_config_module("test-config")
test_config = None
for a in sys.argv:
m = re.match("--test-config=(.*)$", a)
if m:
test_config = b2.util.unquote(m.group(1))
break
if test_config:
where = load_config("test-config", os.path.basename(test_config), [os.path.dirname(test_config)])
if where:
if debug_config:
print "notice: Regular site and user configuration files will"
print "notice: be ignored due to the test configuration being loaded."
user_path = [os.path.expanduser("~")] + bjam.variable("BOOST_BUILD_PATH")
site_path = ["/etc"] + user_path
if os.name in ["nt"]:
site_path = [os.getenv("SystemRoot")] + user_path
if debug_config and not test_config and ignore_site_config:
print "notice: Site configuration files will be ignored due to the"
print "notice: --ignore-site-config command-line option."
initialize_config_module("site-config")
if not test_config and not ignore_site_config:
load_config('site-config', 'site-config.jam', site_path)
initialize_config_module('user-config')
if not test_config:
# Here, user_config has value of None if nothing is explicitly
# specified, and value of '' if user explicitly does not want
# to load any user config.
user_config = None
for a in sys.argv:
m = re.match("--user-config=(.*)$", a)
if m:
user_config = m.group(1)
break
if user_config is None:
user_config = os.getenv("BOOST_BUILD_USER_CONFIG")
# Special handling for the case when the OS does not strip the quotes
# around the file name, as is the case when using Cygwin bash.
user_config = b2.util.unquote(user_config)
explicitly_requested = user_config
if user_config is None:
user_config = "user-config.jam"
if user_config:
if explicitly_requested:
user_config = os.path.abspath(user_config)
if debug_config:
print "notice: Loading explicitly specified user configuration file:"
print " " + user_config
load_config('user-config', os.path.basename(user_config), [os.path.dirname(user_config)], True)
else:
load_config('user-config', os.path.basename(user_config), user_path)
else:
if debug_config:
print "notice: User configuration file loading explicitly disabled."
# We look for project-config.jam from "." upward. I am not sure this is
# 100% right decision, we might as well check for it only alongside the
# Jamroot file. However:
# - We need to load project-config.jam before Jamroot
# - We probably need to load project-config.jam even if there is no Jamroot
# - e.g. to implement automake-style out-of-tree builds.
if os.path.exists("project-config.jam"):
file = ["project-config.jam"]
else:
file = b2.util.path.glob_in_parents(".", ["project-config.jam"])
if file:
initialize_config_module('project-config', os.path.dirname(file[0]))
load_config('project-config', "project-config.jam", [os.path.dirname(file[0])], True)
# Autoconfigure toolsets based on any instances of --toolset=xx,yy,...zz or
# toolset=xx,yy,...zz in the command line. May return additional properties to
# be processed as if they had been specified by the user.
#
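# For example (hypothetical toolsets), "bjam --toolset=gcc-4.9,msvc toolset=clang"
# yields requests for gcc-4.9, msvc and clang; any toolset/version pair not yet
# configured is auto-configured below via using().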
def process_explicit_toolset_requests():
extra_properties = []
option_toolsets = [e for option in b2.util.regex.transform(sys.argv, "^--toolset=(.*)$")
for e in option.split(',')]
feature_toolsets = [e for option in b2.util.regex.transform(sys.argv, "^toolset=(.*)$")
for e in option.split(',')]
for t in option_toolsets + feature_toolsets:
# Parse toolset-version/properties.
(toolset_version, toolset, version) = re.match("(([^-/]+)-?([^/]+)?)/?.*", t).groups()
if debug_config:
print "notice: [cmdline-cfg] Detected command-line request for '%s': toolset= %s version=%s" \
% (toolset_version, toolset, version)
# If the toolset is not known, configure it now.
known = False
if toolset in feature.values("toolset"):
known = True
if known and version and not feature.is_subvalue("toolset", toolset, "version", version):
known = False
# TODO: we should do 'using $(toolset)' in case no version has been
# specified and there are no versions defined for the given toolset to
# allow the toolset to configure its default version. For this we need
# to know how to detect whether a given toolset has any versions
# defined. An alternative would be to do this whenever version is not
# specified but that would require that toolsets correctly handle the
# case when their default version is configured multiple times which
# should be checked for all existing toolsets first.
if not known:
if debug_config:
print "notice: [cmdline-cfg] toolset '%s' not previously configured; attempting to auto-configure now" % toolset_version
if version is not None:
using(toolset, version)
else:
using(toolset)
else:
if debug_config:
print "notice: [cmdline-cfg] toolset '%s' already configured" % toolset_version
# Make sure we get an appropriate property into the build request in
# case toolset has been specified using the "--toolset=..." command-line
# option form.
if not t in sys.argv and not t in feature_toolsets:
if debug_config:
print "notice: [cmdline-cfg] adding toolset=%s) to the build request." % t ;
extra_properties += "toolset=%s" % t
return extra_properties
# Returns 'true' if the given 'project' is equal to or is a (possibly indirect)
# child to any of the projects requested to be cleaned in this build system run.
# Returns 'false' otherwise. Expects the .project-targets list to have already
# been constructed.
#
@cached
def should_clean_project(project):
if project in project_targets:
return True
else:
parent = get_manager().projects().attribute(project, "parent-module")
if parent and parent != "user-config":
return should_clean_project(parent)
else:
return False
################################################################################
#
# main()
# ------
#
################################################################################
def main():
sys.argv = bjam.variable("ARGV")
# FIXME: document this option.
if "--profiling" in sys.argv:
import cProfile
r = cProfile.runctx('main_real()', globals(), locals(), "stones.prof")
import pstats
stats = pstats.Stats("stones.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_callers(20)
return r
else:
try:
return main_real()
except ExceptionWithUserContext, e:
e.report()
def main_real():
global debug_config, out_xml
debug_config = "--debug-configuration" in sys.argv
out_xml = any(re.match("^--out-xml=(.*)$", a) for a in sys.argv)
engine = Engine()
global_build_dir = option.get("build-dir")
manager = Manager(engine, global_build_dir)
import b2.build.configure as configure
if "--version" in sys.argv:
from b2.build import version
version.report()
return
# This module defines types and generator and what not,
# and depends on manager's existence
import b2.tools.builtin
b2.tools.common.init(manager)
load_configuration_files()
# Load explicitly specified toolset modules.
extra_properties = process_explicit_toolset_requests()
# Load the actual project build script modules. We always load the project
# in the current folder so 'use-project' directives have any chance of
# being seen. Otherwise, we would not be able to refer to subprojects using
# target ids.
current_project = None
projects = get_manager().projects()
if projects.find(".", "."):
current_project = projects.target(projects.load("."))
# Load the default toolset module if no other has already been specified.
if not feature.values("toolset"):
dt = default_toolset
dtv = None
if default_toolset:
dtv = default_toolset_version
else:
dt = "gcc"
if os.name == 'nt':
dt = "msvc"
# FIXME:
#else if [ os.name ] = MACOSX
#{
# default-toolset = darwin ;
#}
print "warning: No toolsets are configured."
print "warning: Configuring default toolset '%s'." % dt
print "warning: If the default is wrong, your build may not work correctly."
print "warning: Use the \"toolset=xxxxx\" option to override our guess."
print "warning: For more configuration options, please consult"
print "warning: http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html"
using(dt, dtv)
# Parse command line for targets and properties. Note that this requires
# that all project files already be loaded.
(target_ids, properties) = build_request.from_command_line(sys.argv[1:] + extra_properties)
# Check that we actually found something to build.
if not current_project and not target_ids:
get_manager().errors()("no Jamfile in current directory found, and no target references specified.")
# FIXME:
# EXIT
# Flags indicating that this build system run has been started in order to
# clean existing instead of create new targets. Note that these are not the
# final flag values as they may get changed later on due to some special
# targets being specified on the command line.
clean = "--clean" in sys.argv
cleanall = "--clean-all" in sys.argv
# List of explicitly requested files to build. Any target references read
# from the command line parameter not recognized as one of the targets
# defined in the loaded Jamfiles will be interpreted as an explicitly
# requested file to build. If any such files are explicitly requested then
# only those files and the targets they depend on will be built and they
# will be searched for among targets that would have been built had there
# been no explicitly requested files.
explicitly_requested_files = []
# List of Boost Build meta-targets, virtual-targets and actual Jam targets
# constructed in this build system run.
targets = []
virtual_targets = []
actual_targets = []
explicitly_requested_files = []
# Process each target specified on the command-line and convert it into
# internal Boost Build target objects. Detect special clean target. If no
    # main Boost Build targets were explicitly requested use the current project
# as the target.
for id in target_ids:
if id == "clean":
clean = 1
else:
t = None
if current_project:
t = current_project.find(id, no_error=1)
else:
t = find_target(id)
if not t:
print "notice: could not find main target '%s'" % id
print "notice: assuming it's a name of file to create " ;
explicitly_requested_files.append(id)
else:
targets.append(t)
if not targets:
targets = [projects.target(projects.module_name("."))]
# FIXME: put this BACK.
## if [ option.get dump-generators : : true ]
## {
## generators.dump ;
## }
# We wish to put config.log in the build directory corresponding
# to Jamroot, so that the location does not differ depending on
# directory where we do build. The amount of indirection necessary
# here is scary.
first_project = targets[0].project()
first_project_root_location = first_project.get('project-root')
first_project_root_module = manager.projects().load(first_project_root_location)
first_project_root = manager.projects().target(first_project_root_module)
first_build_build_dir = first_project_root.build_dir()
configure.set_log_file(os.path.join(first_build_build_dir, "config.log"))
virtual_targets = []
global results_of_main_targets
# Expand properties specified on the command line into multiple property
# sets consisting of all legal property combinations. Each expanded property
# set will be used for a single build run. E.g. if multiple toolsets are
# specified then requested targets will be built with each of them.
# The expansion is being performed as late as possible so that the feature
# validation is performed after all necessary modules (including project targets
# on the command line) have been loaded.
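    # For instance (illustrative), "toolset=gcc,msvc variant=debug" expands into
    # roughly two property sets -- <toolset>gcc/<variant>debug and
    # <toolset>msvc/<variant>debug -- and the requested targets are generated
    # once with each of them in the loop further below.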
if properties:
expanded = []
for p in properties:
expanded.extend(build_request.convert_command_line_element(p))
expanded = build_request.expand_no_defaults(expanded)
else:
expanded = [property_set.empty()]
# Now that we have a set of targets to build and a set of property sets to
# build the targets with, we can start the main build process by using each
# property set to generate virtual targets from all of our listed targets
# and any of their dependants.
for p in expanded:
manager.set_command_line_free_features(property_set.create(p.free()))
for t in targets:
try:
g = t.generate(p)
if not isinstance(t, ProjectTarget):
results_of_main_targets.extend(g.targets())
virtual_targets.extend(g.targets())
except ExceptionWithUserContext, e:
e.report()
except Exception:
raise
# Convert collected virtual targets into actual raw Jam targets.
for t in virtual_targets:
actual_targets.append(t.actualize())
# FIXME: restore
## # If XML data output has been requested prepare additional rules and targets
## # so we can hook into Jam to collect build data while its building and have
## # it trigger the final XML report generation after all the planned targets
## # have been built.
## if $(.out-xml)
## {
## # Get a qualified virtual target name.
## rule full-target-name ( target )
## {
## local name = [ $(target).name ] ;
## local project = [ $(target).project ] ;
## local project-path = [ $(project).get location ] ;
## return $(project-path)//$(name) ;
## }
## # Generate an XML file containing build statistics for each constituent.
## #
## rule out-xml ( xml-file : constituents * )
## {
## # Prepare valid XML header and footer with some basic info.
## local nl = "
## " ;
## local jam = [ version.jam ] ;
## local os = [ modules.peek : OS OSPLAT JAMUNAME ] "" ;
## local timestamp = [ modules.peek : JAMDATE ] ;
## local cwd = [ PWD ] ;
## local command = $(.sys.argv) ;
## local bb-version = [ version.boost-build ] ;
## .header on $(xml-file) =
## "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
## "$(nl)<build format=\"1.0\" version=\"$(bb-version)\">"
## "$(nl) <jam version=\"$(jam:J=.)\" />"
## "$(nl) <os name=\"$(os[1])\" platform=\"$(os[2])\"><![CDATA[$(os[3-]:J= )]]></os>"
## "$(nl) <timestamp><![CDATA[$(timestamp)]]></timestamp>"
## "$(nl) <directory><![CDATA[$(cwd)]]></directory>"
## "$(nl) <command><![CDATA[\"$(command:J=\" \")\"]]></command>"
## ;
## .footer on $(xml-file) =
## "$(nl)</build>" ;
## # Generate the target dependency graph.
## .contents on $(xml-file) +=
## "$(nl) <targets>" ;
## for local t in [ virtual-target.all-targets ]
## {
## local action = [ $(t).action ] ;
## if $(action)
## # If a target has no action, it has no dependencies.
## {
## local name = [ full-target-name $(t) ] ;
## local sources = [ $(action).sources ] ;
## local dependencies ;
## for local s in $(sources)
## {
## dependencies += [ full-target-name $(s) ] ;
## }
## local path = [ $(t).path ] ;
## local jam-target = [ $(t).actual-name ] ;
## .contents on $(xml-file) +=
## "$(nl) <target>"
## "$(nl) <name><![CDATA[$(name)]]></name>"
## "$(nl) <dependencies>"
## "$(nl) <dependency><![CDATA[$(dependencies)]]></dependency>"
## "$(nl) </dependencies>"
## "$(nl) <path><![CDATA[$(path)]]></path>"
## "$(nl) <jam-target><![CDATA[$(jam-target)]]></jam-target>"
## "$(nl) </target>"
## ;
## }
## }
## .contents on $(xml-file) +=
## "$(nl) </targets>" ;
## # Build $(xml-file) after $(constituents). Do so even if a
## # constituent action fails and regenerate the xml on every bjam run.
## INCLUDES $(xml-file) : $(constituents) ;
## ALWAYS $(xml-file) ;
## __ACTION_RULE__ on $(xml-file) = build-system.out-xml.generate-action ;
## out-xml.generate $(xml-file) ;
## }
## # The actual build actions are here; if we did this work in the actions
## # clause we would have to form a valid command line containing the
## # result of @(...) below (the name of the XML file).
## #
## rule out-xml.generate-action ( args * : xml-file
## : command status start end user system : output ? )
## {
## local contents =
## [ on $(xml-file) return $(.header) $(.contents) $(.footer) ] ;
## local f = @($(xml-file):E=$(contents)) ;
## }
## # Nothing to do here; the *real* actions happen in
## # out-xml.generate-action.
## actions quietly out-xml.generate { }
## # Define the out-xml file target, which depends on all the targets so
## # that it runs the collection after the targets have run.
## out-xml $(.out-xml) : $(actual-targets) ;
## # Set up a global __ACTION_RULE__ that records all the available
## # statistics about each actual target in a variable "on" the --out-xml
## # target.
## #
## rule out-xml.collect ( xml-file : target : command status start end user
## system : output ? )
## {
## local nl = "
## " ;
## # Open the action with some basic info.
## .contents on $(xml-file) +=
## "$(nl) <action status=\"$(status)\" start=\"$(start)\" end=\"$(end)\" user=\"$(user)\" system=\"$(system)\">" ;
## # If we have an action object we can print out more detailed info.
## local action = [ on $(target) return $(.action) ] ;
## if $(action)
## {
## local action-name = [ $(action).action-name ] ;
## local action-sources = [ $(action).sources ] ;
## local action-props = [ $(action).properties ] ;
## # The qualified name of the action which we created the target.
## .contents on $(xml-file) +=
## "$(nl) <name><![CDATA[$(action-name)]]></name>" ;
## # The sources that made up the target.
## .contents on $(xml-file) +=
## "$(nl) <sources>" ;
## for local source in $(action-sources)
## {
## local source-actual = [ $(source).actual-name ] ;
## .contents on $(xml-file) +=
## "$(nl) <source><![CDATA[$(source-actual)]]></source>" ;
## }
## .contents on $(xml-file) +=
## "$(nl) </sources>" ;
## # The properties that define the conditions under which the
## # target was built.
## .contents on $(xml-file) +=
## "$(nl) <properties>" ;
## for local prop in [ $(action-props).raw ]
## {
## local prop-name = [ MATCH ^<(.*)>$ : $(prop:G) ] ;
## .contents on $(xml-file) +=
## "$(nl) <property name=\"$(prop-name)\"><![CDATA[$(prop:G=)]]></property>" ;
## }
## .contents on $(xml-file) +=
## "$(nl) </properties>" ;
## }
## local locate = [ on $(target) return $(LOCATE) ] ;
## locate ?= "" ;
## .contents on $(xml-file) +=
## "$(nl) <jam-target><![CDATA[$(target)]]></jam-target>"
## "$(nl) <path><![CDATA[$(target:G=:R=$(locate))]]></path>"
## "$(nl) <command><![CDATA[$(command)]]></command>"
## "$(nl) <output><![CDATA[$(output)]]></output>" ;
## .contents on $(xml-file) +=
## "$(nl) </action>" ;
## }
## # When no __ACTION_RULE__ is set "on" a target, the search falls back to
## # the global module.
## module
## {
## __ACTION_RULE__ = build-system.out-xml.collect
## [ modules.peek build-system : .out-xml ] ;
## }
## IMPORT
## build-system :
## out-xml.collect
## out-xml.generate-action
## : :
## build-system.out-xml.collect
## build-system.out-xml.generate-action
## ;
## }
j = option.get("jobs")
if j:
bjam.call("set-variable", 'PARALLELISM', j)
k = option.get("keep-going", "true", "true")
if k in ["on", "yes", "true"]:
bjam.call("set-variable", "KEEP_GOING", "1")
elif k in ["off", "no", "false"]:
bjam.call("set-variable", "KEEP_GOING", "0")
else:
print "error: Invalid value for the --keep-going option"
sys.exit()
    # The 'all' pseudo target is not strictly needed except in the case when we
# use it below but people often assume they always have this target
# available and do not declare it themselves before use which may cause
# build failures with an error message about not being able to build the
# 'all' target.
bjam.call("NOTFILE", "all")
# And now that all the actual raw Jam targets and all the dependencies
# between them have been prepared all that is left is to tell Jam to update
# those targets.
if explicitly_requested_files:
# Note that this case can not be joined with the regular one when only
# exact Boost Build targets are requested as here we do not build those
# requested targets but only use them to construct the dependency tree
# needed to build the explicitly requested files.
# FIXME: add $(.out-xml)
bjam.call("UPDATE", ["<e>%s" % x for x in explicitly_requested_files])
elif cleanall:
bjam.call("UPDATE", "clean-all")
elif clean:
manager.engine().set_update_action("common.Clean", "clean",
actual_clean_targets(targets))
bjam.call("UPDATE", "clean")
else:
# FIXME:
#configure.print-configure-checks-summary ;
if pre_build_hook:
for h in pre_build_hook:
h()
bjam.call("DEPENDS", "all", actual_targets)
ok = bjam.call("UPDATE_NOW", "all") # FIXME: add out-xml
if post_build_hook:
post_build_hook(ok)
# Prevent automatic update of the 'all' target, now that
# we have explicitly updated what we wanted.
bjam.call("UPDATE")
if manager.errors().count() == 0:
return ["ok"]
else:
return []
| apache-2.0 |
kc4271/batch_downloader | requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
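    # Worked example (numbers chosen purely for illustration): with 1000 total
    # characters, 300 of them in the frequent set and a typical distribution
    # ratio of 0.75, the confidence is 300 / ((1000 - 300) * 0.75) ~= 0.57.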
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
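    # Worked example: the byte pair 0xC4 0xA1 maps to order 0 and 0xC5 0xA1 to
    # order 94, i.e. each first byte selects a row of 94 second-byte cells
    # counted from the 0xC4/0xA1 corner.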
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| mit |
cryptickp/heat | heat/engine/resources/openstack/glance/glance_image.py | 4 | 4527 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class GlanceImage(resource.Resource):
'''
    A resource for managing an image in Glance.
'''
support_status = support.SupportStatus(version='2014.2')
PROPERTIES = (
NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
DISK_FORMAT, CONTAINER_FORMAT, LOCATION
) = (
'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
'disk_format', 'container_format', 'location'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the image. The name of an image is not '
              'unique to an Image Service node.')
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The image ID. Glance will generate a UUID if not specified.')
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('Scope of image accessibility. Public or private. '
              'Default value is False, which means private.'),
default=False,
),
MIN_DISK: properties.Schema(
properties.Schema.INTEGER,
_('Amount of disk space (in GB) required to boot image. '
'Default value is 0 if not specified '
'and means no limit on the disk size.'),
constraints=[
constraints.Range(min=0),
]
),
MIN_RAM: properties.Schema(
properties.Schema.INTEGER,
_('Amount of ram (in MB) required to boot image. Default value '
'is 0 if not specified and means no limit on the ram size.'),
constraints=[
constraints.Range(min=0),
]
),
PROTECTED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether the image can be deleted. If the value is True, '
'the image is protected and cannot be deleted.')
),
DISK_FORMAT: properties.Schema(
properties.Schema.STRING,
_('Disk format of image.'),
required=True,
constraints=[
constraints.AllowedValues(['ami', 'ari', 'aki',
'vhd', 'vmdk', 'raw',
'qcow2', 'vdi', 'iso'])
]
),
CONTAINER_FORMAT: properties.Schema(
properties.Schema.STRING,
_('Container format of image.'),
required=True,
constraints=[
constraints.AllowedValues(['ami', 'ari', 'aki',
'bare', 'ova', 'ovf'])
]
),
LOCATION: properties.Schema(
properties.Schema.STRING,
_('URL where the data for this image already resides. For '
'example, if the image data is stored in swift, you could '
'specify "swift://example.com/container/obj".'),
required=True,
),
}
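    # Minimal usage sketch (illustrative values only) in a HOT template:
    #
    #   resources:
    #     my_image:
    #       type: OS::Glance::Image
    #       properties:
    #         name: cirros-image
    #         disk_format: qcow2
    #         container_format: bare
    #         location: http://example.com/cirros.qcow2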
default_client_name = 'glance'
entity = 'images'
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
image_id = self.client().images.create(**args).id
self.resource_id_set(image_id)
return image_id
def check_create_complete(self, image_id):
image = self.client().images.get(image_id)
return image.status == 'active'
def _show_resource(self):
if self.glance().version == 1.0:
return super(GlanceImage, self)._show_resource()
else:
image = self.glance().images.get(self.resource_id)
return dict(image)
def resource_mapping():
return {
'OS::Glance::Image': GlanceImage
}
| apache-2.0 |
mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/common/system/stack_utils.py | 215 | 2734 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple routines for logging, obtaining thread stack information."""
import sys
import traceback
def log_thread_state(logger, name, thread_id, msg=''):
"""Log information about the given thread state."""
stack = _find_thread_stack(thread_id)
assert(stack is not None)
logger("")
logger("%s (tid %d) %s" % (name, thread_id, msg))
_log_stack(logger, stack)
logger("")
def _find_thread_stack(thread_id):
"""Returns a stack object that can be used to dump a stack trace for
the given thread id (or None if the id is not found)."""
for tid, stack in sys._current_frames().items():
if tid == thread_id:
return stack
return None
def _log_stack(logger, stack):
"""Log a stack trace to the logger callback."""
for filename, lineno, name, line in traceback.extract_stack(stack):
logger('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
logger(' %s' % line.strip())
def log_traceback(logger, tb):
stack = traceback.extract_tb(tb)
for frame_str in traceback.format_list(stack):
for line in frame_str.split('\n'):
if line:
logger(" %s" % line)
| apache-2.0 |
arbrandes/edx-platform | openedx/core/djangoapps/content_libraries/tests/test_content_libraries.py | 3 | 41306 | """
Tests for Blockstore-based Content Libraries
"""
from uuid import UUID
from unittest.mock import patch
import ddt
from django.conf import settings
from django.contrib.auth.models import Group
from django.test.client import Client
from django.test.utils import override_settings
from organizations.models import Organization
from rest_framework.test import APITestCase
from openedx.core.djangoapps.content_libraries.libraries_index import LibraryBlockIndexer, ContentLibraryIndexer
from openedx.core.djangoapps.content_libraries.tests.base import (
ContentLibrariesRestApiTest,
elasticsearch_test,
URL_BLOCK_METADATA_URL,
URL_BLOCK_RENDER_VIEW,
URL_BLOCK_GET_HANDLER_URL,
URL_BLOCK_XBLOCK_HANDLER,
)
from openedx.core.djangoapps.content_libraries.constants import VIDEO, COMPLEX, PROBLEM, CC_4_BY, ALL_RIGHTS_RESERVED
from common.djangoapps.student.tests.factories import UserFactory
@ddt.ddt
@elasticsearch_test
class ContentLibrariesTest(ContentLibrariesRestApiTest):
"""
General tests for Blockstore-based Content Libraries
These tests use the REST API, which in turn relies on the Python API.
Some tests may use the python API directly if necessary to provide
coverage of any code paths not accessible via the REST API.
In general, these tests should
(1) Use public APIs only - don't directly create data using other methods,
which results in a less realistic test and ties the test suite too
closely to specific implementation details.
(Exception: users can be provisioned using a user factory)
(2) Assert that fields are present in responses, but don't assert that the
entire response has some specific shape. That way, things like adding
new fields to an API response, which are backwards compatible, won't
break any tests, but backwards-incompatible API changes will.
WARNING: every test should have a unique library slug, because even though
the django/mysql database gets reset for each test case, the lookup between
library slug and bundle UUID does not because it's assumed to be immutable
and cached forever.
"""
def setUp(self):
super().setUp()
if settings.ENABLE_ELASTICSEARCH_FOR_TESTS:
ContentLibraryIndexer.remove_all_items()
LibraryBlockIndexer.remove_all_items()
def test_library_crud(self):
"""
Test Create, Read, Update, and Delete of a Content Library
"""
# Create:
lib = self._create_library(
slug="lib-crud", title="A Test Library", description="Just Testing", license_type=CC_4_BY,
)
expected_data = {
"id": "lib:CL-TEST:lib-crud",
"org": "CL-TEST",
"slug": "lib-crud",
"title": "A Test Library",
"description": "Just Testing",
"version": 0,
"type": COMPLEX,
"license": CC_4_BY,
"has_unpublished_changes": False,
"has_unpublished_deletes": False,
}
self.assertDictContainsEntries(lib, expected_data)
# Check that bundle_uuid looks like a valid UUID
UUID(lib["bundle_uuid"]) # will raise an exception if not valid
# Read:
lib2 = self._get_library(lib["id"])
self.assertDictContainsEntries(lib2, expected_data)
# Update:
lib3 = self._update_library(lib["id"], title="New Title")
expected_data["title"] = "New Title"
self.assertDictContainsEntries(lib3, expected_data)
# Delete:
self._delete_library(lib["id"])
# And confirm it is deleted:
self._get_library(lib["id"], expect_response=404)
self._delete_library(lib["id"], expect_response=404)
@ddt.data(VIDEO, PROBLEM, COMPLEX)
def test_library_alternative_type(self, target_type):
"""
Create a library with a specific type
"""
lib = self._create_library(
slug="some-slug", title="Video Library", description="Test Video Library", library_type=target_type,
)
expected_data = {
"id": "lib:CL-TEST:some-slug",
"org": "CL-TEST",
"slug": "some-slug",
"title": "Video Library",
"type": target_type,
"description": "Test Video Library",
"version": 0,
"has_unpublished_changes": False,
"has_unpublished_deletes": False,
"license": ALL_RIGHTS_RESERVED,
}
self.assertDictContainsEntries(lib, expected_data)
# Need to use a different slug each time here. Seems to be a race condition on test cleanup that will break things
# otherwise.
@ddt.data(
('to-video-fail', COMPLEX, VIDEO, (("problem", "problemA"),), 400),
('to-video-empty', COMPLEX, VIDEO, tuple(), 200),
('to-problem', COMPLEX, PROBLEM, (("problem", "problemB"),), 200),
('to-problem-fail', COMPLEX, PROBLEM, (("video", "videoA"),), 400),
('to-problem-empty', COMPLEX, PROBLEM, tuple(), 200),
('to-complex-from-video', VIDEO, COMPLEX, (("video", "videoB"),), 200),
('to-complex-from-problem', PROBLEM, COMPLEX, (("problem", "problemC"),), 200),
('to-complex-from-problem-empty', PROBLEM, COMPLEX, tuple(), 200),
('to-problem-from-video-empty', PROBLEM, VIDEO, tuple(), 200),
)
@ddt.unpack
def test_library_update_type_conversion(self, slug, start_type, target_type, xblock_specs, expect_response):
"""
Test conversion of one library type to another. Restricts options based on type/block matching.
"""
lib = self._create_library(
slug=slug, title="A Test Library", description="Just Testing", library_type=start_type,
)
assert lib['type'] == start_type
for block_type, block_slug in xblock_specs:
self._add_block_to_library(lib['id'], block_type, block_slug)
self._commit_library_changes(lib['id'])
result = self._update_library(lib['id'], type=target_type, expect_response=expect_response)
if expect_response == 200:
assert result['type'] == target_type
assert 'type' in result
else:
lib = self._get_library(lib['id'])
assert lib['type'] == start_type
def test_no_convert_on_unpublished(self):
"""
Verify that you can't change a library's type, even if it would normally be valid,
when there are unpublished changes. This is so that a reversion of blocks won't cause an inconsistency.
"""
lib = self._create_library(
slug='resolute', title="A complex library", description="Unconvertable", library_type=COMPLEX,
)
self._add_block_to_library(lib['id'], "video", 'vid-block')
result = self._update_library(lib['id'], type=VIDEO, expect_response=400)
assert 'type' in result
def test_no_convert_on_pending_deletes(self):
"""
Verify that you can't change a library's type, even if it would normally be valid,
        when there are pending deletes of published blocks. This is so that a reversion of blocks won't cause an inconsistency.
"""
lib = self._create_library(
slug='still-alive', title="A complex library", description="Unconvertable", library_type=COMPLEX,
)
block = self._add_block_to_library(lib['id'], "video", 'vid-block')
self._commit_library_changes(lib['id'])
self._delete_library_block(block['id'])
result = self._update_library(lib['id'], type=VIDEO, expect_response=400)
assert 'type' in result
def test_library_validation(self):
"""
You can't create a library with the same slug as an existing library,
or an invalid slug.
"""
self._create_library(slug="some-slug", title="Existing Library")
self._create_library(slug="some-slug", title="Duplicate Library", expect_response=400)
self._create_library(slug="Invalid Slug!", title="Library with Bad Slug", expect_response=400)
@ddt.data(True, False)
@patch("openedx.core.djangoapps.content_libraries.views.LibraryApiPagination.page_size", new=2)
def test_list_library(self, is_indexing_enabled):
"""
Test the /libraries API and its pagination
"""
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
lib1 = self._create_library(slug="some-slug-1", title="Existing Library")
lib2 = self._create_library(slug="some-slug-2", title="Existing Library")
if not is_indexing_enabled:
lib1['num_blocks'] = lib2['num_blocks'] = None
lib1['last_published'] = lib2['last_published'] = None
lib1['has_unpublished_changes'] = lib2['has_unpublished_changes'] = None
lib1['has_unpublished_deletes'] = lib2['has_unpublished_deletes'] = None
result = self._list_libraries()
assert len(result) == 2
assert lib1 in result
assert lib2 in result
result = self._list_libraries({'pagination': 'true'})
assert len(result['results']) == 2
assert result['next'] is None
# Create another library which causes number of libraries to exceed the page size
self._create_library(slug="some-slug-3", title="Existing Library")
# Verify that if `pagination` param isn't sent, API still honors the max page size.
# This is for maintaining compatibility with older non pagination-aware clients.
result = self._list_libraries()
assert len(result) == 2
# Pagination enabled:
# Verify total elements and valid 'next' in page 1
result = self._list_libraries({'pagination': 'true'})
assert len(result['results']) == 2
assert 'page=2' in result['next']
assert 'pagination=true' in result['next']
# Verify total elements and null 'next' in page 2
result = self._list_libraries({'pagination': 'true', 'page': '2'})
assert len(result['results']) == 1
assert result['next'] is None
@ddt.data(True, False)
def test_library_filters(self, is_indexing_enabled):
"""
Test the filters in the list libraries API
"""
suffix = str(is_indexing_enabled)
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
self._create_library(
slug=f"test-lib-filter-{suffix}-1", title="Fob", description=f"Bar-{suffix}", library_type=VIDEO,
)
self._create_library(
slug=f"test-lib-filter-{suffix}-2", title=f"Library-Title-{suffix}-2", description=f"Bar-{suffix}-2",
)
self._create_library(
slug=f"l3{suffix}", title=f"Library-Title-{suffix}-3", description="Description", library_type=VIDEO,
)
Organization.objects.get_or_create(
short_name=f"org-test-{suffix}",
defaults={"name": "Content Libraries Tachyon Exploration & Survey Team"},
)
self._create_library(
slug=f"l4-{suffix}", title=f"Library-Title-{suffix}-4",
description="Library-Description", org=f'org-test-{suffix}',
library_type=VIDEO,
)
self._create_library(
slug="l5", title=f"Library-Title-{suffix}-5", description="Library-Description",
org=f'org-test-{suffix}',
)
assert len(self._list_libraries()) == 5
assert len(self._list_libraries({'org': f'org-test-{suffix}'})) == 2
assert len(self._list_libraries({'text_search': f'test-lib-filter-{suffix}'})) == 2
assert len(self._list_libraries({'text_search': f'test-lib-filter-{suffix}', 'type': VIDEO})) == 1
assert len(self._list_libraries({'text_search': f'library-title-{suffix}'})) == 4
assert len(self._list_libraries({'text_search': f'library-title-{suffix}', 'type': VIDEO})) == 2
assert len(self._list_libraries({'text_search': f'bar-{suffix}'})) == 2
assert len(self._list_libraries({'text_search': f'org-test-{suffix}'})) == 2
assert len(self._list_libraries({'org': f'org-test-{suffix}',
'text_search': f'library-title-{suffix}-4'})) == 1
assert len(self._list_libraries({'type': VIDEO})) == 3
# General Content Library XBlock tests:
def test_library_blocks(self):
"""
Test the happy path of creating and working with XBlocks in a content
library.
"""
lib = self._create_library(slug="testlib1", title="A Test Library", description="Testing XBlocks")
lib_id = lib["id"]
assert lib['has_unpublished_changes'] is False
# A library starts out empty:
assert self._get_library_blocks(lib_id) == []
# Add a 'problem' XBlock to the library:
block_data = self._add_block_to_library(lib_id, "problem", "problem1")
self.assertDictContainsEntries(block_data, {
"id": "lb:CL-TEST:testlib1:problem:problem1",
"display_name": "Blank Advanced Problem",
"block_type": "problem",
"has_unpublished_changes": True,
})
block_id = block_data["id"]
# Confirm that the result contains a definition key, but don't check its value,
# which for the purposes of these tests is an implementation detail.
assert 'def_key' in block_data
# now the library should contain one block and have unpublished changes:
assert self._get_library_blocks(lib_id) == [block_data]
assert self._get_library(lib_id)['has_unpublished_changes'] is True
# Publish the changes:
self._commit_library_changes(lib_id)
assert self._get_library(lib_id)['has_unpublished_changes'] is False
# And now the block information should also show that block has no unpublished changes:
block_data["has_unpublished_changes"] = False
self.assertDictContainsEntries(self._get_library_block(block_id), block_data)
assert self._get_library_blocks(lib_id) == [block_data]
# Now update the block's OLX:
orig_olx = self._get_library_block_olx(block_id)
assert '<problem' in orig_olx
new_olx = """
<problem display_name="New Multi Choice Question" max_attempts="5">
<multiplechoiceresponse>
<p>This is a normal capa problem with unicode 🔥. It has "maximum attempts" set to **5**.</p>
<label>Blockstore is designed to store.</label>
<choicegroup type="MultipleChoice">
<choice correct="false">XBlock metadata only</choice>
<choice correct="true">XBlock data/metadata and associated static asset files</choice>
<choice correct="false">Static asset files for XBlocks and courseware</choice>
<choice correct="false">XModule metadata only</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""".strip()
self._set_library_block_olx(block_id, new_olx)
# now reading it back, we should get that exact OLX (no change to whitespace etc.):
assert self._get_library_block_olx(block_id) == new_olx
# And the display name and "unpublished changes" status of the block should be updated:
self.assertDictContainsEntries(self._get_library_block(block_id), {
"display_name": "New Multi Choice Question",
"has_unpublished_changes": True,
})
# Now view the XBlock's student_view (including draft changes):
fragment = self._render_block_view(block_id, "student_view")
assert 'resources' in fragment
assert 'Blockstore is designed to store.' in fragment['content']
# Also call a handler to make sure that's working:
handler_url = self._get_block_handler_url(block_id, "xmodule_handler") + "problem_get"
problem_get_response = self.client.get(handler_url)
assert problem_get_response.status_code == 200
assert 'You have used 0 of 5 attempts' in problem_get_response.content.decode('utf-8')
# Now delete the block:
assert self._get_library(lib_id)['has_unpublished_deletes'] is False
self._delete_library_block(block_id)
# Confirm it's deleted:
self._render_block_view(block_id, "student_view", expect_response=404)
self._get_library_block(block_id, expect_response=404)
assert self._get_library(lib_id)['has_unpublished_deletes'] is True
# Now revert all the changes back until the last publish:
self._revert_library_changes(lib_id)
assert self._get_library(lib_id)['has_unpublished_deletes'] is False
assert self._get_library_block_olx(block_id) == orig_olx
# fin
@ddt.data(True, False)
@patch("openedx.core.djangoapps.content_libraries.views.LibraryApiPagination.page_size", new=2)
def test_list_library_blocks(self, is_indexing_enabled):
"""
Test the /libraries/{lib_key_str}/blocks API and its pagination
"""
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
lib = self._create_library(slug="list_blocks-slug" + str(is_indexing_enabled), title="Library 1")
block1 = self._add_block_to_library(lib["id"], "problem", "problem1")
block2 = self._add_block_to_library(lib["id"], "unit", "unit1")
self._add_block_to_library(lib["id"], "problem", "problem2", parent_block=block2["id"])
result = self._get_library_blocks(lib["id"])
assert len(result) == 2
assert block1 in result
result = self._get_library_blocks(lib["id"], {'pagination': 'true'})
assert len(result['results']) == 2
assert result['next'] is None
self._add_block_to_library(lib["id"], "problem", "problem3")
# Test pagination
result = self._get_library_blocks(lib["id"])
assert len(result) == 3
result = self._get_library_blocks(lib["id"], {'pagination': 'true'})
assert len(result['results']) == 2
assert 'page=2' in result['next']
assert 'pagination=true' in result['next']
result = self._get_library_blocks(lib["id"], {'pagination': 'true', 'page': '2'})
assert len(result['results']) == 1
assert result['next'] is None
@ddt.data(True, False)
def test_library_blocks_filters(self, is_indexing_enabled):
"""
        Test the filters in the list blocks API
"""
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
lib = self._create_library(slug="test-lib-blocks" + str(is_indexing_enabled), title="Title")
block1 = self._add_block_to_library(lib["id"], "problem", "foo-bar")
self._add_block_to_library(lib["id"], "video", "vid-baz")
self._add_block_to_library(lib["id"], "html", "html-baz")
self._add_block_to_library(lib["id"], "problem", "foo-baz")
self._add_block_to_library(lib["id"], "problem", "bar-baz")
self._set_library_block_olx(block1["id"], "<problem display_name=\"DisplayName\"></problem>")
assert len(self._get_library_blocks(lib['id'])) == 5
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Foo'})) == 2
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Display'})) == 1
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Video'})) == 1
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Foo', 'block_type': 'video'})) == 0
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Baz', 'block_type': 'video'})) == 1
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Baz', 'block_type': ['video', 'html']})) ==\
2
assert len(self._get_library_blocks(lib['id'], {'block_type': 'video'})) == 1
assert len(self._get_library_blocks(lib['id'], {'block_type': 'problem'})) == 3
assert len(self._get_library_blocks(lib['id'], {'block_type': 'squirrel'})) == 0
@ddt.data(
('video-problem', VIDEO, 'problem', 400),
('video-video', VIDEO, 'video', 200),
('problem-problem', PROBLEM, 'problem', 200),
('problem-video', PROBLEM, 'video', 400),
('complex-video', COMPLEX, 'video', 200),
('complex-problem', COMPLEX, 'problem', 200),
)
@ddt.unpack
def test_library_blocks_type_constrained(self, slug, library_type, block_type, expect_response):
"""
Test that type-constrained libraries enforce their constraint when adding an XBlock.
"""
lib = self._create_library(
slug=slug, title="A Test Library", description="Testing XBlocks", library_type=library_type,
)
lib_id = lib["id"]
# Add a 'problem' XBlock to the library:
self._add_block_to_library(lib_id, block_type, 'test-block', expect_response=expect_response)
def test_library_blocks_with_hierarchy(self):
"""
Test library blocks with children
"""
lib = self._create_library(slug="hierarchy_test_lib", title="A Test Library")
lib_id = lib["id"]
# Add a 'unit' XBlock to the library:
unit_block = self._add_block_to_library(lib_id, "unit", "unit1")
# Add an HTML child block:
child1 = self._add_block_to_library(lib_id, "html", "html1", parent_block=unit_block["id"])
self._set_library_block_olx(child1["id"], "<html>Hello world</html>")
# Add a problem child block:
child2 = self._add_block_to_library(lib_id, "problem", "problem1", parent_block=unit_block["id"])
self._set_library_block_olx(child2["id"], """
<problem><multiplechoiceresponse>
<p>What is an even number?</p>
<choicegroup type="MultipleChoice">
<choice correct="false">3</choice>
<choice correct="true">2</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
# Check the resulting OLX of the unit:
assert self._get_library_block_olx(unit_block['id']) ==\
'<unit xblock-family="xblock.v1">\n <xblock-include definition="html/html1"/>\n' \
' <xblock-include definition="problem/problem1"/>\n</unit>\n'
# The unit can see and render its children:
fragment = self._render_block_view(unit_block["id"], "student_view")
assert 'Hello world' in fragment['content']
assert 'What is an even number?' in fragment['content']
# We cannot add a duplicate ID to the library, either at the top level or as a child:
self._add_block_to_library(lib_id, "problem", "problem1", expect_response=400)
self._add_block_to_library(lib_id, "problem", "problem1", parent_block=unit_block["id"], expect_response=400)
# Test that permissions are enforced for content libraries
def test_library_permissions(self): # pylint: disable=too-many-statements
"""
Test that permissions are enforced for content libraries, and that
permissions can be read and manipulated using the REST API (which in
turn tests the python API).
This is a single giant test case, because that optimizes for the fastest
test run time, even though it can make debugging failures harder.
"""
# Create a few users to use for all of these tests:
admin = UserFactory.create(username="Admin", email="[email protected]")
author = UserFactory.create(username="Author", email="[email protected]")
reader = UserFactory.create(username="Reader", email="[email protected]")
group = Group.objects.create(name="group1")
author_group_member = UserFactory.create(username="GroupMember", email="[email protected]")
author_group_member.groups.add(group)
random_user = UserFactory.create(username="Random", email="[email protected]")
never_added = UserFactory.create(username="Never", email="[email protected]")
# Library CRUD #########################################################
# Create a library, owned by "Admin"
with self.as_user(admin):
lib = self._create_library(slug="permtest", title="Permission Test Library", description="Testing")
lib_id = lib["id"]
# By default, "public learning" and public read access are disallowed.
assert lib['allow_public_learning'] is False
assert lib['allow_public_read'] is False
# By default, the creator of a new library is the only admin
data = self._get_library_team(lib_id)
assert len(data) == 1
self.assertDictContainsEntries(data[0], {
"username": admin.username, "group_name": None, "access_level": "admin",
})
# Add the other users to the content library:
self._set_user_access_level(lib_id, author.username, access_level="author")
# Delete it, add it again.
self._remove_user_access(lib_id, author.username)
self._set_user_access_level(lib_id, author.username, access_level="author")
# Add one of them via the email-based creation endpoint.
self._add_user_by_email(lib_id, reader.email, access_level="read")
self._set_group_access_level(lib_id, group.name, access_level="author")
team_response = self._get_library_team(lib_id)
assert len(team_response) == 4
# We'll use this one later.
reader_grant = {"username": reader.username, "group_name": None, "access_level": "read"}
# The response should also always be sorted in a specific order (by username and group name):
expected_response = [
{"username": None, "group_name": "group1", "access_level": "author"},
{"username": admin.username, "group_name": None, "access_level": "admin"},
{"username": author.username, "group_name": None, "access_level": "author"},
reader_grant,
]
for entry, expected in zip(team_response, expected_response):
self.assertDictContainsEntries(entry, expected)
# A random user cannot get the library nor its team:
with self.as_user(random_user):
self._get_library(lib_id, expect_response=403)
self._get_library_team(lib_id, expect_response=403)
self._add_user_by_email(lib_id, never_added.email, access_level="read", expect_response=403)
# But every authorized user can:
for user in [admin, author, author_group_member]:
with self.as_user(user):
self._get_library(lib_id)
data = self._get_library_team(lib_id)
assert data == team_response
data = self._get_user_access_level(lib_id, reader.username)
assert data == {**reader_grant, 'username': 'Reader', 'email': '[email protected]'}
# A user with only read permission can get data about the library but not the team:
with self.as_user(reader):
self._get_library(lib_id)
self._get_library_team(lib_id, expect_response=403)
self._get_user_access_level(lib_id, author.username, expect_response=403)
self._add_user_by_email(lib_id, never_added.email, access_level="read", expect_response=403)
# Users without admin access cannot delete the library nor change its team:
for user in [author, reader, author_group_member, random_user]:
with self.as_user(user):
self._delete_library(lib_id, expect_response=403)
self._set_user_access_level(lib_id, author.username, access_level="admin", expect_response=403)
self._set_user_access_level(lib_id, admin.username, access_level=None, expect_response=403)
self._set_user_access_level(lib_id, random_user.username, access_level="read", expect_response=403)
self._remove_user_access(lib_id, admin.username, expect_response=403)
self._add_user_by_email(lib_id, never_added.email, access_level="read", expect_response=403)
# Users with author access (or higher) can edit the library's properties:
with self.as_user(author):
self._update_library(lib_id, description="Revised description")
with self.as_user(author_group_member):
self._update_library(lib_id, title="New Library Title")
# But other users cannot:
with self.as_user(reader):
self._update_library(lib_id, description="Prohibited description", expect_response=403)
with self.as_user(random_user):
self._update_library(lib_id, title="I can't set this title", expect_response=403)
# Verify the permitted changes were made:
with self.as_user(admin):
data = self._get_library(lib_id)
assert data['description'] == 'Revised description'
assert data['title'] == 'New Library Title'
# Library XBlock editing ###############################################
# users with read permission or less cannot add blocks:
for user in [reader, random_user]:
with self.as_user(user):
self._add_block_to_library(lib_id, "problem", "problem1", expect_response=403)
# But authors and admins can:
with self.as_user(admin):
self._add_block_to_library(lib_id, "problem", "problem1")
with self.as_user(author):
self._add_block_to_library(lib_id, "problem", "problem2")
with self.as_user(author_group_member):
block3_data = self._add_block_to_library(lib_id, "problem", "problem3")
block3_key = block3_data["id"]
# At this point, the library contains 3 draft problem XBlocks.
# A random user cannot read OLX nor assets (this library has allow_public_read False):
with self.as_user(random_user):
self._get_library_block_olx(block3_key, expect_response=403)
self._get_library_block_assets(block3_key, expect_response=403)
self._get_library_block_asset(block3_key, file_name="whatever.png", expect_response=403)
# But if we grant allow_public_read, then they can:
with self.as_user(admin):
self._update_library(lib_id, allow_public_read=True)
self._set_library_block_asset(block3_key, "whatever.png", b"data")
with self.as_user(random_user):
self._get_library_block_olx(block3_key)
self._get_library_block_assets(block3_key)
self._get_library_block_asset(block3_key, file_name="whatever.png")
        # Users without authoring permission cannot edit or delete XBlocks (allow_public_read only grants read access):
for user in [reader, random_user]:
with self.as_user(user):
self._set_library_block_olx(block3_key, "<problem/>", expect_response=403)
self._set_library_block_asset(block3_key, "test.txt", b"data", expect_response=403)
self._delete_library_block(block3_key, expect_response=403)
self._commit_library_changes(lib_id, expect_response=403)
self._revert_library_changes(lib_id, expect_response=403)
# But users with author permission can:
with self.as_user(author_group_member):
olx = self._get_library_block_olx(block3_key)
self._set_library_block_olx(block3_key, olx)
self._get_library_block_assets(block3_key)
self._set_library_block_asset(block3_key, "test.txt", b"data")
self._get_library_block_asset(block3_key, file_name="test.txt")
self._delete_library_block(block3_key)
self._commit_library_changes(lib_id)
self._revert_library_changes(lib_id) # This is a no-op after the commit, but should still have 200 response
def test_no_lockout(self):
"""
Test that administrators cannot be removed if they are the only administrator granted access.
"""
admin = UserFactory.create(username="Admin", email="[email protected]")
successor = UserFactory.create(username="Successor", email="[email protected]")
with self.as_user(admin):
lib = self._create_library(slug="permtest", title="Permission Test Library", description="Testing")
# Fail to downgrade permissions.
self._remove_user_access(lib_key=lib['id'], username=admin.username, expect_response=400)
# Promote another user.
self._set_user_access_level(
lib_key=lib['id'], username=successor.username, access_level="admin",
)
self._remove_user_access(lib_key=lib['id'], username=admin.username)
def test_library_blocks_with_links(self):
"""
Test that libraries can link to XBlocks in other content libraries
"""
# Create a problem bank:
bank_lib = self._create_library(slug="problem_bank", title="Problem Bank")
bank_lib_id = bank_lib["id"]
# Add problem1 to the problem bank:
p1 = self._add_block_to_library(bank_lib_id, "problem", "problem1")
self._set_library_block_olx(p1["id"], """
<problem><multiplechoiceresponse>
<p>What is an even number?</p>
<choicegroup type="MultipleChoice">
<choice correct="false">3</choice>
<choice correct="true">2</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
# Commit the changes, creating version 1:
self._commit_library_changes(bank_lib_id)
# Now update problem 1 and create a new problem 2:
self._set_library_block_olx(p1["id"], """
<problem><multiplechoiceresponse>
<p>What is an odd number?</p>
<choicegroup type="MultipleChoice">
<choice correct="true">3</choice>
<choice correct="false">2</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
p2 = self._add_block_to_library(bank_lib_id, "problem", "problem2")
self._set_library_block_olx(p2["id"], """
<problem><multiplechoiceresponse>
<p>What holds this XBlock?</p>
<choicegroup type="MultipleChoice">
<choice correct="false">A course</choice>
<choice correct="true">A problem bank</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
# Commit the changes, creating version 2:
self._commit_library_changes(bank_lib_id)
# At this point, bank_lib contains two problems and has two versions.
        # In version 1, problem1 is "What is an even number", and in version 2 it's "What is an odd number".
# Problem2 exists only in version 2 and asks "What holds this XBlock?"
lib = self._create_library(slug="links_test_lib", title="Link Test Library")
lib_id = lib["id"]
# Link to the problem bank:
self._link_to_library(lib_id, "problem_bank", bank_lib_id)
self._link_to_library(lib_id, "problem_bank_v1", bank_lib_id, version=1)
# Add a 'unit' XBlock to the library:
unit_block = self._add_block_to_library(lib_id, "unit", "unit1")
self._set_library_block_olx(unit_block["id"], """
<unit>
<!-- version 2 link to "What is an odd number?" -->
<xblock-include source="problem_bank" definition="problem/problem1"/>
<!-- version 1 link to "What is an even number?" -->
<xblock-include source="problem_bank_v1" definition="problem/problem1" usage="p1v1" />
<!-- link to "What holds this XBlock?" -->
<xblock-include source="problem_bank" definition="problem/problem2"/>
</unit>
""")
# The unit can see and render its children:
fragment = self._render_block_view(unit_block["id"], "student_view")
assert 'What is an odd number?' in fragment['content']
assert 'What is an even number?' in fragment['content']
assert 'What holds this XBlock?' in fragment['content']
# Also check the API for retrieving links:
links_created = self._get_library_links(lib_id)
links_created.sort(key=lambda link: link["id"])
assert len(links_created) == 2
assert links_created[0]['id'] == 'problem_bank'
assert links_created[0]['bundle_uuid'] == bank_lib['bundle_uuid']
assert links_created[0]['version'] == 2
assert links_created[0]['latest_version'] == 2
assert links_created[0]['opaque_key'] == bank_lib_id
assert links_created[1]['id'] == 'problem_bank_v1'
assert links_created[1]['bundle_uuid'] == bank_lib['bundle_uuid']
assert links_created[1]['version'] == 1
assert links_created[1]['latest_version'] == 2
assert links_created[1]['opaque_key'] == bank_lib_id
def test_library_blocks_limit(self):
"""
        Test that libraries don't allow more than the configured maximum number of blocks
"""
with self.settings(MAX_BLOCKS_PER_CONTENT_LIBRARY=1):
lib = self._create_library(slug="test_lib_limits", title="Limits Test Library", description="Testing XBlocks limits in a library") # lint-amnesty, pylint: disable=line-too-long
lib_id = lib["id"]
block_data = self._add_block_to_library(lib_id, "unit", "unit1")
# Second block should throw error
self._add_block_to_library(lib_id, "problem", "problem1", expect_response=400)
# Also check that limit applies to child blocks too
self._add_block_to_library(lib_id, "html", "html1", parent_block=block_data['id'], expect_response=400)
@ddt.data(
('complex-types', COMPLEX, False),
('video-types', VIDEO, True),
('problem-types', PROBLEM, True),
)
@ddt.unpack
def test_block_types(self, slug, library_type, constrained):
"""
        Test that the permitted block types listing for a library changes based on the library type.
"""
lib = self._create_library(slug=slug, title='Test Block Types', library_type=library_type)
types = self._get_library_block_types(lib['id'])
if constrained:
assert len(types) == 1
assert types[0]['block_type'] == library_type
else:
assert len(types) > 1
@ddt.ddt
class ContentLibraryXBlockValidationTest(APITestCase):
"""Tests only focused on service validation, no Blockstore needed."""
@ddt.data(
(URL_BLOCK_METADATA_URL, dict(block_key='totally_invalid_key')),
(URL_BLOCK_RENDER_VIEW, dict(block_key='totally_invalid_key', view_name='random')),
(URL_BLOCK_GET_HANDLER_URL, dict(block_key='totally_invalid_key', handler_name='random')),
)
@ddt.unpack
def test_invalid_key(self, endpoint, endpoint_parameters):
"""Test all xblock related endpoints, when the key is invalid, return 404."""
response = self.client.get(
endpoint.format(**endpoint_parameters),
)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), {'detail': "Invalid XBlock key"})
def test_xblock_handler_invalid_key(self):
"""This endpoint is tested separately from the previous ones as it's not a DRF endpoint."""
client = Client()
response = client.get(URL_BLOCK_XBLOCK_HANDLER.format(**dict(
block_key='totally_invalid_key',
handler_name='random',
user_id='random',
secure_token='random',
)))
self.assertEqual(response.status_code, 404)
def test_not_found_fails_correctly(self):
"""Test fails with 404 when xblock key is valid but not found."""
valid_not_found_key = 'lb:valid:key:video:1'
response = self.client.get(URL_BLOCK_METADATA_URL.format(block_key=valid_not_found_key))
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), {
'detail': f"XBlock {valid_not_found_key} does not exist, or you don't have permission to view it.",
})
| agpl-3.0 |
litlpoet/rl-library | system/common/libs/mwclient/simplejson/decoder.py | 4 | 9113 | """
Implementation of JSONDecoder
"""
import re
from simplejson.scanner import Scanner, pattern
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
return c[match.group(0)], None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
res = float(integer + (frac or '') + (exp or ''))
else:
res = int(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
try:
m = unichr(int(esc, 16))
if len(esc) != 4 or not esc.isalnum():
raise ValueError
except ValueError:
raise ValueError(errmsg("Invalid \\uXXXX escape", s, end))
end += 5
_append(m)
return u''.join(chunks), end
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
"""
self.encoding = encoding
self.object_hook = object_hook
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
__all__ = ['JSONDecoder']
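# Hedged usage sketch (not part of the original module): driving JSONDecoder
# directly, including an ``object_hook``. The import path assumes this file is
# importable as ``simplejson.decoder``.
#
#   from simplejson.decoder import JSONDecoder
#   decoder = JSONDecoder(object_hook=lambda d: sorted(d.items()))
#   decoder.decode('{"b": 1, "a": [true, null, 1.5]}')
#   # -> [(u'a', [True, None, 1.5]), (u'b', 1)]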
| apache-2.0 |
pgmillon/ansible | lib/ansible/modules/cloud/amazon/aws_waf_rule.py | 16 | 13360 | #!/usr/bin/python
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_rule
short_description: create and delete WAF Rules
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/)
version_added: "2.5"
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall rule
required: yes
metric_name:
description:
- A friendly name or description for the metrics for the rule
- The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- You can't change metric_name after you create the rule
- Defaults to the same as name with disallowed characters removed
state:
description: whether the rule should be present or absent
choices:
- present
- absent
default: present
conditions:
description: >
list of conditions used in the rule. Each condition should
contain I(type): which is one of [C(byte), C(geo), C(ip), C(size), C(sql) or C(xss)]
I(negated): whether the condition should be negated, and C(condition),
the name of the existing condition. M(aws_waf_condition) can be used to
create new conditions
purge_conditions:
description:
- Whether or not to remove conditions that are not passed when updating `conditions`.
default: false
type: bool
waf_regional:
description: Whether to use waf_regional module. Defaults to false
default: false
required: no
type: bool
version_added: "2.9"
'''
EXAMPLES = '''
- name: create WAF rule
aws_waf_rule:
name: my_waf_rule
conditions:
- name: my_regex_condition
type: regex
negated: no
- name: my_geo_condition
type: geo
negated: no
- name: my_byte_condition
type: byte
negated: yes
- name: remove WAF rule
aws_waf_rule:
name: "my_waf_rule"
state: absent
'''
RETURN = '''
rule:
description: WAF rule contents
returned: always
type: complex
contains:
metric_name:
description: Metric name for the rule
returned: always
type: str
sample: ansibletest1234rule
name:
description: Friendly name for the rule
returned: always
type: str
sample: ansible-test-1234_rule
predicates:
description: List of conditions used in the rule
returned: always
type: complex
contains:
data_id:
description: ID of the condition
returned: always
type: str
sample: 8251acdb-526c-42a8-92bc-d3d13e584166
negated:
description: Whether the sense of the condition is negated
returned: always
type: bool
sample: false
type:
description: type of the condition
returned: always
type: str
sample: ByteMatch
rule_id:
description: ID of the WAF rule
returned: always
type: str
sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
'''
import re
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, list_regional_rules_with_backoff, MATCH_LOOKUP
from ansible.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff
def get_rule_by_name(client, module, name):
rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name]
if rules:
return rules[0]
def get_rule(client, module, rule_id):
try:
return client.get_rule(RuleId=rule_id)['Rule']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not get WAF rule')
def list_rules(client, module):
if client.__class__.__name__ == 'WAF':
try:
return list_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list WAF rules')
elif client.__class__.__name__ == 'WAFRegional':
try:
return list_regional_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list WAF Regional rules')
def list_regional_rules(client, module):
try:
return list_regional_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list WAF rules')
def find_and_update_rule(client, module, rule_id):
rule = get_rule(client, module, rule_id)
rule_id = rule['RuleId']
existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
all_conditions = dict()
for condition_type in MATCH_LOOKUP:
method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
all_conditions[condition_type] = dict()
try:
paginator = client.get_paginator(method)
func = paginator.paginate().build_full_result
except (KeyError, botocore.exceptions.OperationNotPageableError):
# list_geo_match_sets and list_regex_match_sets do not have a paginator
# and throw different exceptions
func = getattr(client, method)
try:
pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
for pred in pred_results:
pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
for condition in module.params['conditions']:
desired_conditions[condition['type']][condition['name']] = condition
reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
for condition in rule['Predicates']:
existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
insertions = list()
deletions = list()
for condition_type in desired_conditions:
for (condition_name, condition) in desired_conditions[condition_type].items():
if condition_name not in all_conditions[condition_type]:
module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
if condition['data_id'] not in existing_conditions[condition_type]:
insertions.append(format_for_insertion(condition))
if module.params['purge_conditions']:
for condition_type in existing_conditions:
deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
changed = bool(insertions or deletions)
update = {
'RuleId': rule_id,
'Updates': insertions + deletions
}
if changed:
try:
run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not update rule conditions')
return changed, get_rule(client, module, rule_id)
def format_for_insertion(condition):
return dict(Action='INSERT',
Predicate=dict(Negated=condition['negated'],
Type=MATCH_LOOKUP[condition['type']]['type'],
DataId=condition['data_id']))
def format_for_deletion(condition):
return dict(Action='DELETE',
Predicate=dict(Negated=condition['negated'],
Type=condition['type'],
DataId=condition['data_id']))
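# Hedged illustration (not part of the original module): for a desired condition
# such as {'negated': False, 'type': 'byte', 'data_id': 'abc123'},
# format_for_insertion() produces roughly the update entry the WAF API expects:
#   {'Action': 'INSERT',
#    'Predicate': {'Negated': False, 'Type': 'ByteMatch', 'DataId': 'abc123'}}
# format_for_deletion() has the same shape with Action='DELETE', passing the
# existing predicate's type through unchanged.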
def remove_rule_conditions(client, module, rule_id):
conditions = get_rule(client, module, rule_id)['Predicates']
updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions]
try:
run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not remove rule conditions')
def ensure_rule_present(client, module):
name = module.params['name']
rule_id = get_rule_by_name(client, module, name)
params = dict()
if rule_id:
return find_and_update_rule(client, module, rule_id)
else:
params['Name'] = module.params['name']
metric_name = module.params['metric_name']
if not metric_name:
metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
params['MetricName'] = metric_name
try:
new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not create rule')
return find_and_update_rule(client, module, new_rule['RuleId'])
def find_rule_in_web_acls(client, module, rule_id):
web_acls_in_use = []
try:
if client.__class__.__name__ == 'WAF':
all_web_acls = list_web_acls_with_backoff(client)
elif client.__class__.__name__ == 'WAFRegional':
all_web_acls = list_regional_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list Web ACLs')
for web_acl in all_web_acls:
try:
web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not get Web ACL details')
if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
web_acls_in_use.append(web_acl_details['Name'])
return web_acls_in_use
def ensure_rule_absent(client, module):
rule_id = get_rule_by_name(client, module, module.params['name'])
in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
if in_use_web_acls:
web_acl_names = ', '.join(in_use_web_acls)
module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
(module.params['name'], web_acl_names))
if rule_id:
remove_rule_conditions(client, module, rule_id)
try:
return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not delete rule')
return False, {}
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
metric_name=dict(),
state=dict(default='present', choices=['present', 'absent']),
conditions=dict(type='list'),
purge_conditions=dict(type='bool', default=False),
waf_regional=dict(type='bool', default=False),
),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
state = module.params.get('state')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
client = boto3_conn(module, conn_type='client', resource=resource, region=region, endpoint=ec2_url, **aws_connect_kwargs)
if state == 'present':
(changed, results) = ensure_rule_present(client, module)
else:
(changed, results) = ensure_rule_absent(client, module)
module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()
| gpl-3.0 |
hanicker/odoo | addons/crm/sales_team.py | 321 | 5053 | # -*- coding: utf-8 -*-
import calendar
from datetime import date
from dateutil import relativedelta
import json
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.Model):
_inherit = 'crm.case.section'
_inherits = {'mail.alias': 'alias_id'}
def _get_opportunities_data(self, cr, uid, ids, field_name, arg, context=None):
""" Get opportunities-related data for salesteam kanban view
            monthly_open_leads: number of open leads during the last months
            monthly_planned_revenue: planned revenue of opportunities during the last months
"""
obj = self.pool.get('crm.lead')
res = dict.fromkeys(ids, False)
month_begin = date.today().replace(day=1)
date_begin = month_begin - relativedelta.relativedelta(months=self._period_number - 1)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1])
lead_pre_domain = [('create_date', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)),
('create_date', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)),
('type', '=', 'lead')]
opp_pre_domain = [('date_deadline', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)),
('date_deadline', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)),
('type', '=', 'opportunity')]
for id in ids:
res[id] = dict()
lead_domain = lead_pre_domain + [('section_id', '=', id)]
opp_domain = opp_pre_domain + [('section_id', '=', id)]
res[id]['monthly_open_leads'] = json.dumps(self.__get_bar_values(cr, uid, obj, lead_domain, ['create_date'], 'create_date_count', 'create_date', context=context))
res[id]['monthly_planned_revenue'] = json.dumps(self.__get_bar_values(cr, uid, obj, opp_domain, ['planned_revenue', 'date_deadline'], 'planned_revenue', 'date_deadline', context=context))
return res
_columns = {
'resource_calendar_id': fields.many2one('resource.calendar', "Working Time", help="Used to compute open days"),
'stage_ids': fields.many2many('crm.case.stage', 'section_stage_rel', 'section_id', 'stage_id', 'Stages'),
'use_leads': fields.boolean('Leads',
help="The first contact you get with a potential customer is a lead you qualify before converting it into a real business opportunity. Check this box to manage leads in this sales team."),
'use_opportunities': fields.boolean('Opportunities', help="Check this box to manage opportunities in this sales team."),
'monthly_open_leads': fields.function(_get_opportunities_data,
type="char", readonly=True, multi='_get_opportunities_data',
string='Open Leads per Month'),
'monthly_planned_revenue': fields.function(_get_opportunities_data,
type="char", readonly=True, multi='_get_opportunities_data',
string='Planned Revenue per Month'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True, help="The email address associated with this team. New emails received will automatically create new leads assigned to the team."),
}
def _auto_init(self, cr, context=None):
"""Installation hook to create aliases for all lead and avoid constraint errors."""
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(crm_case_section, self)._auto_init,
'crm.lead', self._columns['alias_id'], 'name', alias_prefix='Lead+', alias_defaults={}, context=context)
def _get_stage_common(self, cr, uid, context):
ids = self.pool.get('crm.case.stage').search(cr, uid, [('case_default', '=', 1)], context=context)
return ids
_defaults = {
'stage_ids': _get_stage_common,
'use_leads': True,
'use_opportunities': True,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
create_context = dict(context, alias_model_name='crm.lead', alias_parent_model_name=self._name)
section_id = super(crm_case_section, self).create(cr, uid, vals, context=create_context)
section = self.browse(cr, uid, section_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [section.alias_id.id], {'alias_parent_thread_id': section_id, 'alias_defaults': {'section_id': section_id, 'type': 'lead'}}, context=context)
return section_id
def unlink(self, cr, uid, ids, context=None):
# Cascade-delete mail aliases as well, as they should not exist without the sales team.
mail_alias = self.pool.get('mail.alias')
alias_ids = [team.alias_id.id for team in self.browse(cr, uid, ids, context=context) if team.alias_id]
res = super(crm_case_section, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
return res
| agpl-3.0 |
Hubert51/AutoGrading | learning/web_Haotian/venv/Lib/site-packages/werkzeug/wsgi.py | 17 | 49347 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import io
try:
import httplib
except ImportError:
from http import client as httplib
import mimetypes
import os
import posixpath
import re
import socket
from datetime import datetime
from functools import partial, update_wrapper
from itertools import chain
from time import mktime, time
from zlib import adler32
from werkzeug._compat import BytesIO, PY2, implements_iterator, iteritems, \
make_literal_wrapper, string_types, text_type, to_bytes, to_unicode, \
try_coerce_native, wsgi_get_bytes
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.filesystem import get_filesystem_encoding
from werkzeug.http import http_date, is_resource_modified, \
is_hop_by_hop_header
from werkzeug.urls import uri_to_iri, url_join, url_parse, url_quote
from werkzeug.datastructures import EnvironHeaders
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False, trusted_hosts=None):
"""A handy helper function that recreates the full URL as IRI for the
current request or parts of it. Here's an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
    Optionally it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
Note that the string returned might contain unicode characters as the
    representation is an IRI, not a URI. If you need an ASCII-only
representation you can use the :func:`~werkzeug.urls.iri_to_uri`
function:
>>> from werkzeug.urls import iri_to_uri
>>> iri_to_uri(get_current_url(env))
'http://localhost/script/?param=foo'
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri(''.join(tmp) + '/')
cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
cat('/')
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat('?' + qs)
return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ':' in hostname:
hostname = hostname.rsplit(':', 1)[0]
return _encode_idna(hostname)
try:
hostname = _normalize(hostname)
except UnicodeError:
return False
for ref in trusted_list:
if ref.startswith('.'):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
try:
ref = _normalize(ref)
except UnicodeError:
return False
if ref == hostname:
return True
if suffix_match and hostname.endswith(b'.' + ref):
return True
return False
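# Hedged usage sketch (not part of the original source): ports are normalized
# away and a leading dot in a trusted entry also matches subdomains.
#
#   host_is_trusted('www.example.com:443', ['.example.com'])  # -> True
#   host_is_trusted('example.com', ['.example.com'])          # -> True
#   host_is_trusted('badexample.com', ['.example.com'])       # -> False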
def get_host(environ, trusted_hosts=None):
"""Return the real host for the given WSGI environment. This first checks
the `X-Forwarded-Host` header, then the normal `Host` header, and finally
the `SERVER_NAME` environment variable (using the first one it finds).
Optionally it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the host of.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
if 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip()
elif 'HTTP_HOST' in environ:
rv = environ['HTTP_HOST']
else:
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from werkzeug.exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
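# Hedged usage sketch (not part of the original source): X-Forwarded-Host wins
# over Host, and the port is only appended when falling back to SERVER_NAME
# with a non-default scheme/port combination.
#
#   get_host({'HTTP_X_FORWARDED_HOST': 'example.org, proxy.internal',
#             'HTTP_HOST': 'internal:8080',
#             'wsgi.url_scheme': 'http', 'SERVER_PORT': '8080'})
#   # -> 'example.org'
#   get_host({'wsgi.url_scheme': 'https', 'SERVER_NAME': 'example.org',
#             'SERVER_PORT': '8443'})
#   # -> 'example.org:8443'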
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
    an integer. If it's not available or chunked transfer encoding is used,
``None`` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
"""
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
return None
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
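# Hedged usage sketch (not part of the original source):
#
#   get_content_length({'CONTENT_LENGTH': '42'})               # -> 42
#   get_content_length({'HTTP_TRANSFER_ENCODING': 'chunked'})  # -> None
#   get_content_length({'CONTENT_LENGTH': 'garbage'})          # -> None (bad values are ignored)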
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
If content length is not set, the stream will be empty for safety reasons.
If the WSGI server supports chunked or infinite streams, it should set
the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
:param safe_fallback: use an empty stream as a safe fallback when the
content length is not set. Disabling this allows infinite streams,
which can be a denial-of-service risk.
"""
stream = environ['wsgi.input']
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
# that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get('wsgi.input_terminated'):
return stream
# If the request doesn't specify a content length, returning the stream is
# potentially dangerous because it could be infinite, malicious or not. If
# safe_fallback is true, return an empty stream instead for safety.
if content_length is None:
return safe_fallback and _empty_stream or stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
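# Hedged usage sketch (not part of the original source): without a content
# length (and without ``wsgi.input_terminated``) the safe fallback is an empty
# stream, otherwise the stream is wrapped in a LimitedStream.
#
#   get_input_stream({'wsgi.input': BytesIO(b'payload')}).read()   # -> b''
#   get_input_stream({'wsgi.input': BytesIO(b'payload'),
#                     'CONTENT_LENGTH': '7'}).read()               # -> b'payload'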
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
    care of the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
"""
qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
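# Hedged usage sketch (not part of the original source): the query string comes
# back as a native string with unsafe bytes percent-quoted.
#
#   get_query_string({'QUERY_STRING': 'a=b&c=d'})        # -> 'a=b&c=d'
#   get_query_string({'QUERY_STRING': 'q=hello world'})  # -> 'q=hello%20world'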
def get_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
If there are empty segments (``'/foo//bar``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get('PATH_INFO')
if not path:
return None
script_name = environ.get('SCRIPT_NAME', '')
# shift multiple leading slashes over
old_path = path
path = path.lstrip('/')
if path != old_path:
script_name += '/' * (len(old_path) - len(path))
if '/' not in path:
environ['PATH_INFO'] = ''
environ['SCRIPT_NAME'] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
errors='replace', collapse_http_schemes=True):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
.. versionadded:: 0.6
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u'@', 1)[-1].split(u':', 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u'http' and port == u'80') or \
(scheme == u'https' and port == u'443'):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u':' + port
return netloc
# make sure whatever we are working on is a IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl,
root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path, = \
url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u'http', u'https'):
return None
else:
if not (base_scheme in (u'http', u'https') and
base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u'/')
if not cur_path.startswith(base_path):
return None
return u'/' + cur_path[len(base_path):].lstrip(u'/')
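# Example sketch: as noted above, a WSGI environ can be passed instead of a
# base URL.  The environ below is hand-built and minimal; all values are
# illustrative only.
def _extract_path_info_example():
    environ = {
        'wsgi.url_scheme': 'http',
        'HTTP_HOST': 'example.com',
        'SCRIPT_NAME': '/app',
        'PATH_INFO': '',
    }
    # get_current_url() is used internally to turn the environ into a base URL
    return extract_path_info(environ, 'http://example.com/app/hello')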
class ProxyMiddleware(object):
"""This middleware routes some requests to the provided WSGI app and
proxies some requests to an external server. This is not something that
can generally be done on the WSGI layer and some HTTP requests will not
tunnel through correctly (for instance websocket requests cannot be
proxied through WSGI). As a result this is only really useful for some
basic requests that can be forwarded.
Example configuration::
app = ProxyMiddleware(app, {
'/static/': {
'target': 'http://127.0.0.1:5001/',
}
})
    Options can be specified for each host. The following options are
    supported:
``target``:
the target URL to dispatch to
``remove_prefix``:
if set to `True` the prefix is chopped off the URL before
dispatching it to the server.
    ``host``:
        When set to ``'<auto>'``, which is the default, the host header is
        automatically rewritten to the URL of the target. If set to `None`
        then the host header is unmodified from the client request. Any
        other value overwrites the host header with that value.
``headers``:
An optional dictionary of headers that should be sent with the
request to the target host.
``ssl_context``:
In case this is an HTTPS target host then an SSL context can be
provided here (:class:`ssl.SSLContext`). This can be used for instance
to disable SSL verification.
In this case everything below ``'/static/'`` is proxied to the server on
port 5001. The host header is automatically rewritten and so are request
URLs (eg: the leading `/static/` prefix here gets chopped off).
.. versionadded:: 0.14
"""
def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
def _set_defaults(opts):
opts.setdefault('remove_prefix', False)
opts.setdefault('host', '<auto>')
opts.setdefault('headers', {})
opts.setdefault('ssl_context', None)
return opts
self.app = app
self.targets = dict(('/%s/' % k.strip('/'), _set_defaults(v))
for k, v in iteritems(targets))
self.chunk_size = chunk_size
self.timeout = timeout
def proxy_to(self, opts, path, prefix):
target = url_parse(opts['target'])
def application(environ, start_response):
headers = list(EnvironHeaders(environ).items())
headers[:] = [(k, v) for k, v in headers
if not is_hop_by_hop_header(k) and
k.lower() not in ('content-length', 'host')]
headers.append(('Connection', 'close'))
if opts['host'] == '<auto>':
headers.append(('Host', target.ascii_host))
elif opts['host'] is None:
headers.append(('Host', environ['HTTP_HOST']))
else:
headers.append(('Host', opts['host']))
headers.extend(opts['headers'].items())
remote_path = path
if opts['remove_prefix']:
remote_path = '%s/%s' % (
target.path.rstrip('/'),
remote_path[len(prefix):].lstrip('/')
)
content_length = environ.get('CONTENT_LENGTH')
chunked = False
if content_length not in ('', None):
headers.append(('Content-Length', content_length))
elif content_length is not None:
headers.append(('Transfer-Encoding', 'chunked'))
chunked = True
try:
if target.scheme == 'http':
con = httplib.HTTPConnection(
target.ascii_host, target.port or 80,
timeout=self.timeout)
elif target.scheme == 'https':
con = httplib.HTTPSConnection(
target.ascii_host, target.port or 443,
timeout=self.timeout,
context=opts['ssl_context'])
con.connect()
con.putrequest(environ['REQUEST_METHOD'], url_quote(remote_path),
skip_host=True)
for k, v in headers:
if k.lower() == 'connection':
v = 'close'
con.putheader(k, v)
con.endheaders()
stream = get_input_stream(environ)
while 1:
data = stream.read(self.chunk_size)
if not data:
break
if chunked:
con.send(b'%x\r\n%s\r\n' % (len(data), data))
else:
con.send(data)
resp = con.getresponse()
except socket.error:
from werkzeug.exceptions import BadGateway
return BadGateway()(environ, start_response)
start_response('%d %s' % (resp.status, resp.reason),
[(k.title(), v) for k, v in resp.getheaders()
if not is_hop_by_hop_header(k)])
def read():
while 1:
try:
data = resp.read(self.chunk_size)
except socket.error:
break
if not data:
break
yield data
return read()
return application
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
app = self.app
for prefix, opts in iteritems(self.targets):
if path.startswith(prefix):
app = self.proxy_to(opts, path, prefix)
break
return app(environ, start_response)
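# Usage sketch for ProxyMiddleware: proxy everything under '/api/' to another
# server, dropping the prefix and adding a header.  The target URL, header
# value and the `app` callable are placeholders.
def _make_proxied_app(app):
    return ProxyMiddleware(app, {
        '/api/': {
            'target': 'http://127.0.0.1:5001/',   # hypothetical backend
            'remove_prefix': True,                 # strip the '/api/' prefix before forwarding
            'headers': {'X-Forwarded-Prefix': '/api'},
        },
    })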
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.
    The middleware will guess the mimetype using the Python `mimetypes`
    module. If it's unable to figure out the mimetype it will fall back
    to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a list or dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
self.app = app
self.exports = []
self.cache = cache
self.cache_timeout = cache_timeout
if hasattr(exports, 'items'):
exports = iteritems(exports)
for key, value in exports:
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError('unknown def %r' % value)
self.exports.append((key, loader))
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = posixpath.join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return basename, self._opener(
provider.get_resource_filename(manager, path))
s = provider.get_resource_string(manager, path)
return basename, lambda: (
BytesIO(s),
loadtime,
len(s)
)
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = os.path.join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(get_filesystem_encoding())
return 'wzsdm-%d-%s-%s' % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xffffffff
)
def __call__(self, environ, start_response):
cleaned_path = get_path_info(environ)
if PY2:
cleaned_path = cleaned_path.encode(get_filesystem_encoding())
# sanitize the path for non unix systems
cleaned_path = cleaned_path.strip('/')
for sep in os.sep, os.altsep:
if sep and sep != '/':
cleaned_path = cleaned_path.replace(sep, '/')
path = '/' + '/'.join(x for x in cleaned_path.split('/')
if x and x != '..')
file_loader = None
for search_path, loader in self.exports:
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith('/'):
search_path += '/'
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path):])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Type', mime_type),
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
start_response('200 OK', headers)
return wrap_file(environ, f)
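# Usage sketch for SharedDataMiddleware: serve a local directory and package
# data side by side while hiding dotfiles.  The paths and the package name are
# illustrative only.
def _make_shared_app(app):
    return SharedDataMiddleware(app, {
        '/static': '/srv/myapp/static',                 # local directory (illustrative)
        '/shared': ('myapplication', 'shared_files'),   # package data via pkg_resources
    }, disallow='.*', cache_timeout=60 * 60)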
class DispatcherMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
This is useful if you want to combine multiple WSGI applications::
app = DispatcherMiddleware(app, {
'/app2': app2,
'/app3': app3
})
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
path_info = ''
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
script, last_item = script.rsplit('/', 1)
path_info = '/%s%s' % (last_item, path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get('SCRIPT_NAME', '')
environ['SCRIPT_NAME'] = original_script_name + script
environ['PATH_INFO'] = path_info
return app(environ, start_response)
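# Usage sketch for DispatcherMiddleware: mount a secondary WSGI app under a
# URL prefix and call the result directly with a hand-built, minimal environ.
# The two tiny apps below exist only for the demonstration.
def _dispatcher_example():
    def main_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'main: ' + environ['PATH_INFO'].encode('latin1')]
    def admin_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'admin: ' + environ['PATH_INFO'].encode('latin1')]
    app = DispatcherMiddleware(main_app, {'/admin': admin_app})
    environ = {'PATH_INFO': '/admin/users', 'SCRIPT_NAME': ''}
    body = app(environ, lambda status, headers: None)
    # inside admin_app, SCRIPT_NAME is now '/admin' and PATH_INFO is '/users'
    return body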
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of an iterator. Because it is useful to add
    another close action to a returned iterator, and writing a custom iterator
    class for that is tedious, this class can be used for that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterator, 'close', None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
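# Usage sketch for ClosingIterator: attach extra cleanup work to a response
# iterable.  The callback here only records that it ran.
def _closing_iterator_example():
    closed = []
    def cleanup():
        closed.append(True)      # runs when the iterator is closed
    it = ClosingIterator(iter([b'hello ', b'world']), cleanup)
    body = b''.join(it)
    it.close()                   # triggers the cleanup callback
    return body, closed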
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
    More information about file wrappers is available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def seekable(self):
if hasattr(self.file, 'seekable'):
return self.file.seekable()
if hasattr(self.file, 'seek'):
return True
return False
def seek(self, *args):
if hasattr(self.file, 'seek'):
self.file.seek(*args)
def tell(self):
if hasattr(self.file, 'tell'):
return self.file.tell()
return None
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
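# Usage sketch: FileWrapper turns any object with a ``read`` method into an
# iterator of fixed-size blocks.  In a real application you would normally go
# through wrap_file() so the server's own wrapper is used when available.
def _file_wrapper_example():
    from io import BytesIO
    src = BytesIO(b'x' * 10000)
    wrapped = FileWrapper(src, buffer_size=4096)
    sizes = [len(block) for block in wrapped]   # -> [4096, 4096, 1808]
    wrapped.close()
    return sizes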
@implements_iterator
class _RangeWrapper(object):
# private for now, but should we make it public in the future ?
"""This class can be used to convert an iterable object into
an iterable that will only yield a piece of the underlying content.
It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the block size
    of the original iterator, but they can be smaller.
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param iterable: an iterable object with a :meth:`__next__` method.
:param start_byte: byte from which read will start.
:param byte_range: how many bytes to read.
"""
def __init__(self, iterable, start_byte=0, byte_range=None):
self.iterable = iter(iterable)
self.byte_range = byte_range
self.start_byte = start_byte
self.end_byte = None
if byte_range is not None:
self.end_byte = self.start_byte + self.byte_range
self.read_length = 0
self.seekable = hasattr(iterable, 'seekable') and iterable.seekable()
self.end_reached = False
def __iter__(self):
return self
def _next_chunk(self):
try:
chunk = next(self.iterable)
self.read_length += len(chunk)
return chunk
except StopIteration:
self.end_reached = True
raise
def _first_iteration(self):
chunk = None
if self.seekable:
self.iterable.seek(self.start_byte)
self.read_length = self.iterable.tell()
contextual_read_length = self.read_length
else:
while self.read_length <= self.start_byte:
chunk = self._next_chunk()
if chunk is not None:
chunk = chunk[self.start_byte - self.read_length:]
contextual_read_length = self.start_byte
return chunk, contextual_read_length
def _next(self):
if self.end_reached:
raise StopIteration()
chunk = None
contextual_read_length = self.read_length
if self.read_length == 0:
chunk, contextual_read_length = self._first_iteration()
if chunk is None:
chunk = self._next_chunk()
if self.end_byte is not None and self.read_length >= self.end_byte:
self.end_reached = True
return chunk[:self.end_byte - contextual_read_length]
return chunk
def __next__(self):
chunk = self._next()
if chunk:
return chunk
self.end_reached = True
raise StopIteration()
def close(self):
if hasattr(self.iterable, 'close'):
self.iterable.close()
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError('Passed a string or byte object instead of '
'true iterator or stream.')
if not hasattr(stream, 'read'):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024,
cap_at_buffer=False):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposed
    to the :meth:`~file.readline` method, which is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.)
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          in such a way that the buffer size might be exceeded
                          by a factor of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s('')
cr = s('\r')
lf = s('\n')
crlf = s('\r\n')
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
new_buf = []
buf_size = 0
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
buf_size += len(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
elif cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
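# Usage sketch for make_line_iter: iterate safely over a non-LimitedStream
# input by passing an explicit limit (here the full length of the buffer).
def _make_line_iter_example():
    from io import BytesIO
    data = b'first line\r\nsecond line\nlast line without newline'
    lines = list(make_line_iter(BytesIO(data), limit=len(data)))
    # -> [b'first line\r\n', b'second line\n', b'last line without newline']
    return lines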
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
cap_at_buffer=False):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
is otherwise already limited).
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          in such a way that the buffer size might be exceeded
                          by a factor of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
buf_size = 0
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
buf_size = 0
else:
buf_size += len(item)
new_buf.append(item)
if cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buf_size = len(rv)
buffer = new_buf
if buffer:
yield _join(buffer)
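# Usage sketch for make_chunk_iter: split a stream on an arbitrary separator
# instead of newlines.
def _make_chunk_iter_example():
    from io import BytesIO
    data = b'alpha;beta;gamma'
    chunks = list(make_chunk_iter(BytesIO(data), separator=b';', limit=len(data)))
    # -> [b'alpha', b'beta', b'gamma']
    return chunks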
@implements_iterator
class LimitedStream(io.IOBase):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
        WSGI compliant because they pass a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
                  what the stream can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
        :param chunk_size: the size for a chunk. Chunks of this size are
                           read from the stream and discarded until it is
                           exhausted.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
"""Read `size` bytes or if size is not provided everything is read.
        :param size: the number of bytes to read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None or size == -1: # -1 is for consistence with file
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readable(self):
return True
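# Usage sketch for LimitedStream: cap how much of an input stream can be read,
# mirroring how `wsgi.input` is consumed when a Content-Length is known.
def _limited_stream_example():
    from io import BytesIO
    raw = BytesIO(b'0123456789abcdef')
    stream = LimitedStream(raw, 10)      # pretend Content-Length is 10
    first = stream.read(4)               # b'0123'
    rest = stream.read()                 # b'456789' -- stops at the limit
    exhausted = stream.is_exhausted      # True
    return first, rest, exhausted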
| mit |
CVML/pycortex | cortex/dataset/braindata.py | 2 | 14173 | import hashlib
import numpy as np
import h5py
from ..database import db
class BrainData(object):
def __init__(self, data, subject, **kwargs):
if isinstance(data, str):
import nibabel
nib = nibabel.load(data)
data = nib.get_data().T
self._data = data
try:
basestring
except NameError:
subject = subject if isinstance(subject, str) else subject.decode('utf-8')
self.subject = subject
super(BrainData, self).__init__(**kwargs)
@property
def data(self):
if isinstance(self._data, h5py.Dataset):
return self._data.value
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def name(self):
'''Name of this BrainData, according to its hash'''
return "__%s"%_hash(self.data)[:16]
def exp(self):
"""Copy of this object with data exponentiated.
"""
return self.copy(np.exp(self.data))
def uniques(self, collapse=False):
yield self
def __hash__(self):
return hash(_hash(self.data))
def _write_hdf(self, h5, name=None):
if name is None:
name = self.name
dgrp = h5.require_group("/data")
if name in dgrp and "__%s"%_hash(dgrp[name].value)[:16] == name:
#don't need to update anything, since it's the same data
return h5.get("/data/%s"%name)
node = _hdf_write(h5, self.data, name=name)
node.attrs['subject'] = self.subject
return node
def to_json(self, simple=False):
sdict = super(BrainData, self).to_json(simple=simple)
if simple:
sdict.update(dict(name=self.name,
subject=self.subject,
min=float(np.nan_to_num(self.data).min()),
max=float(np.nan_to_num(self.data).max()),
))
return sdict
@classmethod
def add_numpy_methods(cls):
"""Adds numpy operator methods (+, -, etc.) to this class to allow
simple manipulation of the data, e.g. with VolumeData v:
v + 1 # Returns new VolumeData with 1 added to data
v ** 2 # Returns new VolumeData with data squared
"""
# Binary operations
npops = ["__add__", "__sub__", "__mul__", "__div__", "__pow__",
"__neg__", "__abs__"]
def make_opfun(op): # function nesting creates closure containing op
def opfun(self, *args):
return self.copy(getattr(self.data, op)(*args))
return opfun
for op in npops:
opfun = make_opfun(op)
opfun.__name__ = op
setattr(cls, opfun.__name__, opfun)
BrainData.add_numpy_methods()
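# Sketch: the operator methods added above let arithmetic produce new data
# objects.  `data_obj` stands for any already-constructed BrainData subclass
# instance; none is created here because that needs a populated pycortex
# database.
def _numpy_ops_example(data_obj):
    shifted = data_obj + 1          # new object with 1 added to every value
    squared = data_obj ** 2         # new object with values squared
    expd = data_obj.exp()           # same idea via the explicit helper above
    return shifted, squared, expd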
class VolumeData(BrainData):
def __init__(self, data, subject, xfmname, mask=None, **kwargs):
"""Three possible variables: volume, movie, vertex. Enumerated with size:
volume movie: (t, z, y, x)
volume image: (z, y, x)
linear movie: (t, v)
linear image: (v,)
"""
if self.__class__ == VolumeData:
raise TypeError('Cannot directly instantiate VolumeData objects')
super(VolumeData, self).__init__(data, subject, **kwargs)
try:
basestring
except NameError:
xfmname = xfmname if isinstance(xfmname, str) else xfmname.decode('utf-8')
self.xfmname = xfmname
self._check_size(mask)
self.masked = _masker(self)
def to_json(self, simple=False):
if simple:
sdict = super(VolumeData, self).to_json(simple=simple)
sdict["shape"] = self.shape
return sdict
xfm = db.get_xfm(self.subject, self.xfmname, 'coord').xfm
sdict = dict(xfm=[list(np.array(xfm).ravel())], data=[self.name])
sdict.update(super(VolumeData, self).to_json())
return sdict
@classmethod
def empty(cls, subject, xfmname, **kwargs):
xfm = db.get_xfm(subject, xfmname)
shape = xfm.shape
return cls(np.zeros(shape), subject, xfmname, **kwargs)
@classmethod
def random(cls, subject, xfmname, **kwargs):
xfm = db.get_xfm(subject, xfmname)
shape = xfm.shape
return cls(np.random.randn(*shape), subject, xfmname, **kwargs)
def _check_size(self, mask):
if self.data.ndim not in (1, 2, 3, 4):
raise ValueError("Invalid data shape")
self.linear = self.data.ndim in (1, 2)
self.movie = self.data.ndim in (2, 4)
if self.linear:
#Guess the mask
if mask is None:
nvox = self.data.shape[-1]
self._mask, self.mask = _find_mask(nvox, self.subject, self.xfmname)
elif isinstance(mask, str):
self.mask = db.get_mask(self.subject, self.xfmname, mask)
self._mask = mask
elif isinstance(mask, np.ndarray):
self.mask = mask > 0
self._mask = mask > 0
self.shape = self.mask.shape
else:
self._mask = None
shape = self.data.shape
if self.movie:
shape = shape[1:]
xfm = db.get_xfm(self.subject, self.xfmname)
if xfm.shape != shape:
raise ValueError("Volumetric data (shape %s) is not the same shape as reference for transform (shape %s)" % (str(shape), str(xfm.shape)))
self.shape = shape
def map(self, projection="nearest"):
"""Convert this VolumeData into a VertexData using the given sampler
"""
from .. import utils
mapper = utils.get_mapper(self.subject, self.xfmname, projection)
data = mapper(self)
return data
def __repr__(self):
maskstr = "volumetric"
if self.linear:
name = self._mask
if isinstance(self._mask, np.ndarray):
name = "custom"
maskstr = "%s masked"%name
if self.movie:
maskstr += " movie"
maskstr = maskstr[0].upper()+maskstr[1:]
return "<%s data for (%s, %s)>"%(maskstr, self.subject, self.xfmname)
def copy(self, data):
return super(VolumeData, self).copy(data, self.subject, self.xfmname, mask=self._mask)
@property
def volume(self):
"""Standardizes the VolumeData, ensuring that masked data are unmasked"""
from .. import volume
if self.linear:
data = volume.unmask(self.mask, self.data[:])
else:
data = self.data[:]
if not self.movie:
data = data[np.newaxis]
return data
def save(self, filename, name=None):
"""Save the dataset into an hdf file with the provided name
"""
import os
if isinstance(filename, str):
fname, ext = os.path.splitext(filename)
if ext in (".hdf", ".h5",".hf5"):
h5 = h5py.File(filename, "a")
self._write_hdf(h5, name=name)
h5.close()
else:
raise TypeError('Unknown file type')
elif isinstance(filename, h5py.Group):
self._write_hdf(filename, name=name)
def _write_hdf(self, h5, name=None):
node = super(VolumeData, self)._write_hdf(h5, name=name)
#write the mask into the file, as necessary
if self._mask is not None:
mask = self._mask
if isinstance(self._mask, np.ndarray):
mgrp = "/subjects/{subj}/transforms/{xfm}/masks/"
mgrp = mgrp.format(subj=self.subject, xfm=self.xfmname)
mname = "__%s" % _hash(self._mask)[:8]
_hdf_write(h5, self._mask, name=mname, group=mgrp)
mask = mname
node.attrs['mask'] = mask
return node
def save_nii(self, filename):
"""Save as a nifti file at the given filename. Nifti headers are
copied from the reference nifti file.
"""
xfm = db.get_xfm(self.subject, self.xfmname)
affine = xfm.reference.get_affine()
import nibabel
new_nii = nibabel.Nifti1Image(self.volume.T, affine)
nibabel.save(new_nii, filename)
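# Sketch: typical use of the methods above for an existing VolumeData subclass
# instance `vol`.  The file name is illustrative.
def _volume_examples(vol):
    vol.save('/tmp/example.hf5', name='mydata')   # store into an HDF5 file
    verts = vol.map(projection='nearest')         # resample onto the surface
    dense = vol.volume                            # unmasked (t, z, y, x) array
    return verts, dense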
class VertexData(BrainData):
def __init__(self, data, subject, **kwargs):
"""Represents `data` at each vertex on a `subject`s cortex.
`data` shape possibilities:
reg linear movie: (t, v)
reg linear image: (v,)
None: creates zero-filled VertexData
where t is the number of time points, c is colors (i.e. RGB), and v is the
number of vertices (either in both hemispheres or one hemisphere).
"""
if self.__class__ == VertexData:
raise TypeError('Cannot directly instantiate VertexData objects')
super(VertexData, self).__init__(data, subject, **kwargs)
try:
left, right = db.get_surf(self.subject, "wm")
except IOError:
left, right = db.get_surf(self.subject, "fiducial")
self.llen = len(left[0])
self.rlen = len(right[0])
self._set_data(data)
@classmethod
def empty(cls, subject, **kwargs):
try:
left, right = db.get_surf(subject, "wm")
except IOError:
left, right = db.get_surf(subject, "fiducial")
nverts = len(left[0]) + len(right[0])
return cls(np.zeros((nverts,)), subject, **kwargs)
@classmethod
def random(cls, subject, **kwargs):
try:
left, right = db.get_surf(subject, "wm")
except IOError:
left, right = db.get_surf(subject, "fiducial")
nverts = len(left[0]) + len(right[0])
return cls(np.random.randn(nverts), subject, **kwargs)
def _set_data(self, data):
"""Stores data for this VertexData. Also sets flags if `data` appears to
be in 'movie' or 'raw' format. See __init__ for `data` shape possibilities.
"""
if data is None:
data = np.zeros((self.llen + self.rlen,))
self._data = data
self.movie = self.data.ndim > 1
self.nverts = self.data.shape[-1]
if self.llen == self.nverts:
# Just data for left hemisphere
self.hem = "left"
rshape = list(self.data.shape)
rshape[1 if self.movie else 0] = self.rlen
self._data = np.hstack([self.data, np.zeros(rshape, dtype=self.data.dtype)])
elif self.rlen == self.nverts:
# Just data for right hemisphere
self.hem = "right"
lshape = list(self.data.shape)
lshape[1 if self.movie else 0] = self.llen
self._data = np.hstack([np.zeros(lshape, dtype=self.data.dtype), self.data])
elif self.llen + self.rlen == self.nverts:
# Data for both hemispheres
self.hem = "both"
else:
raise ValueError('Invalid number of vertices for subject (given %d, should be %d for left hem, %d for right hem, or %d for both)' % (self.nverts, self.llen, self.rlen, self.llen+self.rlen))
def copy(self, data):
return super(VertexData, self).copy(data, self.subject)
def volume(self, xfmname, projection='nearest', **kwargs):
import warnings
warnings.warn('Inverse mapping cannot be accurate')
from .. import utils
mapper = utils.get_mapper(self.subject, xfmname, projection)
return mapper.backwards(self, **kwargs)
def __repr__(self):
maskstr = ""
if self.movie:
maskstr = "movie "
return "<Vertex %sdata for %s>"%(maskstr, self.subject)
def __getitem__(self, idx):
if not self.movie:
raise TypeError("Cannot index non-movie data")
#return VertexData(self.data[idx], self.subject, **self.attrs)
return self.copy(self.data[idx])
def to_json(self, simple=False):
if simple:
sdict = dict(split=self.llen, frames=self.vertices.shape[0])
sdict.update(super(VertexData, self).to_json(simple=simple))
return sdict
sdict = dict(data=[self.name])
sdict.update(super(VertexData, self).to_json())
return sdict
@property
def vertices(self):
verts = self.data
if not self.movie:
verts = verts[np.newaxis]
return verts
@property
def left(self):
if self.movie:
return self.data[:,:self.llen]
else:
return self.data[:self.llen]
@property
def right(self):
if self.movie:
return self.data[:,self.llen:]
else:
return self.data[self.llen:]
def _find_mask(nvox, subject, xfmname):
import os
import re
import glob
import nibabel
files = db.get_paths(subject)['masks'].format(xfmname=xfmname, type="*")
for fname in glob.glob(files):
nib = nibabel.load(fname)
mask = nib.get_data().T != 0
if nvox == np.sum(mask):
fname = os.path.split(fname)[1]
name = re.compile(r'mask_([\w]+).nii.gz').search(fname)
return name.group(1), mask
raise ValueError('Cannot find a valid mask')
class _masker(object):
def __init__(self, dv):
self.dv = dv
self.data = None
if dv.linear:
self.data = dv.data
    def __getitem__(self, masktype):
        try:
            mask = db.get_mask(self.dv.subject, self.dv.xfmname, masktype)
            return self.dv.copy(self.dv.volume[:, mask].squeeze())
        except:
            # fall back to the same masking path, but return the result
            # instead of silently discarding it
            return self.dv.copy(self.dv.volume[:, mask].squeeze())
def _hash(array):
'''A simple numpy hash function'''
return hashlib.sha1(array.tostring()).hexdigest()
def _hdf_write(h5, data, name="data", group="/data"):
try:
node = h5.require_dataset("%s/%s"%(group, name), data.shape, data.dtype, exact=True)
except TypeError:
del h5[group][name]
        # create_dataset() does not accept the `exact` keyword
        node = h5.create_dataset("%s/%s"%(group, name), data.shape, data.dtype)
node[:] = data
return node
| bsd-2-clause |
lifeisstillgood/kashflo | setup.py | 1 | 1202 | #!/usr/bin/env python
#! -*- coding: utf-8 -*-
###
# Copyright (c) Paul Brian 2013
# This software is subject to
# the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
###
"""
setup for HomeSpendWatch
"""
from setuptools import setup, find_packages
import os, glob
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
def get_version():
""" return a version number, or error string.
We are assuming a file version.txt always exists. By convention
populate that file with output of git describe
"""
try:
v = open("version.txt").read().strip()
except:
v = "UNABLE_TO_FIND_RELEASE_VERSION_FILE"
return v
setup(
name='homespendwatch',
version=get_version(),
packages=find_packages(),
author='See AUTHORS.txt',
author_email='[email protected]',
long_description=README,
license='LICENSE.txt',
description="Simple Home Accounts spending tracker "\
"to work with any bank",
entry_points = """\
[console_scripts]
homespendwatch-run = homespendwatch.run:main
""",
)
| agpl-3.0 |
projectcalico/calico-nova | nova/api/openstack/compute/plugins/v3/image_size.py | 24 | 2270 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = "image-size"
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class ImageSizeController(wsgi.Controller):
def _extend_image(self, image, image_cache):
key = "OS-EXT-IMG-SIZE:size"
image[key] = image_cache['size']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ["nova.context"]
if authorize(context):
image_resp = resp_obj.obj['image']
# image guaranteed to be in the cache due to the core API adding
# it in its 'show' method
image_cached = req.get_db_item('images', image_resp['id'])
self._extend_image(image_resp, image_cached)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
images_resp = list(resp_obj.obj['images'])
# images guaranteed to be in the cache due to the core API adding
# it in its 'detail' method
for image in images_resp:
image_cached = req.get_db_item('images', image['id'])
self._extend_image(image, image_cached)
class ImageSize(extensions.V3APIExtensionBase):
"""Adds image size to image listings."""
name = "ImageSize"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ImageSizeController()
extension = extensions.ControllerExtension(self, 'images', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 |
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/django/core/exceptions.py | 486 | 5276 | """
Global Django exception and warning classes.
"""
from django.utils import six
from django.utils.encoding import force_text
class FieldDoesNotExist(Exception):
"""The requested model field does not exist"""
pass
class DjangoRuntimeWarning(RuntimeWarning):
pass
class AppRegistryNotReady(Exception):
"""The django.apps registry is not populated yet"""
pass
class ObjectDoesNotExist(Exception):
"""The requested object does not exist"""
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"""The query returned multiple objects when only one was expected."""
pass
class SuspiciousOperation(Exception):
"""The user did something suspicious"""
class SuspiciousMultipartForm(SuspiciousOperation):
"""Suspect MIME request in multipart form data"""
pass
class SuspiciousFileOperation(SuspiciousOperation):
"""A Suspicious filesystem operation was attempted"""
pass
class DisallowedHost(SuspiciousOperation):
"""HTTP_HOST header contains invalid value"""
pass
class DisallowedRedirect(SuspiciousOperation):
"""Redirect to scheme not in allowed list"""
pass
class PermissionDenied(Exception):
"""The user did not have permission to do that"""
pass
class ViewDoesNotExist(Exception):
"""The requested view does not exist"""
pass
class MiddlewareNotUsed(Exception):
"""This middleware is not used in this server configuration"""
pass
class ImproperlyConfigured(Exception):
"""Django is somehow improperly configured"""
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
def __init__(self, message, code=None, params=None):
"""
The `message` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set, and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
"""
# PY2 can't pickle naive exception: http://bugs.python.org/issue1692335.
super(ValidationError, self).__init__(message, code, params)
if isinstance(message, ValidationError):
if hasattr(message, 'error_dict'):
message = message.error_dict
# PY2 has a `message` property which is always there so we can't
# duck-type on it. It was introduced in Python 2.5 and already
# deprecated in Python 2.6.
elif not hasattr(message, 'message' if six.PY3 else 'code'):
message = message.error_list
else:
message, code, params = message.message, message.code, message.params
if isinstance(message, dict):
self.error_dict = {}
for field, messages in message.items():
if not isinstance(messages, ValidationError):
messages = ValidationError(messages)
self.error_dict[field] = messages.error_list
elif isinstance(message, list):
self.error_list = []
for message in message:
# Normalize plain strings to instances of ValidationError.
if not isinstance(message, ValidationError):
message = ValidationError(message)
if hasattr(message, 'error_dict'):
self.error_list.extend(sum(message.error_dict.values(), []))
else:
self.error_list.extend(message.error_list)
else:
self.message = message
self.code = code
self.params = params
self.error_list = [self]
@property
def message_dict(self):
# Trigger an AttributeError if this ValidationError
# doesn't have an error_dict.
getattr(self, 'error_dict')
return dict(self)
@property
def messages(self):
if hasattr(self, 'error_dict'):
return sum(dict(self).values(), [])
return list(self)
def update_error_dict(self, error_dict):
if hasattr(self, 'error_dict'):
for field, error_list in self.error_dict.items():
error_dict.setdefault(field, []).extend(error_list)
else:
error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list)
return error_dict
def __iter__(self):
if hasattr(self, 'error_dict'):
for field, errors in self.error_dict.items():
yield field, list(ValidationError(errors))
else:
for error in self.error_list:
message = error.message
if error.params:
message %= error.params
yield force_text(message)
def __str__(self):
if hasattr(self, 'error_dict'):
return repr(dict(self))
return repr(list(self))
def __repr__(self):
return 'ValidationError(%s)' % self
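# Sketch: how the constructor above normalizes nested messages.  The field
# names and messages are illustrative.
def _validation_error_example():
    err = ValidationError({
        'name': ['This field is required.'],
        'age': ValidationError('Enter a whole number.', code='invalid'),
    })
    by_field = err.message_dict   # {'name': [...], 'age': [...]}
    flat = err.messages           # all messages in one flat list
    return by_field, flat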
| apache-2.0 |
abhattad4/Digi-Menu | digimenu2/django/contrib/admin/sites.py | 77 | 22052 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_class.check(model))
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import url, include
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
url(r'^$', wrap(self.index), name='index'),
url(r'^login/$', self.login, name='login'),
url(r'^logout/$', wrap(self.logout), name='logout'),
url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
name='view_on_site'),
]
# Add in each model's views, and create a list of valid URLS for the
# app_index
valid_app_labels = []
for model, model_admin in six.iteritems(self._registry):
urlpatterns += [
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
# If there were ModelAdmins registered, we should have a list of app
# labels for which we need to allow access to the app_index view,
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
url(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import login
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = dict(self.each_context(request),
title=_('Log in'),
app_path=request.get_full_path(),
)
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = request.get_full_path()
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = list(six.itervalues(app_dict))
app_list.sort(key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = dict(
self.each_context(request),
title=self.index_title,
app_list=app_list,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or
'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_name = apps.get_app_config(app_label).verbose_name
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
raise PermissionDenied
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_dict:
                        app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_name,
'app_label': app_label,
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = dict(self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
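# Editor's note (illustrative sketch, not part of upstream Django): a custom
# admin site mirrors the default one; 'myadmin', MyModel and the URL prefix
# below are placeholders rather than names defined in this module.
#
#     from django.contrib.admin import AdminSite
#     from myapp.models import MyModel          # hypothetical app and model
#
#     my_site = AdminSite(name='myadmin')
#     my_site.register(MyModel)
#     # urls.py:  url(r'^myadmin/', include(my_site.urls))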
| bsd-3-clause |
kissbac/upm | examples/python/vcap.py | 2 | 2281 | #!/usr/bin/python
# Author: Jon Trulson <[email protected]>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_vcap as sensorObj
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print "Exiting..."
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
defaultDev = "/dev/video0"
# if an argument was specified, use it as the device instead
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print "Using device", defaultDev
print "Initializing..."
# Instantiate a VCAP instance, using the specified video device
sensor = sensorObj.VCAP(defaultDev)
# enable some debug/verbose output
sensor.setDebug(True)
# This is just a hint. The kernel can change this to a lower
# resolution that the hardware supports. Use getWidth() and
# getHeight() methods to see what the kernel actually chose if you
# care.
sensor.setResolution(1920, 1080)
# capture an image
sensor.captureImage()
# convert and save it as a jpeg
sensor.saveImage("video-img1.jpg")
| mit |
SevInf/IEDriver | py/selenium/webdriver/phantomjs/service.py | 14 | 3533 | #!/usr/bin/python
#
# Copyright 2012 Software Freedom Conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import signal
import subprocess
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
"""
Object that manages the starting and stopping of PhantomJS / Ghostdriver
"""
def __init__(self, executable_path, port=0, service_args=None, log_path=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to PhantomJS binary
- port : Port the service is running on
- service_args : A List of other command line options to pass to PhantomJS
- log_path: Path for PhantomJS service to log to
"""
self.port = port
self.path = executable_path
        self.service_args = service_args
if self.port == 0:
self.port = utils.free_port()
if self.service_args is None:
self.service_args = []
else:
            self.service_args = service_args[:]
self.service_args.insert(0, self.path)
self.service_args.append("--webdriver=%d" % self.port)
if not log_path:
log_path = "ghostdriver.log"
        self._log = open(log_path, 'w')
        # Not started yet; this keeps stop() safe even if start() is never called.
        self.process = None
def __del__(self):
# subprocess.Popen doesn't send signal on __del__;
# we have to try to stop the launched process.
self.stop()
def start(self):
"""
Starts PhantomJS with GhostDriver.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
try:
self.process = subprocess.Popen(self.service_args, stdin=subprocess.PIPE,
close_fds=platform.system() != 'Windows',
stdout=self._log, stderr=self._log)
except Exception as e:
raise WebDriverException("Unable to start phantomjs with ghostdriver.", e)
count = 0
while not utils.is_connectable(self.port):
count += 1
time.sleep(1)
if count == 30:
raise WebDriverException("Can not connect to GhostDriver")
@property
def service_url(self):
"""
Gets the url of the GhostDriver Service
"""
return "http://localhost:%d/wd/hub" % self.port
def stop(self):
"""
Cleans up the process
"""
if self._log:
self._log.close()
self._log = None
        # If it's already dead, don't worry.
if self.process is None:
return
        # Tell the server to die properly, in case it is still running.
try:
if self.process:
self.process.send_signal(signal.SIGTERM)
self.process.wait()
except OSError:
# kill may not be available under windows environment
pass
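# Editor's note: minimal usage sketch, not part of upstream Selenium. It assumes
# a `phantomjs` binary is available on the PATH; the GhostDriver/PhantomJS switch
# shown in service_args is only an example.
if __name__ == "__main__":
    service = Service("phantomjs", service_args=["--ignore-ssl-errors=true"])
    try:
        service.start()
        print(service.service_url)  # e.g. http://localhost:<port>/wd/hub
    finally:
        service.stop()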
| apache-2.0 |
privacyidea/privacyidea | tests/test_lib_usercache.py | 1 | 26733 | # coding: utf-8
"""
This test file tests the lib.usercache
The lib.usercache.py only depends on the database model
"""
from contextlib import contextmanager
from mock import patch
from privacyidea.lib.error import UserError
from tests import ldap3mock
from tests.test_mock_ldap3 import LDAPDirectory
from .base import MyTestCase
from privacyidea.lib.resolver import (save_resolver, delete_resolver, get_resolver_object)
from privacyidea.lib.realm import (set_realm, delete_realm)
from privacyidea.lib.user import (User, get_username, create_user)
from privacyidea.lib.usercache import (get_cache_time,
cache_username, delete_user_cache,
EXPIRATION_SECONDS, retrieve_latest_entry, is_cache_enabled)
from privacyidea.lib.config import set_privacyidea_config
from datetime import timedelta
from datetime import datetime
from privacyidea.models import UserCache
class UserCacheTestCase(MyTestCase):
"""
Test the user on the database level
"""
PWFILE = "tests/testdata/passwd"
resolvername1 = "resolver1"
realm1 = "realm1"
username = "root"
uid = "0"
sql_realm = "sqlrealm"
sql_resolver = "SQL1"
sql_parameters = {'Driver': 'sqlite',
'Server': '/tests/testdata/',
'Database': "testusercache.sqlite",
'Table': 'users',
'Encoding': 'utf8',
'Map': '{ "username": "username", \
"userid" : "id", \
"email" : "email", \
"surname" : "name", \
"givenname" : "givenname", \
"password" : "password", \
"phone": "phone", \
"mobile": "mobile"}',
'resolver': sql_resolver,
'type': 'sqlresolver',
}
def _create_realm(self):
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": self.PWFILE,
"type.fileName": "string",
"desc.fileName": "The name of the file"})
self.assertTrue(rid > 0, rid)
added, failed = set_realm(realm=self.realm1, resolvers=[self.resolvername1])
self.assertTrue(len(added) > 0, added)
self.assertEqual(len(failed), 0)
def _delete_realm(self):
delete_realm(self.realm1)
delete_resolver(self.resolvername1)
def test_00_set_config(self):
# Save wrong data in EXPIRATION_SECONDS
set_privacyidea_config(EXPIRATION_SECONDS, "wrong")
exp_delta = get_cache_time()
self.assertEqual(exp_delta, timedelta(seconds=0))
self.assertFalse(is_cache_enabled())
# Save empty data in EXPIRATION_SECONDS
set_privacyidea_config(EXPIRATION_SECONDS, "")
exp_delta = get_cache_time()
self.assertEqual(exp_delta, timedelta(seconds=0))
self.assertFalse(is_cache_enabled())
# Save real data in EXPIRATION_SECONDS
set_privacyidea_config(EXPIRATION_SECONDS, 600)
exp_delta = get_cache_time()
self.assertEqual(exp_delta, timedelta(seconds=600))
self.assertTrue(is_cache_enabled())
def test_01_get_username_from_cache(self):
# If a username is already contained in the cache, the function
# lib.user.get_username will return the cache value
username = "cached_user"
resolver = "resolver1"
uid = "1"
expiration_delta = get_cache_time()
r = UserCache(username, username, resolver, uid, datetime.now()).save()
u_name = get_username(uid, resolver)
self.assertEqual(u_name, username)
        # A non-existing user is not in the cache and returns an empty username
u_name = get_username(uid, "resolver_does_not_exist")
self.assertEqual(u_name, "")
def test_02_get_resolvers(self):
# enable user cache
set_privacyidea_config(EXPIRATION_SECONDS, 600)
# create realm
self._create_realm()
# delete user_cache
r = delete_user_cache()
self.assertTrue(r >= 0)
# The username is not in the cache. It is fetched from the resolver
# At the same time the cache is filled.
user = User(self.username, self.realm1)
self.assertEqual(user.login, self.username)
# The user ID is fetched from the resolver
self.assertEqual(user.uid, self.uid)
# Now, the cache should have exactly one entry
entry = UserCache.query.one()
self.assertEqual(entry.user_id, self.uid)
self.assertEqual(entry.username, self.username)
self.assertEqual(entry.resolver, self.resolvername1)
ts = entry.timestamp
# delete the resolver, which also purges the cache
self._delete_realm()
# manually re-add the entry from above
UserCache(self.username, self.username, self.resolvername1,
self.uid, ts).save()
# the username is fetched from the cache
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, self.username)
# delete the cache
r = delete_user_cache()
# try to fetch the username. It is not in the cache and the
# resolver does not exist anymore.
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, "")
def test_03_get_identifiers(self):
# create realm
self._create_realm()
# delete user_cache
r = delete_user_cache()
self.assertTrue(r >= 0)
# The username is not in the cache. It is fetched from the resolver
# At the same time the cache is filled. Implicitly we test the
# _get_resolvers!
user = User(self.username, self.realm1, self.resolvername1)
uids = user.get_user_identifiers()
self.assertEqual(user.login, self.username)
self.assertEqual(user.uid, self.uid)
# Now, the cache should have exactly one entry
entry = UserCache.query.one()
self.assertEqual(entry.user_id, self.uid)
self.assertEqual(entry.username, self.username)
self.assertEqual(entry.resolver, self.resolvername1)
ts = entry.timestamp
# delete the resolver, which also purges the cache
self._delete_realm()
# manually re-add the entry from above
UserCache(self.username, self.username, self.resolvername1,
self.uid, ts).save()
# the username is fetched from the cache
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, self.username)
# The `User` class also fetches the UID from the cache
user2 = User(self.username, self.realm1, self.resolvername1)
self.assertEqual(user2.uid, self.uid)
# delete the cache
r = delete_user_cache()
# try to fetch the username. It is not in the cache and the
# resolver does not exist anymore.
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, "")
# similar case for the `User` class
# The `User` class also tries to fetch the UID from the cache
with self.assertRaises(UserError):
user3 = User(self.username, self.realm1, self.resolvername1)
def test_04_delete_cache(self):
now = datetime.now()
UserCache("hans1", "hans1", "resolver1", "uid1", now).save()
UserCache("hans2", "hans1", "resolver2", "uid2", now).save()
r = UserCache.query.filter(UserCache.username == "hans1").first()
self.assertTrue(r)
r = UserCache.query.filter(UserCache.username == "hans2").first()
self.assertTrue(r)
# delete hans1
delete_user_cache(username="hans1")
r = UserCache.query.filter(UserCache.username == "hans1").first()
self.assertFalse(r)
r = UserCache.query.filter(UserCache.username == "hans2").first()
self.assertTrue(r)
# delete resolver2
delete_user_cache(resolver="resolver2")
r = UserCache.query.filter(UserCache.username == "hans1").first()
self.assertFalse(r)
r = UserCache.query.filter(UserCache.username == "hans2").first()
self.assertFalse(r)
def test_05_multiple_entries(self):
# two consistent entries
now = datetime.now()
UserCache("hans1", "hans1", "resolver1", "uid1", now - timedelta(seconds=60)).save()
UserCache("hans1", "hans1", "resolver1", "uid1", now).save()
r = UserCache.query.filter(UserCache.username == "hans1", UserCache.resolver == "resolver1")
self.assertEqual(r.count(), 2)
u_name = get_username("uid1", "resolver1")
self.assertEqual(u_name, "hans1")
r = delete_user_cache()
# two inconsistent entries: most recent entry (ordered by datetime) wins
UserCache("hans2", "hans2", "resolver1", "uid1", now).save()
UserCache("hans1", "hans1", "resolver1", "uid1", now - timedelta(seconds=60)).save()
r = UserCache.query.filter(UserCache.user_id == "uid1", UserCache.resolver == "resolver1")
self.assertEqual(r.count(), 2)
u_name = get_username("uid1", "resolver1")
self.assertEqual(u_name, "hans2")
# Clean up the cache
r = delete_user_cache()
def test_06_implicit_cache_population(self):
self._create_realm()
# testing `get_username`
self.assertEqual(UserCache.query.count(), 0)
# the cache is empty, so the username is read from the resolver
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(self.username, u_name)
# it should be part of the cache now
r = UserCache.query.filter(UserCache.user_id == self.uid, UserCache.resolver == self.resolvername1).one()
self.assertEqual(self.username, r.username)
# Apart from that, the cache should be empty.
self.assertEqual(UserCache.query.count(), 1)
r = delete_user_cache()
# testing `User()`, but this time we add an already-expired entry to the cache
self.assertEqual(UserCache.query.count(), 0)
UserCache(self.username, self.username,
self.resolvername1, 'fake_uid', datetime.now() - timedelta(weeks=50)).save()
# cache contains an expired entry, uid is read from the resolver (we can verify
# that the cache entry is indeed not queried as it contains 'fake_uid' instead of the correct uid)
user = User(self.username, self.realm1, self.resolvername1)
self.assertEqual(user.uid, self.uid)
# a new entry should have been added to the cache now
r = retrieve_latest_entry((UserCache.username == self.username) & (UserCache.resolver == self.resolvername1))
self.assertEqual(self.uid, r.user_id)
# But the expired entry is also still in the cache
self.assertEqual(UserCache.query.count(), 2)
r = delete_user_cache()
self._delete_realm()
def _populate_cache(self):
self.assertEqual(UserCache.query.count(), 0)
# initially populate the cache with three entries
timestamp = datetime.now()
UserCache("hans1", "hans1", self.resolvername1, "uid1", timestamp).save()
UserCache("hans2", "hans2", self.resolvername1, "uid2", timestamp - timedelta(weeks=50)).save()
UserCache("hans3", "hans3", "resolver2", "uid2", timestamp).save()
self.assertEqual(UserCache.query.count(), 3)
def test_07_invalidate_save_resolver(self):
self._create_realm()
self._populate_cache()
# call save_resolver on resolver1, which should invalidate all entries of "resolver1"
# (even the expired 'hans2' one)
save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": self.PWFILE,
"type.fileName": "string",
"desc.fileName": "Some change"
})
self.assertEqual(UserCache.query.count(), 1)
# Only hans3 in resolver2 should still be in the cache
# We can use get_username to ensure it is fetched from the cache
# because resolver2 does not actually exist
u_name = get_username("uid2", "resolver2")
self.assertEqual("hans3", u_name)
delete_user_cache()
self._delete_realm()
def test_08_invalidate_delete_resolver(self):
self._create_realm()
self._populate_cache()
# call delete_resolver on resolver1, which should invalidate all of its entries
self._delete_realm()
self.assertEqual(UserCache.query.count(), 1)
# Only hans3 in resolver2 should still be in the cache
u_name = get_username("uid2", "resolver2")
self.assertEqual("hans3", u_name)
delete_user_cache()
def _create_sql_realm(self):
rid = save_resolver(self.sql_parameters)
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.sql_realm, [self.sql_resolver])
self.assertEqual(len(failed), 0)
self.assertEqual(len(added), 1)
def _delete_sql_realm(self):
delete_realm(self.sql_realm)
delete_resolver(self.sql_resolver)
def test_09_invalidate_edit_user(self):
# Validate that editing users actually invalidates the cache. For that, we first need an editable resolver
self._create_sql_realm()
# The cache is initially empty
self.assertEqual(UserCache.query.count(), 0)
# The following adds an entry to the cache
user = User(login="wordpressuser", realm=self.sql_realm)
self.assertEqual(UserCache.query.count(), 1)
uinfo = user.info
self.assertEqual(uinfo.get("givenname", ""), "")
user.update_user_info({"givenname": "wordy"})
uinfo = user.info
self.assertEqual(uinfo.get("givenname"), "wordy")
# This should have removed the entry from the cache
self.assertEqual(UserCache.query.count(), 0)
# But now it gets added again
user2 = User(login="wordpressuser", realm=self.sql_realm)
self.assertEqual(UserCache.query.count(), 1)
# Change it back for the other tests
user.update_user_info({"givenname": ""})
uinfo = user.info
self.assertEqual(uinfo.get("givenname", ""), "")
self.assertEqual(UserCache.query.count(), 0)
self._delete_sql_realm()
def test_10_invalidate_delete_user(self):
# Validate that deleting users actually invalidates the cache. For that, we first need an editable resolver
self._create_sql_realm()
# The cache is initially empty
self.assertEqual(UserCache.query.count(), 0)
# The following adds an entry to the cache
user = User(login="wordpressuser", realm=self.sql_realm)
self.assertEqual(UserCache.query.count(), 1)
uinfo = user.info
user.delete()
# This should have removed the entry from the cache
self.assertEqual(UserCache.query.count(), 0)
# We add the user again for the other tests
create_user(self.sql_resolver, uinfo)
self.assertEqual(UserCache.query.count(), 0)
self._delete_sql_realm()
@contextmanager
def _patch_datetime_now(self, target, delta=timedelta(days=1)):
with patch(target) as mock_datetime:
mock_datetime.now.side_effect = lambda: datetime.now() + delta
mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
yield mock_datetime
def test_11_cache_expiration(self):
# delete user_cache
r = delete_user_cache()
self.assertTrue(r >= 0)
# populate the cache with artificial, somewhat "old", but still relevant data
timestamp = datetime.now() - timedelta(seconds=300)
UserCache("hans1", "hans1", "resolver1", "uid1", timestamp).save()
UserCache("hans2", "hans2", "resolver1", "uid2", timestamp).save()
# check that the cache is indeed queried
self.assertEqual(get_username("uid1", "resolver1"), "hans1")
self.assertEqual(User("hans2", "realm1", "resolver1").uid, "uid2")
# check that the (non-existent) resolver is queried
# for entries not contained in the cache
self.assertEqual(get_username("uid3", "resolver1"), "")
# TODO: Interestingly, if we mock `datetime` here to increase the time by one
# day, this test works, but a subsequent test (test_ui_certificate) will fail
# with weird error messages. So we do not use the datetime mock for now.
#with self._patch_datetime_now('privacyidea.lib.usercache.datetime.datetime') as mock_datetime:
with patch('privacyidea.lib.usercache.get_cache_time') as mock_get_cache_time:
# Instead, we just decrease the cache time from 600 to 60 seconds,
# which causes the entries above to be considered expired
mock_get_cache_time.return_value = timedelta(seconds=60)
# check that the cached entries are not queried anymore
self.assertEqual(UserCache.query.count(), 2)
self.assertEqual(get_username("uid1", "resolver1"), "")
with self.assertRaises(UserError):
User("hans2", "realm1", "resolver1")
self.assertEqual(get_username("uid3", "resolver1"), "")
# We add another, "current" entry
UserCache("hans4", "hans4", "resolver1", "uid4", datetime.now()).save()
self.assertEqual(UserCache.query.count(), 3)
# we now remove old entries, only the newest remains
delete_user_cache(expired=True)
self.assertEqual(UserCache.query.count(), 1)
self.assertEqual(UserCache.query.one().user_id, "uid4")
# clean up
delete_user_cache()
def test_12_multiple_resolvers(self):
# one realm, two SQL resolvers
parameters_a = self.sql_parameters.copy()
# first resolver only contains users with phone numbers
parameters_a['Where'] = 'phone LIKE %'
parameters_a['resolver'] = 'reso_a'
rid_a = save_resolver(parameters_a)
self.assertTrue(rid_a > 0, rid_a)
# second resolver contains all users
parameters_b = self.sql_parameters.copy()
parameters_b['resolver'] = 'reso_b'
rid_b = save_resolver(parameters_b)
self.assertTrue(rid_b > 0, rid_b)
# First ask reso_a, then reso_b
(added, failed) = set_realm(self.sql_realm, ['reso_a', 'reso_b'], {
'reso_a': 1,
'reso_b': 2
})
self.assertEqual(len(failed), 0)
self.assertEqual(len(added), 2)
# Now, query the user and populate the cache
self.assertEqual(UserCache.query.count(), 0)
user1 = User('wordpressuser', self.sql_realm)
self.assertEqual(user1.uid, '6')
# Assert it was found in reso_b (as it does not have a phone number)!
self.assertEqual(user1.resolver, 'reso_b')
self.assertEqual(UserCache.query.filter(UserCache.username == 'wordpressuser',
UserCache.user_id == 6).one().resolver,
'reso_b')
# Add a phone number. We do not use the User API to do that to simulate that the change is performed
# out of privacyIDEA's control. Using `update_user_info` would invalidate the cache, which would be unrealistic.
info = user1.info
new_info = info.copy()
new_info['phone'] = '123456'
get_resolver_object('reso_a').update_user(user1.uid, new_info)
# Ensure that the user's association with reso_b is still cached.
self.assertEqual(UserCache.query.filter(UserCache.username == 'wordpressuser',
UserCache.user_id == 6).one().resolver,
'reso_b')
# Now, it should be located in reso_a!
user2 = User('wordpressuser', self.sql_realm)
self.assertEqual(user2.uid, '6')
self.assertEqual(user2.resolver, 'reso_a')
# ... but the cache still contains entries for both!
resolver_query = UserCache.query.filter(UserCache.username == 'wordpressuser',
UserCache.user_id == 6).order_by(UserCache.timestamp.desc())
cached_resolvers = [entry.resolver for entry in resolver_query.all()]
self.assertEqual(cached_resolvers, ['reso_a', 'reso_b'])
# Remove the phone number.
get_resolver_object('reso_a').update_user(user1.uid, {'phone': None})
delete_realm(self.sql_realm)
delete_resolver('reso_a')
delete_resolver('reso_b')
def test_13_cache_username(self):
self.counter = 0
def get_username(uid, resolver):
self.counter += 1
return "user1"
r = cache_username(get_username, "uid1", "reso1")
self.assertEqual(r, "user1")
self.assertEqual(self.counter, 1)
# The second call does not increase the counter, since the result is fetched from the cache
r = cache_username(get_username, "uid1", "reso1")
self.assertEqual(r, "user1")
self.assertEqual(self.counter, 1)
def test_99_unset_config(self):
# Test early exit!
# Assert that the function `retrieve_latest_entry` is called if the cache is enabled
with patch('privacyidea.lib.usercache.retrieve_latest_entry') as mock_retrieve:
mock_retrieve.return_value = None
get_username('some-userid', 'resolver1')
self.assertEqual(mock_retrieve.call_count, 1)
set_privacyidea_config(EXPIRATION_SECONDS, 0)
self.assertFalse(is_cache_enabled())
# Assert that the function `retrieve_latest_entry` is not called anymore
with patch('privacyidea.lib.usercache.retrieve_latest_entry') as mock_retrieve:
mock_retrieve.return_value = None
get_username('some-userid', 'resolver1')
self.assertEqual(mock_retrieve.call_count, 0)
class TestUserCacheMultipleLoginAttributes(MyTestCase):
ldap_realm = "ldaprealm"
ldap_resolver = "ldap1"
ldap_parameters = {'LDAPURI': 'ldap://localhost',
'LDAPBASE': 'o=test',
'BINDDN': 'cn=manager,ou=example,o=test',
'BINDPW': 'ldaptest',
'LOGINNAMEATTRIBUTE': 'cn, email',
'LDAPSEARCHFILTER': '(cn=*)',
'USERINFO': '{"phone" : "telephoneNumber", '
'"mobile" : "mobile"'
', "email" : "email", '
'"surname" : "sn", '
'"givenname" : "givenName" }',
'UIDTYPE': 'DN',
'CACHE_TIMEOUT': 0,
'resolver': ldap_resolver,
'type': 'ldapresolver',
}
def _create_ldap_realm(self):
rid = save_resolver(self.ldap_parameters)
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.ldap_realm, [self.ldap_resolver])
self.assertEqual(len(failed), 0)
self.assertEqual(len(added), 1)
def _delete_ldap_realm(self):
delete_realm(self.ldap_realm)
delete_resolver(self.ldap_resolver)
@classmethod
def setUpClass(cls):
MyTestCase.setUpClass()
set_privacyidea_config(EXPIRATION_SECONDS, 600)
@classmethod
def tearDownClass(cls):
set_privacyidea_config(EXPIRATION_SECONDS, 0)
MyTestCase.tearDownClass()
@ldap3mock.activate
def test_01_secondary_login_attribute(self):
ldap3mock.setLDAPDirectory(LDAPDirectory)
self._create_ldap_realm()
# Populate the user cache, check its contents
user1 = User('alice', self.ldap_realm)
self.assertEqual(user1.resolver, self.ldap_resolver)
self.assertEqual(user1.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user1.login, "alice")
self.assertEqual(user1.used_login, "alice")
entry = UserCache.query.one()
self.assertEqual(entry.user_id, user1.uid)
self.assertEqual(entry.used_login, "alice")
self.assertEqual(entry.username, "alice")
self.assertEqual(entry.resolver, self.ldap_resolver)
# query again, user cache does not change
user2 = User('alice', self.ldap_realm)
self.assertEqual(user2.resolver, self.ldap_resolver)
self.assertEqual(user2.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user2.login, "alice")
self.assertEqual(user2.used_login, "alice")
self.assertEqual(UserCache.query.count(), 1)
# use secondary login attribute, usercache has a new entry with secondary login attribute
user3 = User('[email protected]', self.ldap_realm)
self.assertEqual(user3.resolver, self.ldap_resolver)
self.assertEqual(user3.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user3.login, "alice")
self.assertEqual(user3.used_login, "[email protected]")
entries = UserCache.query.filter_by(user_id="cn=alice,ou=example,o=test").order_by(UserCache.id).all()
self.assertEqual(len(entries), 2)
entry = entries[-1]
self.assertEqual(entry.user_id, user1.uid)
self.assertEqual(entry.used_login, "[email protected]")
self.assertEqual(entry.username, "alice")
self.assertEqual(entry.resolver, self.ldap_resolver)
# use secondary login attribute again, login name is fetched correctly
user4 = User('[email protected]', self.ldap_realm)
self.assertEqual(user4.resolver, self.ldap_resolver)
self.assertEqual(user4.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user4.login, "alice")
self.assertEqual(user4.used_login, "[email protected]")
# still only two entries in the cache
entries = UserCache.query.filter_by(user_id="cn=alice,ou=example,o=test").order_by(UserCache.id).all()
self.assertEqual(len(entries), 2)
# get the primary login name
login_name = get_username("cn=alice,ou=example,o=test", self.ldap_resolver)
self.assertEqual(login_name, "alice")
# still only two entries in the cache
entries = UserCache.query.filter_by(user_id="cn=alice,ou=example,o=test").order_by(UserCache.id).all()
self.assertEqual(len(entries), 2)
self._delete_ldap_realm()
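# Editor's note (illustrative, not part of the privacyIDEA test suite): outside
# of tests the user cache is enabled the same way setUpClass does it above,
# purely through the EXPIRATION_SECONDS config key; 600 seconds is an arbitrary value.
#
#     from privacyidea.lib.config import set_privacyidea_config
#     from privacyidea.lib.usercache import EXPIRATION_SECONDS, is_cache_enabled
#
#     set_privacyidea_config(EXPIRATION_SECONDS, 600)
#     assert is_cache_enabled()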
| agpl-3.0 |
eino-makitalo/odoo | addons/hr_payroll_account/wizard/__init__.py | 433 | 1116 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |