repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
bazz-erp/erpnext | erpnext/setup/doctype/company/company.py | 1 | 15659 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os
from frappe import _
from frappe.utils import cint, today, formatdate
import frappe.defaults
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
class Company(Document):
def onload(self):
load_address_and_contact(self, "company")
self.get("__onload")["transactions_exist"] = self.check_if_transactions_exist()
def check_if_transactions_exist(self):
exists = False
for doctype in ["Sales Invoice", "Delivery Note", "Sales Order", "Quotation",
"Purchase Invoice", "Purchase Receipt", "Purchase Order", "Supplier Quotation"]:
if frappe.db.sql("""select name from `tab%s` where company=%s and docstatus=1
limit 1""" % (doctype, "%s"), self.name):
exists = True
break
return exists
def validate(self):
self.validate_abbr()
self.validate_default_accounts()
self.validate_currency()
self.validate_coa_input()
self.validate_perpetual_inventory()
def validate_abbr(self):
if not self.abbr:
self.abbr = ''.join([c[0] for c in self.company_name.split()]).upper()
self.abbr = self.abbr.strip()
if self.get('__islocal') and len(self.abbr) > 5:
frappe.throw(_("Abbreviation cannot have more than 5 characters"))
if not self.abbr.strip():
frappe.throw(_("Abbreviation is mandatory"))
if frappe.db.sql("select abbr from tabCompany where name!=%s and abbr=%s", (self.name, self.abbr)):
frappe.throw(_("Abbreviation already used for another company"))
def validate_default_accounts(self):
for field in ["default_bank_account", "default_cash_account",
"default_receivable_account", "default_payable_account",
"default_expense_account", "default_income_account",
"stock_received_but_not_billed", "stock_adjustment_account",
"expenses_included_in_valuation", "default_payroll_payable_account",
"default_deferred_checks_account"]:
if self.get(field):
for_company = frappe.db.get_value("Account", self.get(field), "company")
if for_company != self.name:
frappe.throw(_("Account {0} does not belong to company: {1}")
.format(self.get(field), self.name))
def validate_currency(self):
self.previous_default_currency = frappe.db.get_value("Company", self.name, "default_currency")
if self.default_currency and self.previous_default_currency and \
self.default_currency != self.previous_default_currency and \
self.check_if_transactions_exist():
frappe.throw(_("Cannot change company's default currency, because there are existing transactions. Transactions must be cancelled to change the default currency."))
def on_update(self):
if not frappe.db.sql("""select name from tabAccount
where company=%s and docstatus<2 limit 1""", self.name):
if not frappe.local.flags.ignore_chart_of_accounts:
self.create_default_accounts()
self.create_default_warehouses()
self.install_country_fixtures()
if not frappe.db.get_value("Cost Center", {"is_group": 0, "company": self.name}):
self.create_default_cost_center()
if not frappe.local.flags.ignore_chart_of_accounts:
self.set_default_accounts()
if self.default_cash_account:
self.set_mode_of_payment_account()
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
if hasattr(frappe.local, 'enable_perpetual_inventory') and \
self.name in frappe.local.enable_perpetual_inventory:
frappe.local.enable_perpetual_inventory[self.name] = self.enable_perpetual_inventory
frappe.clear_cache()
def install_country_fixtures(self):
path = frappe.get_app_path('erpnext', 'regional', frappe.scrub(self.country))
if os.path.exists(path.encode("utf-8")):
frappe.get_attr("erpnext.regional.{0}.setup.setup"
.format(self.country.lower()))(self)
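# Illustrative note (assumption: frappe.scrub lowercases the country name): for a
# company whose country is "India", the fixture path resolves to the regional "india"
# folder and, if that folder exists, erpnext.regional.india.setup.setup(self) is called.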
def create_default_warehouses(self):
for wh_detail in [
{"warehouse_name": _("All Warehouses"), "is_group": 1},
{"warehouse_name": _("Stores"), "is_group": 0},
{"warehouse_name": _("Work In Progress"), "is_group": 0},
{"warehouse_name": _("Finished Goods"), "is_group": 0}]:
if not frappe.db.exists("Warehouse", "{0} - {1}".format(wh_detail["warehouse_name"], self.abbr)):
stock_group = frappe.db.get_value("Account", {"account_type": "Stock",
"is_group": 1, "company": self.name})
if stock_group:
warehouse = frappe.get_doc({
"doctype":"Warehouse",
"warehouse_name": wh_detail["warehouse_name"],
"is_group": wh_detail["is_group"],
"company": self.name,
"parent_warehouse": "{0} - {1}".format(_("All Warehouses"), self.abbr) \
if not wh_detail["is_group"] else ""
})
warehouse.flags.ignore_permissions = True
warehouse.insert()
def create_default_accounts(self):
from erpnext.accounts.doctype.account.chart_of_accounts.chart_of_accounts import create_charts
create_charts(self.name, self.chart_of_accounts, self.existing_company)
frappe.db.set(self, "default_receivable_account", frappe.db.get_value("Account",
{"company": self.name, "account_type": "Receivable", "is_group": 0}))
frappe.db.set(self, "default_payable_account", frappe.db.get_value("Account",
{"company": self.name, "account_type": "Payable", "is_group": 0}))
def validate_coa_input(self):
if self.create_chart_of_accounts_based_on == "Existing Company":
self.chart_of_accounts = None
if not self.existing_company:
frappe.throw(_("Please select Existing Company for creating Chart of Accounts"))
else:
self.existing_company = None
self.create_chart_of_accounts_based_on = "Standard Template"
if not self.chart_of_accounts:
self.chart_of_accounts = "Standard"
def validate_perpetual_inventory(self):
if not self.get("__islocal"):
if cint(self.enable_perpetual_inventory) == 1 and not self.default_inventory_account:
frappe.msgprint(_("Set default inventory account for perpetual inventory"),
alert=True, indicator='orange')
def set_default_accounts(self):
self._set_default_account("default_cash_account", "Cash")
self._set_default_account("default_bank_account", "Bank")
self._set_default_account("round_off_account", "Round Off")
self._set_default_account("accumulated_depreciation_account", "Accumulated Depreciation")
self._set_default_account("depreciation_expense_account", "Depreciation")
if self.enable_perpetual_inventory:
self._set_default_account("stock_received_but_not_billed", "Stock Received But Not Billed")
self._set_default_account("default_inventory_account", "Stock")
self._set_default_account("stock_adjustment_account", "Stock Adjustment")
self._set_default_account("expenses_included_in_valuation", "Expenses Included In Valuation")
self._set_default_account("default_expense_account", "Cost of Goods Sold")
if not self.default_income_account:
self.db_set("default_income_account", frappe.db.get_value("Account",
{"account_name": _("Sales"), "company": self.name}))
if not self.default_payable_account:
self.db_set("default_payable_account", self.default_payable_account)
def _set_default_account(self, fieldname, account_type):
if self.get(fieldname):
return
account = frappe.db.get_value("Account", {"account_type": account_type,
"is_group": 0, "company": self.name})
if account:
self.db_set(fieldname, account)
def set_mode_of_payment_account(self):
cash = frappe.db.get_value('Mode of Payment', {'type': 'Cash'}, 'name')
if cash and self.default_cash_account \
and not frappe.db.get_value('Mode of Payment Account', {'company': self.name}):
mode_of_payment = frappe.get_doc('Mode of Payment', cash)
mode_of_payment.append('accounts', {
'company': self.name,
'default_account': self.default_cash_account
})
mode_of_payment.save(ignore_permissions=True)
def create_default_cost_center(self):
cc_list = [
{
'cost_center_name': self.name,
'company':self.name,
'is_group': 1,
'parent_cost_center':None
},
{
'cost_center_name':_('Main'),
'company':self.name,
'is_group':0,
'parent_cost_center':self.name + ' - ' + self.abbr
},
]
for cc in cc_list:
cc.update({"doctype": "Cost Center"})
cc_doc = frappe.get_doc(cc)
cc_doc.flags.ignore_permissions = True
if cc.get("cost_center_name") == self.name:
cc_doc.flags.ignore_mandatory = True
cc_doc.insert()
frappe.db.set(self, "cost_center", _("Main") + " - " + self.abbr)
frappe.db.set(self, "round_off_cost_center", _("Main") + " - " + self.abbr)
frappe.db.set(self, "depreciation_cost_center", _("Main") + " - " + self.abbr)
def before_rename(self, olddn, newdn, merge=False):
if merge:
frappe.throw(_("Sorry, companies cannot be merged"))
def after_rename(self, olddn, newdn, merge=False):
frappe.db.set(self, "company_name", newdn)
frappe.db.sql("""update `tabDefaultValue` set defvalue=%s
where defkey='Company' and defvalue=%s""", (newdn, olddn))
frappe.defaults.clear_cache()
def abbreviate(self):
self.abbr = ''.join([c[0].upper() for c in self.company_name.split()])
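# Example of the abbreviation scheme above (hypothetical company name):
# "Frappe Technologies Pvt Ltd".split() -> first letters joined -> "FTPL"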
def on_trash(self):
"""
Trash accounts and cost centers for this company if no gl entry exists
"""
accounts = frappe.db.sql_list("select name from tabAccount where company=%s", self.name)
cost_centers = frappe.db.sql_list("select name from `tabCost Center` where company=%s", self.name)
warehouses = frappe.db.sql_list("select name from tabWarehouse where company=%s", self.name)
rec = frappe.db.sql("SELECT name from `tabGL Entry` where company = %s", self.name)
if not rec:
frappe.db.sql("""delete from `tabBudget Account`
where exists(select name from tabBudget
where name=`tabBudget Account`.parent and company = %s)""", self.name)
for doctype in ["Account", "Cost Center", "Budget", "Party Account"]:
frappe.db.sql("delete from `tab{0}` where company = %s".format(doctype), self.name)
if not frappe.db.get_value("Stock Ledger Entry", {"company": self.name}):
frappe.db.sql("""delete from `tabWarehouse` where company=%s""", self.name)
frappe.defaults.clear_default("company", value=self.name)
# clear default accounts, warehouses from item
if warehouses:
for f in ["default_warehouse", "website_warehouse"]:
frappe.db.sql("""update tabItem set %s=NULL where %s in (%s)"""
% (f, f, ', '.join(['%s']*len(warehouses))), tuple(warehouses))
frappe.db.sql("""delete from `tabItem Reorder` where warehouse in (%s)"""
% ', '.join(['%s']*len(warehouses)), tuple(warehouses))
if accounts:
for f in ["income_account", "expense_account"]:
frappe.db.sql("""update tabItem set %s=NULL where %s in (%s)"""
% (f, f, ', '.join(['%s']*len(accounts))), tuple(accounts))
if cost_centers:
for f in ["selling_cost_center", "buying_cost_center"]:
frappe.db.sql("""update tabItem set %s=NULL where %s in (%s)"""
% (f, f, ', '.join(['%s']*len(cost_centers))), tuple(cost_centers))
# reset default company
frappe.db.sql("""update `tabSingles` set value=""
where doctype='Global Defaults' and field='default_company'
and value=%s""", self.name)
@frappe.whitelist()
def replace_abbr(company, old, new):
new = new.strip()
if not new:
frappe.throw(_("Abbr can not be blank or space"))
frappe.only_for("System Manager")
frappe.db.set_value("Company", company, "abbr", new)
def _rename_record(dt):
for d in frappe.db.sql("select name from `tab%s` where company=%s" % (dt, '%s'), company):
parts = d[0].rsplit(" - ", 1)
if len(parts) == 1 or parts[1].lower() == old.lower():
frappe.rename_doc(dt, d[0], parts[0] + " - " + new)
for dt in ["Warehouse", "Account", "Cost Center"]:
_rename_record(dt)
frappe.db.commit()
def get_name_with_abbr(name, company):
company_abbr = frappe.db.get_value("Company", company, "abbr")
parts = name.split(" - ")
if parts[-1].lower() != company_abbr.lower():
parts.append(company_abbr)
return " - ".join(parts)
def update_company_current_month_sales(company):
current_month_year = formatdate(today(), "MM-yyyy")
results = frappe.db.sql('''
select
sum(base_grand_total) as total, date_format(posting_date, '%m-%Y') as month_year
from
`tabSales Invoice`
where
date_format(posting_date, '%m-%Y')="{0}"
and docstatus = 1
and company = "{1}"
group by
month_year
'''.format(current_month_year, frappe.db.escape(company)), as_dict = True)
monthly_total = results[0]['total'] if len(results) > 0 else 0
frappe.db.set_value("Company", company, "total_monthly_sales", monthly_total)
frappe.db.commit()
def update_company_monthly_sales(company):
'''Cache past year monthly sales of every company based on sales invoices'''
from frappe.utils.goal import get_monthly_results
import json
filter_str = "company = '{0}' and status != 'Draft' and docstatus=1".format(frappe.db.escape(company))
month_to_value_dict = get_monthly_results("Sales Invoice", "base_grand_total",
"posting_date", filter_str, "sum")
frappe.db.set_value("Company", company, "sales_monthly_history", json.dumps(month_to_value_dict))
frappe.db.commit()
def cache_companies_monthly_sales_history():
companies = [d['name'] for d in frappe.get_list("Company")]
for company in companies:
update_company_monthly_sales(company)
frappe.db.commit()
@frappe.whitelist()
def get_company_details(company_name):
return frappe.get_value("Company", company_name, "type")
| gpl-3.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_macpath.py | 100 | 2000 | import macpath
from test import test_support, test_genericpath
import unittest
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
def test_normpath(self):
# Issue 5827: Make sure normpath preserves unicode
for path in (u'', u'.', u'/', u'\\', u':', u'///foo/.//bar//'):
self.assertIsInstance(macpath.normpath(path), unicode,
'normpath() returned str instead of unicode')
class MacCommonTest(test_genericpath.CommonTest):
pathmodule = macpath
def test_main():
test_support.run_unittest(MacPathTestCase, MacCommonTest)
if __name__ == "__main__":
test_main()
| mit |
Kaftanov/Cchat | chat-client/client.py | 1 | 5334 | #!/usr/bin/env python3
"""
#############################
Client application
python version: python3
based on sockets
#############################
"""
import select
import socket
import sys
import datetime
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread, QPoint
import userform
from cchatui import Ui_CchatWindow
from communication import send, receive
class RegisterError(Exception):
""" My exception for user's password """
def __init__(self, type_exception):
Exception.__init__(self)
if type_exception == 0:
self.msg = "Cchat_Client: You password isn't correct, sry"
elif type_exception == 1:
self.msg = "Unexpected exception"
def __str__(self):
return self.msg
class WorkThread(QThread):
"""
Class for working with a PyQt thread;
it runs 'run_chat_loop()' of the 'Client' class
"""
def __init__(self):
QThread.__init__(self)
def setWorker(self, Cl):
self.Cl = Cl
def run(self):
self.Cl.run_chat_loop()
class Client:
"""
The Client contains:
prompt -- string -- needed for the command-line visual effect
Functions the client contains:
__init__
init the socket, connect, get the user name from the server
cmdloop
loop that waits for messages to write (send/receive)
"""
def __init__(self, server_host=None, server_port=None):
""" init client object """
if server_host is None:
self.server_host = 'localhost'
else:
self.server_host = server_host
if server_port is None:
self.server_port = 3490
else:
self.server_port = server_port
# Initial prompt
self.user_name = self.connect()
self.head = '%s~' % self.user_name
self.initUI()
def initUI(self):
""" Initialize pyqt form"""
application = QtWidgets.QApplication(sys.argv)
CchatWindow = QtWidgets.QMainWindow()
self.ui = Ui_CchatWindow()
self.ui.setupUi(CchatWindow)
self.ui.sendButton.clicked.connect(self.send_message)
self.ui.inputLine.returnPressed.connect(self.send_message)
CchatWindow.show()
# set thread
self.workThread = WorkThread()
self.workThread.setWorker(self)
self.workThread.start()
sys.exit(application.exec_())
def print_into_box(self, data):
""" Printing data into text box"""
self.ui.textBox.append(data)
pass
def send_message(self):
""" Send message into socket"""
# Warning: sending may raise an error if the socket is not connected
data = self.ui.inputLine.text()
time = str(datetime.datetime.now().time())[:16]
self.print_into_box(self.head + time + ':' + data)
self.ui.inputLine.clear()
send(self.sock, data)
def connect(self):
""" Checking registration/login data"""
is_authenticate = False
while not is_authenticate:
try:
form = userform.create_userform()
if form is None:
sys.exit('KeyboardInterrupt from user_form')
data = {}
if form[0] == 0:
data = form[1]
data['type'] = 'log'
elif form[0] == 1:
data = form[1]
data['type'] = 'reg'
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.server_host, self.server_port))
send(self.sock, data)
receive_data = receive(self.sock)
if receive_data == 'Error':
raise RegisterError(0)
elif receive_data == 'Success':
is_authenticate = True
return data['login']
else:
raise RegisterError(1)
except socket.error as error:
print('Cchat_Client: Could not connect to chat server')
print(error)
sys.exit(1)
except RegisterError as msg:
print(msg)
print("Try again")
self.sock.close()
except KeyboardInterrupt as signal:
print(signal)
if self.sock:
self.sock.close()
sys.exit(1)
def run_chat_loop(self):
is_shutdown = True
while is_shutdown:
in_fds, out_fds, err_fds = select.select([self.sock], [], [])
for sock in in_fds:
if sock is self.sock:
data = receive(self.sock)
if not data:
self.print_into_box('Server was shutdown')
is_shutdown = False
break
else:
if not data['message']:
continue
message = data['head'] + data['message']
print(message)
self.print_into_box(message)
@staticmethod
def time():
return str(datetime.datetime.now().time())[:16]
if __name__ == "__main__":
Client()
| gpl-3.0 |
mikewiebe-ansible/ansible | test/units/modules/network/iosxr/test_iosxr_user.py | 48 | 4023 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.iosxr import iosxr_user
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule, load_fixture
class TestIosxrUserModule(TestIosxrModule):
module = iosxr_user
def setUp(self):
super(TestIosxrUserModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_user.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_user.load_config')
self.load_config = self.mock_load_config.start()
self.mock_is_cliconf = patch('ansible.modules.network.iosxr.iosxr_user.is_cliconf')
self.is_cliconf = self.mock_is_cliconf.start()
def tearDown(self):
super(TestIosxrUserModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_is_cliconf.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.get_config.return_value = load_fixture('iosxr_user_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
self.is_cliconf.return_value = True
def test_iosxr_user_delete(self):
set_module_args(dict(name='ansible', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['no username ansible'])
def test_iosxr_user_password(self):
set_module_args(dict(name='ansible', configured_password='test'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['username ansible secret test'])
def test_iosxr_user_purge(self):
set_module_args(dict(purge=True))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['no username ansible'])
def test_iosxr_user_group(self):
set_module_args(dict(name='ansible', group='sysadmin'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['username ansible group sysadmin'])
def test_iosxr_user_update_password_changed(self):
set_module_args(dict(name='test', configured_password='test', update_password='on_create'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'],
['username test', 'username test secret test'])
def test_iosxr_user_update_password_on_create_ok(self):
set_module_args(dict(name='ansible', configured_password='test', update_password='on_create'))
self.execute_module()
def test_iosxr_user_update_password_always(self):
set_module_args(dict(name='ansible', configured_password='test', update_password='always'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['username ansible secret test'])
def test_iosxr_user_admin_mode(self):
set_module_args(dict(name='ansible-2', configured_password='test-2', admin=True))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['username ansible-2', 'username ansible-2 secret test-2'])
| gpl-3.0 |
saketkc/statsmodels | statsmodels/sandbox/distributions/tests/_est_fit.py | 31 | 2609 | # NOTE: contains only one test, _est_cont_fit, that is renamed so that
# nose doesn't run it
# I put this here for the record and for the case when someone wants to
# verify the quality of fit
# with current parameters: relatively small sample size, default starting values
# Ran 84 tests in 401.797s
# FAILED (failures=15)
import numpy.testing as npt
import numpy as np
from scipy import stats
from .distparams import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
n_repl1 = 1000 # sample size for first run
n_repl2 = 5000 # sample size for second run, if first run fails
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
#distcont = [['genextreme', (3.3184017469423535,)]]
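# Worked example of the cut-off used in check_cont_fit below: for a true parameter
# value of 10 the allowed |estimate - true| is max(10*0.25, 0.75) = 2.5, while for a
# value of 1 it is max(0.25, 0.75) = 0.75 (the minimum threshold dominates).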
def _est_cont_fit():
# this tests the closeness of the estimated parameters to the true
# parameters with fit method of continuous distributions
# Note: is slow, some distributions don't converge with sample size <= 10000
for distname, arg in distcont:
yield check_cont_fit, distname,arg
def check_cont_fit(distname,arg):
distfn = getattr(stats, distname)
rvs = distfn.rvs(size=n_repl1,*arg)
est = distfn.fit(rvs) #,*arg) # start with default values
truearg = np.hstack([arg,[0.0,1.0]])
diff = est-truearg
txt = ''
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
np.ones(distfn.numargs+2)*thresh_min]),0)
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])
if np.any(np.isnan(est)):
raise AssertionError('nan returned in fit')
else:
if np.any((np.abs(diff) - diffthreshold) > 0.0):
## txt = 'WARNING - diff too large with small sample'
## print 'parameter diff =', diff - diffthreshold, txt
rvs = np.concatenate([rvs,distfn.rvs(size=n_repl2-n_repl1,*arg)])
est = distfn.fit(rvs) #,*arg)
truearg = np.hstack([arg,[0.0,1.0]])
diff = est-truearg
if np.any((np.abs(diff) - diffthreshold) > 0.0):
txt = 'parameter: %s\n' % str(truearg)
txt += 'estimated: %s\n' % str(est)
txt += 'diff : %s\n' % str(diff)
raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
if __name__ == "__main__":
import nose
#nose.run(argv=['', __file__])
nose.runmodule(argv=[__file__,'-s'], exit=False)
| bsd-3-clause |
nwjs/chromium.src | components/test/data/password_manager/form_classification_tests/signin_forms_test.py | 19 | 24192 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from form_classification_test import FormClassificationTest
"""Unittest class for testing signin forms.
The test methods were generated by the form annotation extension
(components/test/data/password_manager/form_annotation_extension)
"""
class SignInFormsTest(FormClassificationTest):
def test_i_360_cn(self):
self.GoTo("http://i.360.cn/login/")
self.CheckPwdField("INPUT[name='password'][type='password']",
is_pwd_creation=False)
def test_9gag_com(self):
self.GoTo("http://9gag.com/")
self.Click("A.btn-mute")
self.CheckPwdField(
"INPUT[id='login-email-password'][name='password'][type='password']",
is_pwd_creation=False)
def test_login_adf_ly(self):
self.GoTo("https://login.adf.ly/login")
self.CheckPwdField("INPUT#loginPassword[name='password'][type='password']",
is_pwd_creation=False)
def test_adobeid_na1_services_adobe_com(self):
self.GoTo("http://www.adobe.com/")
self.Click("SPAN.close > A")
self.Click("LI[class*='signin'] > BUTTON")
self.CheckPwdField(
"INPUT[id='adobeid_password'][name='password'][type='password']"
".text-center",
is_pwd_creation=False)
def test_publishers_adsterra_net(self):
self.GoTo("http://publishers.adsterra.net/login")
self.CheckPwdField("INPUT[name='password'][type='password']",
is_pwd_creation=False)
def test_ssl_allegro_pl(self):
self.GoTo("http://allegro.pl/")
SignInFormsTest.driver.maximize_window()
self.Click(
"DIV.header-namespace > DIV.user-links-wrapper > DIV.wrapper-fluid > "
"UL.user-nav > LI.login:nth-child(10) > A")
self.CheckPwdField(
"INPUT[id='password'][name='password'][type='password'].ng-untouched",
is_pwd_creation=False)
def test_www_amazon_com(self):
self.GoTo("http://www.amazon.com/")
self.Click("A[id='nav-link-yourAccount']")
self.CheckPwdField(
"INPUT[id='ap_password'][name='password'][type='password']"
".a-input-text",
is_pwd_creation=False)
def test_my_screenname_aol_com(self):
self.GoTo(
"https://my.screenname.aol.com/_cqr/login/login.psp?sitedomain="
"www.aol.com&lang=en&locale=us&authLev=0")
self.CheckPwdField(
"INPUT#pwdId1[name='password'][type='password'].inputBox",
is_pwd_creation=False)
def test_secure2_store_apple_com(self):
self.GoTo("http://www.apple.com/")
self.Click("LI[id='ac-gn-bag'].ac-gn-bag:nth-child(10) > A.ac-gn-link-bag")
self.Click("A.ac-gn-bagview-nav-link-signIn")
self.CheckPwdField(
"INPUT[id='login-password'][name='login-password'][type='password']"
".password",
is_pwd_creation=False)
def test_www_baidu_com(self):
self.GoTo("http://www.baidu.com/")
self.Click("DIV#u1 > A[name='tj_login'].lb")
self.CheckPwdField(
"INPUT#TANGRAM__PSP_8__password[name='password'][type='password']"
".pass-text-input-password",
is_pwd_creation=False)
def test_blogs_forbes_com(self):
self.GoTo("http://blogs.forbes.com/account/login/")
self.CheckPwdField(
"INPUT[id='login_form_password'][name='pass'][type='password']",
is_pwd_creation=False)
def test_secure_fly_cnet_com(self):
self.GoTo("https://secure-fly.cnet.com/uk/user/login/")
self.CheckPwdField(
"INPUT#login_password[name='login[password]'][type='password']",
is_pwd_creation=False)
def test_passport_csdn_net(self):
self.GoTo("https://passport.csdn.net/account/login")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].pass-word",
is_pwd_creation=False)
def test_signin_ebay_com(self):
self.GoTo("https://signin.ebay.com/ws/eBayISAPI.dll?SignIn")
self.CheckPwdField("DIV#pri_signin INPUT[type='password']",
is_pwd_creation=False)
def test_email_163_com(self):
self.GoTo("http://email.163.com/")
self.CheckPwdField("INPUT#pwdInput[name='password'][type='password']",
is_pwd_creation=False)
def test_en_softonic_com(self):
self.GoTo("http://en.softonic.com/")
self.Click("A#header-login-link.header-site-user-btn") # Close overlay.
self.Click("A#header-login-link.header-site-user-btn")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].field-default",
is_pwd_creation=False)
def test_id_orange_fr(self):
self.GoTo("https://id.orange.fr/auth_user/bin/auth_user.cgi")
self.CheckPwdField(
"INPUT#default_f_password[name='password'][type='password'].password",
is_pwd_creation=False)
def test_feedly_com(self):
self.GoTo("https://feedly.com/v3/auth/login")
self.CheckPwdField("INPUT[name='password'][type='password'].input-bottom",
is_pwd_creation=False)
def test_gfycat_com(self):
self.GoTo("http://gfycat.com/")
self.Click("BUTTON.login-btn")
self.CheckPwdField("INPUT[type='password']", is_pwd_creation=False)
def test_github_com(self):
self.GoTo("https://github.com/login")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].form-control",
is_pwd_creation=False)
def test_login_globo_com(self):
self.GoTo("https://login.globo.com/login/4728")
self.CheckPwdField(
"INPUT[id='password'][name='password'][type='password'].password",
is_pwd_creation=False)
def test_www_gmx_net(self):
self.GoTo("http://www.gmx.net/")
self.CheckPwdField(
"INPUT#inpLoginFreemailPassword[name='password'][type='password']",
is_pwd_creation=False)
def test_uk_godaddy_com(self):
self.GoTo("https://uk.godaddy.com/")
self.Click(
"DIV[id='sign-in'].pc-menu-item.ux-tray > A.ux-tray-toggle.menu-title")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_id_ifeng_com(self):
self.GoTo("https://id.ifeng.com/user/login")
self.CheckPwdField(
"INPUT#userLogin_pwd[name='userLogin_pwd'][type='password'].txt_270",
is_pwd_creation=False)
def test_secure_imdb_com(self):
self.GoTo("http://www.imdb.com/")
self.Click(
"LI[id='navUserMenu'].css_nav_menu:nth-child(1) > P.singleLine > "
"A[id='nblogin'].cboxElement")
self.SwitchTo("IFRAME.cboxIframe")
self.Click("A[id='imdb-toggle'].oauth-link")
self.CheckPwdField(
"INPUT[id='passwordprompt'][name='password'][type='password']",
is_pwd_creation=False)
def test_imgur_com(self):
self.GoTo("https://imgur.com/signin")
self.CheckPwdField(" INPUT[name='password'][type='password']",
is_pwd_creation=False)
def test_secure_indeed_com(self):
self.GoTo("https://secure.indeed.com/account/login")
self.CheckPwdField(
"INPUT#signin_password[name='password'][type='password']"
".input_password",
is_pwd_creation=False)
def test_www_instagram_com(self):
self.GoTo("https://www.instagram.com/")
self.Click("A._k6cv7")
self.CheckPwdField("INPUT[name='password'][type='password']._kp5f7",
is_pwd_creation=False)
def test_ssl_kakaku_com(self):
self.GoTo("https://ssl.kakaku.com/auth/id/login.asp")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].secPwFrm",
is_pwd_creation=False)
def test_kat_cr(self):
self.GoTo("https://kat.cr/")
self.Click("DIV.land-login > A.ajaxLink")
self.CheckPwdField(
"INPUT#field_password[name='password'][type='password'].botmarg5px",
is_pwd_creation=False)
def test_www_linkedin_com(self):
self.GoTo("https://www.linkedin.com/")
self.CheckPwdField(
"INPUT#login-password[name='session_password'][type='password']",
is_pwd_creation=False)
def test_login_live_com(self):
self.GoTo("https://login.live.com/login.srf")
self.CheckPwdField("INPUT[name='passwd'][type='password'].form-control",
is_pwd_creation=False)
def test_mail_ru(self):
self.GoTo("https://mail.ru/")
self.CheckPwdField(
"INPUT#mailbox__password[name='Password'][type='password']"
".mailbox__password",
is_pwd_creation=False)
def test_mega_nz(self):
self.GoTo("https://mega.nz/")
self.Click("A.top-login-button")
self.CheckPwdField(
"INPUT#login-password[name='login-password'][type='password']",
is_pwd_creation=False)
def test_member_livedoor_com(self):
self.GoTo("https://member.livedoor.com/login/")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].password",
is_pwd_creation=False)
def test_my_outbrain_com(self):
self.GoTo("https://my.outbrain.com/")
self.CheckPwdField(
"INPUT#signin-member-password[name='loginPassword'][type='password']"
".input-text",
is_pwd_creation=False)
def test_www_naver_com(self):
self.GoTo("http://www.naver.com/")
self.CheckPwdField("INPUT#pw[name='pw'][type='password']",
is_pwd_creation=False)
def test_ssl_naver_jp(self):
self.GoTo("https://ssl.naver.jp/login")
self.CheckPwdField(
"INPUT#_passwd[name='password'][type='password'].mdInputTxt03Input",
is_pwd_creation=False)
def test_www_netflix_com(self):
self.GoTo("https://www.netflix.com/gb/")
self.Click("DIV.nfHeader > A.authLinks")
self.CheckPwdField("INPUT[name='password'][type='password'].ui-text-input",
is_pwd_creation=False)
def test_passport_bilibili_com(self):
self.GoTo("https://passport.bilibili.com/login")
self.CheckPwdField("INPUT#passwdTxt[name='pwd'][type='password'].password",
is_pwd_creation=False)
def test_passport_china_com(self):
self.GoTo("http://passport.china.com/")
self.CheckPwdField("INPUT[id='55'][name='password'][type='password']",
is_pwd_creation=False)
def test_www_pinterest_com(self):
self.GoTo("https://www.pinterest.com/login/")
self.CheckPwdField("INPUT[name='password'][type='password']",
is_pwd_creation=False)
def test_www_pixnet_net(self):
self.GoTo("https://www.pixnet.net/")
self.CheckPwdField(
"INPUT[id='input-password'][name='password'][type='password']",
is_pwd_creation=False)
def test_qq_com(self):
self.GoTo("http://www.qq.com/")
self.Click("A[id='loginGrayLayout'].login")
self.SwitchTo("#login_frame")
self.CheckPwdField("INPUT[id='p'][name='p'][type='password'].password",
is_pwd_creation=False)
def test_www_rakuten_co_jp(self):
self.GoTo("https://www.rakuten.co.jp/myrakuten/login.html")
self.CheckPwdField("INPUT[id='passwd'][name='p'][type='password'].textBox",
is_pwd_creation=False)
def test_www_reddit_com(self):
self.GoTo("https://www.reddit.com/")
self.Click("SPAN.user > A.login-required")
self.CheckPwdField(
"INPUT[id='passwd_login'][name='passwd'][type='password']"
".c-form-control",
is_pwd_creation=False)
def test_login_sina_com_cn(self):
self.GoTo("https://login.sina.com.cn/signup/signin.php")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_login_skype_com(self):
self.GoTo("https://login.skype.com/login")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_www_sohu_com(self):
self.GoTo("http://www.sohu.com/")
self.CheckPwdField("INPUT[name='password'][type='password'].simple-pwd",
is_pwd_creation=False)
def test_soundcloud_com(self):
self.GoTo(
"https://soundcloud.com/connect?client_id=02gUJC0hH2ct1EGOcYXQIzRFU91c"
"72Ea&response_type=token&scope=non-expiring%20fast-connect%20purchase"
"%20upload&display=next&redirect_uri=https%3A//soundcloud.com/"
"soundcloud-callback.html")
self.CheckPwdField(
"INPUT[id='password'][name='password'][type='password'].sc-input",
is_pwd_creation=False)
def test_ssl_bbc_com(self):
self.GoTo("https://ssl.bbc.com/id/signin")
self.CheckPwdField(
"INPUT[id='bbcid_password'][name='password'][type='password'].password",
is_pwd_creation=False)
def test_openid_stackexchange_com(self):
self.GoTo("https://stackexchange.com/users/login?#log-in")
self.SwitchTo("#affiliate-signin-iframe")
self.CheckPwdField(
"INPUT[id='password'][name='password'][type='password']"
".framed-text-field",
is_pwd_creation=False)
def test_stackoverflow_com(self):
self.GoTo("https://stackoverflow.com/users/login")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_store_steampowered_com(self):
self.GoTo("https://store.steampowered.com//login/")
self.CheckPwdField(
"INPUT[id='input_password'][name='password'][type='password']"
".text_input",
is_pwd_creation=False)
def test_profile_theguardian_com(self):
self.GoTo("https://profile.theguardian.com/signin")
self.CheckPwdField(
"INPUT[id='signin_field_password'][name='password'][type='password']"
".signin-form__field--password",
is_pwd_creation=False)
def test_thepiratebay_se(self):
self.GoTo("https://thepiratebay.se/login")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_torrentz_eu(self):
self.GoTo("http://torrentz.eu/profile")
self.CheckPwdField("INPUT[id='lpass'][name='pass'][type='password'].i",
is_pwd_creation=False)
def test_login_tudou_com(self):
self.GoTo("http://login.tudou.com/")
self.CheckPwdField(
"INPUT[id='password1'][name='password1'][type='password'].password",
is_pwd_creation=False)
def test_twitter_com(self):
self.GoTo("https://twitter.com/")
self.CheckPwdField(
"INPUT[id='signin-password'][name='session[password]'][type='password']"
".flex-table-input",
is_pwd_creation=False)
def test_member_udn_com(self):
self.GoTo("https://member.udn.com/member/login.jsp")
self.CheckPwdField(
"INPUT[id='password'][name='password'][type='password'].textfield",
is_pwd_creation=False)
def test_en_uptodown_com(self):
SignInFormsTest.driver.maximize_window()
self.GoTo("http://en.uptodown.com/ubuntu")
self.Click("A.button > SPAN")
self.CheckPwdField("INPUT[id='password'][type='password']",
is_pwd_creation=False)
def test_vimeo_com(self):
self.GoTo("https://vimeo.com/")
self.Click("A.js-login_toggle")
self.CheckPwdField(
"INPUT[id='login_password'][name='password'][type='password']"
".js-login_password",
is_pwd_creation=False)
def test_web_de(self):
self.GoTo("http://web.de/")
self.Click("A.icon-freemail")
self.CheckPwdField(
"INPUT[id='inpFreemailLoginPassword'][name='password']"
"[type='password']",
is_pwd_creation=False)
def test_weibo_com(self):
self.GoTo("http://weibo.com/")
self.Click("DIV.tab > A:nth-child(2)")
self.CheckPwdField("INPUT[name='password'][type='password'].W_input",
is_pwd_creation=False)
def test_en_wikipedia_org(self):
self.GoTo("https://en.wikipedia.org/w/index.php?title=Special:UserLogin")
self.CheckPwdField(
"INPUT[id='wpPassword1'][name='wpPassword'][type='password']"
".loginPassword",
is_pwd_creation=False)
def test_www_avito_ru(self):
self.GoTo("https://www.avito.ru/profile/login")
self.CheckPwdField("INPUT[name='password'][type='password'].password-field",
is_pwd_creation=False)
def test_www_babytree_com(self):
self.GoTo("http://www.babytree.com/reg/login.php")
self.CheckPwdField(
"INPUT[name='password'][type='password'].login-input-text",
is_pwd_creation=False)
def test_www_booking_com(self):
self.GoTo("http://www.booking.com/")
self.Click("li.account_register_option div.sign_in_wrapper")
self.CheckPwdField("INPUT[name='password'][type='password']",
is_pwd_creation=False)
def test_www_buzzfeed_com(self):
self.GoTo("http://www.buzzfeed.com/")
self.Click(
"DIV.page-nav__utilities > DIV[id='nav-signin'].nav-signin > "
"DIV[id='usernav-signin'] > A[id='header-signin'].nav-signin-icon")
self.CheckPwdField(
"INPUT[name='password'][type='password'].js-user-password",
is_pwd_creation=False)
def test_www_dailymail_co_uk(self):
self.GoTo("http://www.dailymail.co.uk/home/index.html")
self.Click("A.js-login")
self.CheckPwdField(
"INPUT[id='reg-lbx-password-lightbox'][name='j_password']"
"[type='password']",
is_pwd_creation=False)
def test_www_deviantart_com(self):
self.GoTo("http://www.deviantart.com/")
self.Click("TD[id='oh-loginbutton'] > A.oh-touch")
self.CheckPwdField(
"INPUT[id='login-password'][name='password'][type='password'].itext",
is_pwd_creation=False)
def test_www_dmm_com(self):
self.GoTo("https://www.dmm.com/en/my/-/login/=/path=SgReFg__/")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_www_douyu_com(self):
self.GoTo("http://www.douyu.com/")
SignInFormsTest.driver.maximize_window()
self.Click("A.u-login")
self.CheckPwdField("FORM > P > INPUT[name='password'][type='password'].ipt",
is_pwd_creation=False)
def test_www_dropbox_com(self):
self.GoTo("https://www.dropbox.com/")
SignInFormsTest.driver.maximize_window()
self.Click("A[id='sign-in'].sign-in")
self.CheckPwdField("INPUT[name='login_password'][type='password']",
is_pwd_creation=False)
def test_www_etsy_com(self):
self.GoTo("https://www.etsy.com/")
self.Click("A[id='sign-in'].signin-header-action")
self.CheckPwdField(
"INPUT[id='password-existing'][name='password'][type='password'].text",
is_pwd_creation=False)
def test_www_facebook_com(self):
self.GoTo("https://www.facebook.com/")
self.CheckPwdField(
"INPUT[id='pass'][name='pass'][type='password'].inputtext",
is_pwd_creation=False)
def test_www_foxnews_com(self):
self.GoTo("http://www.foxnews.com/")
self.Click("A.login")
self.CheckPwdField(
"INPUT[id='capture_signIn_traditionalSignIn_password']"
"[name='traditionalSignIn_password'][type='password']"
".capture_traditionalSignIn_password",
is_pwd_creation=False)
def test_www_homedepot_com(self):
self.GoTo("http://www.homedepot.com/")
self.Click("A[id='headerMyAccount'].headerMyAccount__button")
self.Click("A.headerMyAccount__authLink")
self.CheckPwdField(
"INPUT[id='password'][name='logonPassword'][type='password']"
".width_332px",
is_pwd_creation=False)
def test_www_livejournal_com(self):
self.GoTo("http://www.livejournal.com/")
self.Click("A.s-header-item__link--login")
self.CheckPwdField(
"INPUT[id='lj_loginwidget_password'][name='password'][type='password']"
".b-input",
is_pwd_creation=False)
def test_www_mediafire_com(self):
self.GoTo(
"https://www.mediafire.com/templates/login_signup/login_signup.php")
self.CheckPwdField(
"INPUT[id='widget_login_pass'][name='login_pass'][type='password']",
is_pwd_creation=False)
def test_www_nytimes_com(self):
self.GoTo("http://www.nytimes.com/")
self.Click("DIV[id='instl_close'] > A.nytdGrowlNotifyCross"
) # Close overlay.
self.Click("BUTTON.button.login-modal-trigger")
self.CheckPwdField(
"INPUT[id='login-password'][name='password'][type='password']"
".login-password",
is_pwd_creation=False)
def test_www_popads_net(self):
self.GoTo("https://www.popads.net/")
self.CheckPwdField(
"INPUT[id='UserPassword'][name='data[User][password]']"
"[type='password']",
is_pwd_creation=False)
def test_www_quora_com(self):
self.GoTo("https://www.quora.com/")
self.CheckPwdField(
"INPUT[id*='password'][name='password'][type='password']"
".header_login_text_box",
is_pwd_creation=False)
def test_www_slideshare_net(self):
self.GoTo("https://www.slideshare.net/login")
self.CheckPwdField(
"INPUT[id='user_password'][name='user_password'][type='password']",
is_pwd_creation=False)
def test_www_so_com(self):
self.GoTo("https://www.so.com/")
self.Click("A[id='user-login']")
self.CheckPwdField(
"INPUT[id*='password'][name='password'][type='password']"
".quc-input-password",
is_pwd_creation=False)
def test_www_sunmaker_com(self):
self.GoTo("https://www.sunmaker.com/de/")
self.CheckPwdField("INPUT[name='password'][type='password'].form-control",
is_pwd_creation=False)
def test_www_tianya_cn(self):
self.GoTo("http://www.tianya.cn/")
self.CheckPwdField(
"INPUT[id='password1'][name='vpassword'][type='password'].text-ipt",
is_pwd_creation=False)
def test_www_tribunnews_com(self):
self.GoTo("http://www.tribunnews.com/")
SignInFormsTest.driver.maximize_window()
self.Click("A[id='login'].blue")
self.CheckPwdField(
"FORM[id='logform'].form INPUT[name='password']"
"[type='password'].input",
is_pwd_creation=False)
def test_www_tripadvisor_com(self):
self.GoTo("https://www.tripadvisor.com/")
self.Click("LI.login:nth-child(4) > SPAN.link.no_cpu")
self.SwitchTo("#overlayRegFrame")
self.CheckPwdField("INPUT[id='regSignIn.password'][type='password'].text",
is_pwd_creation=False)
def test_www_walmart_com(self):
self.GoTo("https://www.walmart.com/account/login")
self.CheckPwdField(
"INPUT[id='login-password'][name='login-password'][type='password']"
".js-password",
is_pwd_creation=False)
def test_www_wittyfeed_com(self):
self.GoTo("http://www.wittyfeed.com/")
self.Click("A.express")
self.CheckPwdField(
"DIV.signIn_passwordDiv > "
"INPUT[name='password'][type='password'].form-control",
is_pwd_creation=False)
def test_www_yelp_com(self):
self.GoTo("https://www.yelp.com/login")
self.CheckPwdField(
"FORM[id='ajax-login'].yform > "
"INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_www_zillow_com(self):
self.GoTo("https://www.zillow.com")
self.Click("A[id='login_opener'].zss-login-link")
self.SwitchTo("DIV#login_content > IFRAME")
self.CheckPwdField("INPUT[id='password'][name='password'][type='password']",
is_pwd_creation=False)
def test_yandex_ru(self):
self.GoTo("https://yandex.ru/")
self.CheckPwdField("INPUT[name='passwd'][type='password'].input__input",
is_pwd_creation=False)
def test_login_youku_com(self):
self.GoTo("http://login.youku.com/user/login_win")
self.CheckPwdField(
"INPUT[id='password'][name='password'][type='password'].form_input",
is_pwd_creation=False)
def test_service_zol_com_cn(self):
self.GoTo("http://service.zol.com.cn/user/siteLogin.php")
self.CheckPwdField("INPUT[id='loginPwd'][type='password']",
is_pwd_creation=False)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Dexhub/MTX | src/arch/x86/isa/insts/simd64/integer/data_reordering/unpack_and_interleave.py | 91 | 4097 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PUNPCKLBW_MMX_MMX {
unpack mmx, mmx, mmxm, ext=0, size=1
};
def macroop PUNPCKLBW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=0, size=1
};
def macroop PUNPCKLBW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=0, size=1
};
def macroop PUNPCKLWD_MMX_MMX {
unpack mmx, mmx, mmxm, ext=0, size=2
};
def macroop PUNPCKLWD_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=0, size=2
};
def macroop PUNPCKLWD_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=0, size=2
};
def macroop PUNPCKLDQ_MMX_MMX {
unpack mmx, mmx, mmxm, ext=0, size=4
};
def macroop PUNPCKLDQ_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=0, size=4
};
def macroop PUNPCKLDQ_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=0, size=4
};
def macroop PUNPCKHBW_MMX_MMX {
unpack mmx, mmx, mmxm, ext=1, size=1
};
def macroop PUNPCKHBW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=1, size=1
};
def macroop PUNPCKHBW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=1, size=1
};
def macroop PUNPCKHWD_MMX_MMX {
unpack mmx, mmx, mmxm, ext=1, size=2
};
def macroop PUNPCKHWD_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=1, size=2
};
def macroop PUNPCKHWD_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=1, size=2
};
def macroop PUNPCKHDQ_MMX_MMX {
unpack mmx, mmx, mmxm, ext=1, size=4
};
def macroop PUNPCKHDQ_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=1, size=4
};
def macroop PUNPCKHDQ_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack mmx, mmx, ufp1, ext=1, size=4
};
'''
| bsd-3-clause |
erikr/django | django/conf/locale/hu/formats.py | 504 | 1117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y.%m.%d.', # '2006.10.25.'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y.%m.%d. %H.%M.%S', # '2006.10.25. 14.30.59'
'%Y.%m.%d. %H.%M.%S.%f', # '2006.10.25. 14.30.59.000200'
'%Y.%m.%d. %H.%M', # '2006.10.25. 14.30'
'%Y.%m.%d.', # '2006.10.25.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' ' # Non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
AOSPA-L/android_external_skia | gm/rebaseline_server/compare_rendered_pictures_test.py | 67 | 4021 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test compare_rendered_pictures.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
import os
import subprocess
import sys
# Imports from within Skia
import base_unittest
import compare_rendered_pictures
import results
import gm_json # must import results first, so that gm_json will be in sys.path
class CompareRenderedPicturesTest(base_unittest.TestCase):
def test_endToEnd(self):
"""Generate two sets of SKPs, run render_pictures over both, and compare
the results."""
self._generate_skps_and_run_render_pictures(
subdir='before_patch', skpdict={
'changed.skp': 200,
'unchanged.skp': 100,
'only-in-before.skp': 128,
})
self._generate_skps_and_run_render_pictures(
subdir='after_patch', skpdict={
'changed.skp': 201,
'unchanged.skp': 100,
'only-in-after.skp': 128,
})
results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
actuals_root=self._temp_dir,
subdirs=('before_patch', 'after_patch'),
generated_images_root=self._temp_dir,
diff_base_url='/static/generated-images')
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
os.path.join(self._output_dir_actual, 'compare_rendered_pictures.json'))
def _generate_skps_and_run_render_pictures(self, subdir, skpdict):
"""Generate SKPs and run render_pictures on them.
Args:
subdir: subdirectory (within self._temp_dir) to write all files into
skpdict: {skpname: redvalue} dictionary describing the SKP files to render
"""
out_path = os.path.join(self._temp_dir, subdir)
os.makedirs(out_path)
for skpname, redvalue in skpdict.iteritems():
self._run_skpmaker(
output_path=os.path.join(out_path, skpname), red=redvalue)
# TODO(epoger): Add --mode tile 256 256 --writeWholeImage to the unittest,
# and fix its result! (imageURLs within whole-image entries are wrong when
# I tried adding that)
binary = self.find_path_to_program('render_pictures')
return subprocess.check_output([
binary,
'--clone', '1',
'--config', '8888',
'-r', out_path,
'--writeChecksumBasedFilenames',
'--writeJsonSummaryPath', os.path.join(out_path, 'summary.json'),
'--writePath', out_path])
def _run_skpmaker(self, output_path, red=0, green=0, blue=0,
width=640, height=400):
"""Runs the skpmaker binary to generate SKP with known characteristics.
Args:
output_path: Filepath to write the SKP into.
red: Value of red color channel in image, 0-255.
green: Value of green color channel in image, 0-255.
blue: Value of blue color channel in image, 0-255.
width: Width of canvas to create.
height: Height of canvas to create.
"""
binary = self.find_path_to_program('skpmaker')
return subprocess.check_output([
binary,
'--red', str(red),
'--green', str(green),
'--blue', str(blue),
'--width', str(width),
'--height', str(height),
'--writePath', str(output_path)])
def mock_get_timestamp():
"""Mock version of BaseComparisons.get_timestamp() for testing."""
return 12345678
def main():
base_unittest.main(CompareRenderedPicturesTest)
if __name__ == '__main__':
main()
| bsd-3-clause |
quarkonics/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/operations/deploy_operations.py | 1 | 47036 | '''
Deploy operations for setting up the zstack database.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import apibinding.api_actions as api_actions
import account_operations
import resource_operations as res_ops
import zstacklib.utils.sizeunit as sizeunit
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.xmlobject as xmlobject
import zstacklib.utils.lock as lock
import apibinding.inventory as inventory
import sys
import traceback
import threading
import time
#global exception information for thread usage
exc_info = []
AddKVMHostTimeOut = 10*60*1000
IMAGE_THREAD_LIMIT = 2
DEPLOY_THREAD_LIMIT = 500
def get_first_item_from_list(list_obj, list_obj_name, list_obj_value, action_name):
'''
Check whether the list is empty. If not, return the 1st item.
list_obj: the list to check and return from;
list_obj_name: the list item type name;
list_obj_value: the item's value used in the previous query;
action_name: which action is calling this function
'''
if not isinstance(list_obj, list):
raise test_util.TestError("The first parameter is not a [list] type")
if not list_obj:
raise test_util.TestError("Did not find %s: [%s], when adding %s" % (list_obj_name, list_obj_value, action_name))
if len(list_obj) > 1:
raise test_util.TestError("Find more than 1 [%s] resource with name: [%s], when adding %s. Please check your deploy.xml and make sure resource do NOT have duplicated name " % (list_obj_name, list_obj_value, action_name))
return list_obj[0]
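# Usage sketch (mirrors the calls later in this module): raises TestError unless the
# query returned exactly one match, e.g.
# zinv = get_first_item_from_list(zinvs, 'Zone', zoneName, 'L2 network')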
#Add Backup Storage
def add_backup_storage(deployConfig, session_uuid):
if xmlobject.has_element(deployConfig, 'backupStorages.sftpBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.sftpBackupStorage):
action = api_actions.AddSftpBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
action.url = bs.url_
action.username = bs.username_
action.password = bs.password_
action.hostname = bs.hostname_
action.timeout = AddKVMHostTimeOut #for some platform slowly salt execution
action.type = inventory.SFTP_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.cephBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.cephBackupStorage):
action = api_actions.AddCephBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
action.monUrls = bs.monUrls_.split(';')
if bs.poolName__:
action.poolName = bs.poolName_
action.timeout = AddKVMHostTimeOut #for some platform slowly salt execution
action.type = inventory.CEPH_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.simulatorBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.simulatorBackupStorage):
action = api_actions.AddSimulatorBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
action.url = bs.url_
action.type = inventory.SIMULATOR_BACKUP_STORAGE_TYPE
action.totalCapacity = sizeunit.get_size(bs.totalCapacity_)
action.availableCapacity = sizeunit.get_size(bs.availableCapacity_)
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add Zones
def add_zone(deployConfig, session_uuid, zone_name = None):
def _add_zone(zone, zone_duplication):
action = api_actions.CreateZoneAction()
action.sessionUuid = session_uuid
if zone_duplication == 0:
action.name = zone.name_
action.description = zone.description__
else:
action.name = generate_dup_name(zone.name_, zone_duplication, 'z')
action.description = generate_dup_name(zone.description__, zone_duplication, 'zone')
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
zinv = evt.inventory
except:
exc_info.append(sys.exc_info())
if xmlobject.has_element(zone, 'backupStorageRef'):
for ref in xmlobject.safe_list(zone.backupStorageRef):
bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=ref.text_)
bs = get_first_item_from_list(bss, 'Backup Storage', ref.text_, 'attach backup storage to zone')
action = api_actions.AttachBackupStorageToZoneAction()
action.sessionUuid = session_uuid
action.backupStorageUuid = bs.uuid
action.zoneUuid = zinv.uuid
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, 'zones.zone'):
return
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for i in range(duplication):
thread = threading.Thread(target=_add_zone, args=(zone, i, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add L2 network
def add_l2_network(deployConfig, session_uuid, l2_name = None, zone_name = None):
'''
    If a name is provided, only the L2 network with that name will be added.
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _deploy_l2_network(zone, is_vlan):
if is_vlan:
if not xmlobject.has_element(zone, "l2Networks.l2VlanNetwork"):
return
l2Network = zone.l2Networks.l2VlanNetwork
else:
if not xmlobject.has_element(zone, \
"l2Networks.l2NoVlanNetwork"):
return
l2Network = zone.l2Networks.l2NoVlanNetwork
if zone.duplication__ == None:
zone_dup = 1
else:
zone_dup = int(zone.duplication__)
for zone_ref in range(zone_dup):
zoneName = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zoneName)
zinv = get_first_item_from_list(zinvs, 'Zone', zoneName, 'L2 network')
#can only deal with single cluster duplication case.
cluster = xmlobject.safe_list(zone.clusters.cluster)[0]
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
for l2 in xmlobject.safe_list(l2Network):
if l2_name and l2_name != l2.name_:
continue
if not is_vlan or l2.duplication__ == None:
l2_dup = 1
else:
l2_dup = int(l2.duplication__)
for j in range(l2_dup):
l2Name = generate_dup_name(\
generate_dup_name(\
generate_dup_name(\
l2.name_, zone_ref, 'z')\
, cluster_ref, 'c')\
, j, 'n')
l2Des = generate_dup_name(\
generate_dup_name(\
generate_dup_name(\
l2.description_, zone_ref, 'z')\
, cluster_ref, 'c')\
, j, 'n')
if is_vlan:
l2_vlan = int(l2.vlan_) + j
if is_vlan:
action = api_actions.CreateL2VlanNetworkAction()
else:
action = api_actions.CreateL2NoVlanNetworkAction()
action.sessionUuid = session_uuid
action.name = l2Name
action.description = l2Des
action.physicalInterface = l2.physicalInterface_
action.zoneUuid = zinv.uuid
if is_vlan:
action.vlan = l2_vlan
thread = threading.Thread(\
target=_thread_for_action, \
args=(action,))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone.name_ != zone_name:
continue
_deploy_l2_network(zone, False)
_deploy_l2_network(zone, True)
wait_for_thread_done()
#Add Primary Storage
def add_primary_storage(deployConfig, session_uuid, ps_name = None, \
zone_name = None):
if not xmlobject.has_element(deployConfig, 'zones.zone'):
        test_util.test_logger('Did not find zones.zone in config, skipping primary storage deployment')
return
def _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref):
if zone_ref == 0:
zone_name = zone.name_
else:
zone_name = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)
zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'primary storage')
action = api_actions.AddSimulatorPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = generate_dup_name(generate_dup_name(pr.name_, zone_ref, 'z'), cluster_ref, 'c')
action.description = generate_dup_name(generate_dup_name(pr.description__, zone_ref, 'zone'), cluster_ref, 'cluster')
action.url = generate_dup_name(generate_dup_name(pr.url_, zone_ref, 'z'), cluster_ref, 'c')
action.type = inventory.SIMULATOR_PRIMARY_STORAGE_TYPE
action.zoneUuid = zinv.uuid
action.totalCapacity = sizeunit.get_size(pr.totalCapacity_)
action.availableCapacity = sizeunit.get_size(pr.availableCapacity_)
return action
def _deploy_primary_storage(zone):
if xmlobject.has_element(zone, 'primaryStorages.IscsiFileSystemBackendPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.IscsiFileSystemBackendPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddIscsiFileSystemBackendPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.ISCSI_FILE_SYSTEM_BACKEND_PRIMARY_STORAGE_TYPE
action.url = pr.url_
action.zoneUuid = zinv.uuid
action.chapPassword = pr.chapPassword_
action.chapUsername = pr.chapUsername_
action.sshPassword = pr.sshPassword_
action.sshUsername = pr.sshUsername_
action.hostname = pr.hostname_
action.filesystemType = pr.filesystemType_
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.localPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.localPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddLocalPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.LOCAL_STORAGE_TYPE
action.url = pr.url_
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.cephPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.cephPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddCephPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.CEPH_PRIMARY_STORAGE_TYPE
action.monUrls = pr.monUrls_.split(';')
if pr.dataVolumePoolName__:
action.dataVolumePoolName = pr.dataVolumePoolName__
if pr.rootVolumePoolName__:
action.rootVolumePoolName = pr.rootVolumePoolName__
if pr.imageCachePoolName__:
action.imageCachePoolName = pr.imageCachePoolName__
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.nfsPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddNfsPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.NFS_PRIMARY_STORAGE_TYPE
action.url = pr.url_
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.simulatorPrimaryStorage'):
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for pr in xmlobject.safe_list(zone.primaryStorages.simulatorPrimaryStorage):
for zone_ref in range(duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
for pref in xmlobject.safe_list(cluster.primaryStorageRef):
if pref.text_ == pr.name_:
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
action = _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref)
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone.name_ != zone_name:
continue
_deploy_primary_storage(zone)
wait_for_thread_done()
#Add Cluster
def add_cluster(deployConfig, session_uuid, cluster_name = None, \
zone_name = None):
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _add_cluster(action, zone_ref, cluster, cluster_ref):
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
cinv = evt.inventory
try:
if xmlobject.has_element(cluster, 'primaryStorageRef'):
for pref in xmlobject.safe_list(cluster.primaryStorageRef):
ps_name = generate_dup_name(generate_dup_name(pref.text_, zone_ref, 'z'), cluster_ref, 'c')
pinvs = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, name=ps_name)
pinv = get_first_item_from_list(pinvs, 'Primary Storage', ps_name, 'Cluster')
action_ps = api_actions.AttachPrimaryStorageToClusterAction()
action_ps.sessionUuid = session_uuid
action_ps.clusterUuid = cinv.uuid
action_ps.primaryStorageUuid = pinv.uuid
evt = action_ps.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if cluster.allL2NetworkRef__ == 'true':
#find all L2 network in zone and attach to cluster
cond = res_ops.gen_query_conditions('zoneUuid', '=', \
action.zoneUuid)
l2_count = res_ops.query_resource_count(res_ops.L2_NETWORK, \
cond, session_uuid)
l2invs = res_ops.query_resource_fields(res_ops.L2_NETWORK, \
[{'name':'zoneUuid', 'op':'=', 'value':action.zoneUuid}], \
session_uuid, ['uuid'], 0, l2_count)
else:
l2invs = []
if xmlobject.has_element(cluster, 'l2NetworkRef'):
for l2ref in xmlobject.safe_list(cluster.l2NetworkRef):
l2_name = generate_dup_name(generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c')
cond = res_ops.gen_query_conditions('zoneUuid', '=', \
action.zoneUuid)
cond = res_ops.gen_query_conditions('name', '=', l2_name, \
cond)
l2inv = res_ops.query_resource_fields(res_ops.L2_NETWORK, \
cond, session_uuid, ['uuid'])
if not l2inv:
raise test_util.TestError("Can't find l2 network [%s] in database." % l2_name)
l2invs.extend(l2inv)
for l2inv in l2invs:
action = api_actions.AttachL2NetworkToClusterAction()
action.sessionUuid = session_uuid
action.clusterUuid = cinv.uuid
action.l2NetworkUuid = l2inv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
def _deploy_cluster(zone):
if not xmlobject.has_element(zone, "clusters.cluster"):
return
if zone.duplication__ == None:
zone_duplication = 1
else:
zone_duplication = int(zone.duplication__)
for zone_ref in range(zone_duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster_name and cluster_name != cluster.name_:
continue
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
action = api_actions.CreateClusterAction()
action.sessionUuid = session_uuid
action.name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')
action.description = generate_dup_name(generate_dup_name(cluster.description__, zone_ref, 'z'), cluster_ref, 'c')
action.hypervisorType = cluster.hypervisorType_
zone_name = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)
zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'Cluster')
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_add_cluster, args=(action, zone_ref, cluster, cluster_ref, ))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
_deploy_cluster(zone)
wait_for_thread_done()
#Add Host
def add_host(deployConfig, session_uuid, host_ip = None, zone_name = None, \
cluster_name = None):
'''
    Add hosts based on an xml deploy config object.
    If zone_name, cluster_name or host_ip is provided, this function will
    only add the related hosts.
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _deploy_host(cluster, zone_ref, cluster_ref):
if not xmlobject.has_element(cluster, "hosts.host"):
return
if zone_ref == 0 and cluster_ref == 0:
cluster_name = cluster.name_
else:
cluster_name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')
cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name)
cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network')
for host in xmlobject.safe_list(cluster.hosts.host):
if host_ip and host_ip != host.managementIp_:
continue
if host.duplication__ == None:
host_duplication = 1
else:
host_duplication = int(host.duplication__)
for i in range(host_duplication):
if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE:
action = api_actions.AddKVMHostAction()
action.username = host.username_
action.password = host.password_
action.timeout = AddKVMHostTimeOut
elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE:
action = api_actions.AddSimulatorHostAction()
action.cpuCapacity = host.cpuCapacity_
action.memoryCapacity = sizeunit.get_size(host.memoryCapacity_)
action.sessionUuid = session_uuid
action.clusterUuid = cinv.uuid
action.hostTags = host.hostTags__
if zone_ref == 0 and cluster_ref == 0 and i == 0:
action.name = host.name_
action.description = host.description__
action.managementIp = host.managementIp_
else:
action.name = generate_dup_name(generate_dup_name(generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
action.description = generate_dup_name(generate_dup_name(generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
action.managementIp = generate_dup_host_ip(host.managementIp_, zone_ref, cluster_ref, i)
thread = threading.Thread(target=_thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
if not xmlobject.has_element(zone, 'clusters.cluster'):
continue
if zone.duplication__ == None:
zone_duplication = 1
else:
zone_duplication = int(zone.duplication__)
for zone_ref in range(zone_duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster_name and cluster_name != cluster.name_:
continue
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
_deploy_host(cluster, zone_ref, cluster_ref)
wait_for_thread_done()
test_util.test_logger('All add KVM host actions are done.')
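# Example usage (illustrative only; names and IP are hypothetical): the filter
# arguments of add_host allow deploying a single host from an existing config.
def _example_add_single_host(deploy_config, session_uuid):
    add_host(deploy_config, session_uuid, host_ip='192.168.0.10',
             zone_name='zone1', cluster_name='cluster1')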
#Add L3 network
def add_l3_network(deployConfig, session_uuid, l3_name = None, l2_name = None, \
zone_name = None):
'''
    add_l3_network will add L3 networks and also the related DNS, IP ranges and
    network services.
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _deploy_l3_network(l2, zone_ref, cluster_ref):
if not xmlobject.has_element(l2, "l3Networks.l3BasicNetwork"):
return
if not l2.duplication__:
l2_dup = 1
else:
l2_dup = int(l2.duplication__)
for l2_num in range(l2_dup):
for l3 in xmlobject.safe_list(l2.l3Networks.l3BasicNetwork):
if l3_name and l3_name != l3.name_:
continue
l2Name = generate_dup_name(generate_dup_name(generate_dup_name(l2.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n')
l3Name = generate_dup_name(generate_dup_name(generate_dup_name(l3.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n')
l2invs = res_ops.get_resource(res_ops.L2_NETWORK, \
session_uuid, \
name=l2Name)
l2inv = get_first_item_from_list(l2invs, \
'L2 Network', l2Name, 'L3 Network')
thread = threading.Thread(target=_do_l3_deploy, \
args=(l3, l2inv.uuid, l3Name, session_uuid, ))
wait_for_thread_queue()
thread.start()
def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid):
action = api_actions.CreateL3NetworkAction()
action.sessionUuid = session_uuid
action.description = l3.description__
if l3.system__ and l3.system__ != 'False':
action.system = 'true'
action.l2NetworkUuid = l2inv_uuid
action.name = l3Name
action.type = inventory.L3_BASIC_NETWORK_TYPE
if l3.domain_name__:
action.dnsDomain = l3.domain_name__
        try:
            evt = action.run()
        except:
            exc_info.append(sys.exc_info())
            # bail out: 'evt' is undefined below if the action failed
            return
        test_util.test_logger(jsonobject.dumps(evt))
        l3_inv = evt.inventory
#add dns
if xmlobject.has_element(l3, 'dns'):
for dns in xmlobject.safe_list(l3.dns):
action = api_actions.AddDnsToL3NetworkAction()
action.sessionUuid = session_uuid
action.dns = dns.text_
action.l3NetworkUuid = l3_inv.uuid
try:
evt = action.run()
except:
exc_info.append(sys.exc_info())
test_util.test_logger(jsonobject.dumps(evt))
#add ip range.
if xmlobject.has_element(l3, 'ipRange'):
do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid)
#add network service.
providers = {}
action = api_actions.QueryNetworkServiceProviderAction()
action.sessionUuid = session_uuid
action.conditions = []
        try:
            reply = action.run()
        except:
            exc_info.append(sys.exc_info())
            # bail out: 'reply' is undefined if the query failed
            return
for pinv in reply:
providers[pinv.name] = pinv.uuid
if xmlobject.has_element(l3, 'networkService'):
do_add_network_service(l3.networkService, l3_inv.uuid, \
providers, session_uuid)
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
l2networks = []
if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
for l2 in l2networks:
if l2_name and l2_name != l2.name_:
continue
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for zone_ref in range(duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
if zone_ref == 1 and cluster_ref == 1:
zone_ref = 0
cluster_ref = 0
_deploy_l3_network(l2, zone_ref, cluster_ref)
wait_for_thread_done()
test_util.test_logger('All add L3 Network actions are done.')
#Add Iprange
def add_ip_range(deployConfig, session_uuid, ip_range_name = None, \
zone_name= None, l3_name = None):
'''
    Called when only an IP range needs to be added. If the IP range is in the
    L3 config, add_l3_network will add the IP range directly.
    deployConfig is an xmlobject. If using the standard net_operations, please
    check net_operations.add_ip_range(test_util.IpRangeOption())
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
l3networks = []
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
l2networks = []
if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
for l2 in l2networks:
if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork))
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for zone_duplication in range(duplication):
for l3 in l3networks:
if l3_name and l3_name != l3.name_:
continue
if not xmlobject.has_element(l3, 'ipRange'):
continue
if zone_duplication == 0:
l3Name = l3.name_
else:
l3Name = generate_dup_name(l3.name_, zone_duplication, 'z')
l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name = l3Name)
l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3Name, 'IP range')
do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid, \
ip_range_name)
def do_add_ip_range(ip_range_xml_obj, l3_uuid, session_uuid, \
ip_range_name = None):
for ir in xmlobject.safe_list(ip_range_xml_obj):
if ip_range_name and ip_range_name != ir.name_:
continue
action = api_actions.AddIpRangeAction()
action.sessionUuid = session_uuid
action.description = ir.description__
action.endIp = ir.endIp_
action.gateway = ir.gateway_
action.l3NetworkUuid = l3_uuid
action.name = ir.name_
action.netmask = ir.netmask_
action.startIp = ir.startIp_
try:
evt = action.run()
except Exception as e:
exc_info.append(sys.exc_info())
raise e
test_util.test_logger(jsonobject.dumps(evt))
#Add Network Service
def add_network_service(deployConfig, session_uuid):
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
l3networks = []
for zone in xmlobject.safe_list(deployConfig.zones.zone):
l2networks = []
if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
for l2 in l2networks:
if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork))
providers = {}
action = api_actions.QueryNetworkServiceProviderAction()
action.sessionUuid = session_uuid
action.conditions = []
try:
reply = action.run()
except Exception as e:
exc_info.append(sys.exc_info())
raise e
for pinv in reply:
providers[pinv.name] = pinv.uuid
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for zone_duplication in range(duplication):
for l3 in l3networks:
if not xmlobject.has_element(l3, 'networkService'):
continue
if zone_duplication == 0:
l3_name = l3.name_
else:
l3_name = generate_dup_name(l3.name_, zone_duplication, 'z')
l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name = l3_name)
l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3_name, 'Network Service')
do_add_network_service(l3.networkService, l3_inv.uuid, \
providers, session_uuid)
def do_add_network_service(net_service_xml_obj, l3_uuid, providers, \
session_uuid):
allservices = {}
for ns in xmlobject.safe_list(net_service_xml_obj):
puuid = providers.get(ns.provider_)
if not puuid:
raise test_util.TestError('cannot find network service provider[%s], it may not have been added' % ns.provider_)
servs = []
for nst in xmlobject.safe_list(ns.serviceType):
servs.append(nst.text_)
allservices[puuid] = servs
action = api_actions.AttachNetworkServiceToL3NetworkAction()
action.sessionUuid = session_uuid
action.l3NetworkUuid = l3_uuid
action.networkServices = allservices
try:
evt = action.run()
except Exception as e:
exc_info.append(sys.exc_info())
raise e
test_util.test_logger(jsonobject.dumps(evt))
#Add Image
def add_image(deployConfig, session_uuid):
def _add_image(action):
increase_image_thread()
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
finally:
decrease_image_thread()
if not xmlobject.has_element(deployConfig, 'images.image'):
return
for i in xmlobject.safe_list(deployConfig.images.image):
for bsref in xmlobject.safe_list(i.backupStorageRef):
bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=bsref.text_)
bs = get_first_item_from_list(bss, 'backup storage', bsref.text_, 'image')
action = api_actions.AddImageAction()
action.sessionUuid = session_uuid
#TODO: account uuid will be removed later.
action.accountUuid = inventory.INITIAL_SYSTEM_ADMIN_UUID
action.backupStorageUuids = [bs.uuid]
action.bits = i.bits__
if not action.bits:
action.bits = 64
action.description = i.description__
action.format = i.format_
action.mediaType = i.mediaType_
action.guestOsType = i.guestOsType__
if not action.guestOsType:
action.guestOsType = 'unknown'
action.hypervisorType = i.hypervisorType__
action.name = i.name_
action.url = i.url_
action.timeout = 1800000
thread = threading.Thread(target = _add_image, args = (action, ))
print 'before add image1: %s' % i.url_
wait_for_image_thread_queue()
print 'before add image2: %s' % i.url_
thread.start()
print 'add image: %s' % i.url_
print 'all images add command are executed'
wait_for_thread_done(True)
print 'all images have been added'
#Add Disk Offering
def add_disk_offering(deployConfig, session_uuid):
def _add_disk_offering(disk_offering_xml_obj, session_uuid):
action = api_actions.CreateDiskOfferingAction()
action.sessionUuid = session_uuid
action.name = disk_offering_xml_obj.name_
action.description = disk_offering_xml_obj.description_
action.diskSize = sizeunit.get_size(disk_offering_xml_obj.diskSize_)
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, 'diskOfferings.diskOffering'):
return
for disk_offering_xml_obj in \
xmlobject.safe_list(deployConfig.diskOfferings.diskOffering):
thread = threading.Thread(target = _add_disk_offering, \
args = (disk_offering_xml_obj, session_uuid))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add Instance Offering
def add_instance_offering(deployConfig, session_uuid):
def _add_io(instance_offering_xml_obj, session_uuid):
action = api_actions.CreateInstanceOfferingAction()
action.sessionUuid = session_uuid
action.name = instance_offering_xml_obj.name_
action.description = instance_offering_xml_obj.description__
action.cpuNum = instance_offering_xml_obj.cpuNum_
action.cpuSpeed = instance_offering_xml_obj.cpuSpeed_
if instance_offering_xml_obj.memorySize__:
action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memorySize_)
elif instance_offering_xml_obj.memoryCapacity_:
action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memoryCapacity_)
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, \
'instanceOfferings.instanceOffering'):
return
for instance_offering_xml_obj in \
xmlobject.safe_list(deployConfig.instanceOfferings.instanceOffering):
thread = threading.Thread(target = _add_io, \
args = (instance_offering_xml_obj, session_uuid, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add VM -- Pass
def _thread_for_action(action):
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
#Add Virtual Router Offering
def add_virtual_router(deployConfig, session_uuid, l3_name = None, \
zone_name = None):
if not xmlobject.has_element(deployConfig, 'instanceOfferings.virtualRouterOffering'):
return
for i in xmlobject.safe_list(deployConfig.instanceOfferings.virtualRouterOffering):
if l3_name and l3_name != i.managementL3NetworkRef.text_:
continue
if zone_name and zone_name != i.zoneRef.text_:
continue
print "continue l3_name: %s; zone_name: %s" % (l3_name, zone_name)
action = api_actions.CreateVirtualRouterOfferingAction()
action.sessionUuid = session_uuid
action.name = i.name_
action.description = i.description__
action.cpuNum = i.cpuNum_
action.cpuSpeed = i.cpuSpeed_
if i.memorySize__:
action.memorySize = sizeunit.get_size(i.memorySize_)
elif i.memoryCapacity_:
action.memorySize = sizeunit.get_size(i.memoryCapacity_)
action.isDefault = i.isDefault__
action.type = 'VirtualRouter'
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=i.zoneRef.text_)
zinv = get_first_item_from_list(zinvs, 'zone', i.zoneRef.text_, 'virtual router offering')
action.zoneUuid = zinv.uuid
cond = res_ops.gen_query_conditions('zoneUuid', '=', zinv.uuid)
cond1 = res_ops.gen_query_conditions('name', '=', \
i.managementL3NetworkRef.text_, cond)
minvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, \
session_uuid)
minv = get_first_item_from_list(minvs, 'Management L3 Network', i.managementL3NetworkRef.text_, 'virtualRouterOffering')
action.managementNetworkUuid = minv.uuid
if xmlobject.has_element(i, 'publicL3NetworkRef'):
cond1 = res_ops.gen_query_conditions('name', '=', \
i.publicL3NetworkRef.text_, cond)
pinvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, \
session_uuid)
pinv = get_first_item_from_list(pinvs, 'Public L3 Network', i.publicL3NetworkRef.text_, 'virtualRouterOffering')
action.publicNetworkUuid = pinv.uuid
iinvs = res_ops.get_resource(res_ops.IMAGE, session_uuid, \
name=i.imageRef.text_)
iinv = get_first_item_from_list(iinvs, 'Image', i.imageRef.text_, 'virtualRouterOffering')
action.imageUuid = iinv.uuid
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
def deploy_initial_database(deploy_config):
operations = [
add_backup_storage,
add_zone,
add_l2_network,
add_primary_storage,
add_cluster,
add_host,
add_l3_network,
add_image,
add_disk_offering,
add_instance_offering,
add_virtual_router
]
for operation in operations:
session_uuid = account_operations.login_as_admin()
try:
operation(deploy_config, session_uuid)
except Exception as e:
            test_util.test_logger('[Error] zstack deployment met an exception when doing: %s. The real exception is:' % operation.__name__)
print('----------------------Exception Reason------------------------')
traceback.print_exc(file=sys.stdout)
print('-------------------------Reason End---------------------------\n')
raise e
finally:
account_operations.logout(session_uuid)
test_util.test_logger('[Done] zstack initial database was created successfully.')
def generate_dup_name(origin_name, num, prefix=None):
if num == 0:
return origin_name
if prefix:
return str(origin_name) + '-' + str(prefix) + str(num)
else:
return str(origin_name) + '-' + str(num)
def generate_dup_host_ip(origin_ip, zone_ref, cluster_ref, host_ref):
ip_fields = origin_ip.split('.')
ip_fields[1] = str(int(ip_fields[1]) + zone_ref)
ip_fields[2] = str(int(ip_fields[2]) + cluster_ref)
ip_fields[3] = str(int(ip_fields[3]) + host_ref)
return '.'.join(ip_fields)
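# Worked examples (illustrative only) for the duplication helpers above:
def _example_duplication_naming():
    assert generate_dup_name('zone1', 0, 'z') == 'zone1'
    assert generate_dup_name('zone1', 2, 'z') == 'zone1-z2'
    assert generate_dup_host_ip('10.0.0.10', 1, 2, 3) == '10.1.2.13'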
image_thread_queue = 0
@lock.lock('image_thread')
def increase_image_thread():
global image_thread_queue
image_thread_queue += 1
@lock.lock('image_thread')
def decrease_image_thread():
global image_thread_queue
image_thread_queue -= 1
def wait_for_image_thread_queue():
while image_thread_queue >= IMAGE_THREAD_LIMIT:
time.sleep(1)
print 'image_thread_queue: %d' % image_thread_queue
def wait_for_thread_queue():
while threading.active_count() > DEPLOY_THREAD_LIMIT:
check_thread_exception()
time.sleep(1)
def cleanup_exc_info():
    # 'global' is required here; a bare assignment would only rebind a local
    # name and the recorded exceptions would never be cleared.
    global exc_info
    exc_info = []
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
cleanup_exc_info()
raise info1, None, info2
def wait_for_thread_done(report = False):
while threading.active_count() > 1:
check_thread_exception()
time.sleep(1)
if report:
print 'thread count: %d' % threading.active_count()
check_thread_exception()
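# The deploy functions above all share the same throttled fan-out pattern;
# a condensed sketch (illustrative only):
def _example_threaded_actions(actions):
    for action in actions:
        thread = threading.Thread(target=_thread_for_action, args=(action,))
        wait_for_thread_queue()  # block while active threads exceed DEPLOY_THREAD_LIMIT
        thread.start()
    wait_for_thread_done()  # join outstanding threads and re-raise any stored exception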
| apache-2.0 |
nooperpudd/pulsar | examples/httpbin/manage.py | 5 | 12153 | '''Pulsar HTTP test application::
python manage.py
Implementation
======================
.. autoclass:: HttpBin
:members:
:member-order: bysource
Server Hooks
===================
This example shows how to use
:ref:`server hooks <setting-section-application-hooks>` to log each request
.. automodule:: examples.httpbin.config
:members:
'''
import os
import sys
import string
import mimetypes
from itertools import repeat, chain
from random import random
from pulsar import HttpRedirect, HttpException, version, JAPANESE, CHINESE
from pulsar.utils.httpurl import (Headers, ENCODE_URL_METHODS,
ENCODE_BODY_METHODS)
from pulsar.utils.html import escape
from pulsar.apps import wsgi, ws
from pulsar.apps.wsgi import (route, Html, Json, HtmlDocument, GZipMiddleware,
AsyncString)
from pulsar.utils.structures import MultiValueDict
from pulsar.utils.system import json
METHODS = frozenset(chain((m.lower() for m in ENCODE_URL_METHODS),
(m.lower() for m in ENCODE_BODY_METHODS)))
pyversion = '.'.join(map(str, sys.version_info[:3]))
ASSET_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets')
FAVICON = os.path.join(ASSET_DIR, 'favicon.ico')
characters = string.ascii_letters + string.digits
def asset(name, mode='r', chunk_size=None):
name = os.path.join(ASSET_DIR, name)
if os.path.isfile(name):
content_type, encoding = mimetypes.guess_type(name)
if chunk_size:
def _chunks():
with open(name, mode) as file:
while True:
data = file.read(chunk_size)
if not data:
break
yield data
data = _chunks()
else:
with open(name, mode) as file:
data = file.read()
return data, content_type, encoding
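# Usage note (illustrative only): asset() returns (data, content_type, encoding);
# when chunk_size is given, data is a generator of byte chunks, which is how the
# /clip route below streams video, e.g. asset('clip.mp4', 'rb', chunk_size=4096).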
class BaseRouter(wsgi.Router):
########################################################################
# INTERNALS
def bind_server_event(self, request, event, handler):
consumer = request.environ['pulsar.connection'].current_consumer()
consumer.bind_event(event, handler)
def info_data_response(self, request, **params):
data = self.info_data(request, **params)
return Json(data).http_response(request)
def info_data(self, request, **params):
headers = self.getheaders(request)
data = {'method': request.method,
'headers': headers,
'pulsar': self.pulsar_info(request)}
if request.method in ENCODE_URL_METHODS:
data['args'] = dict(request.url_data)
else:
args, files = request.data_and_files()
jfiles = MultiValueDict()
for name, parts in files.lists():
for part in parts:
try:
part = part.string()
except UnicodeError:
part = part.base64()
jfiles[name] = part
data.update((('args', dict(args)),
('files', dict(jfiles))))
data.update(params)
return data
def getheaders(self, request):
headers = Headers(kind='client')
for k in request.environ:
if k.startswith('HTTP_'):
headers[k[5:].replace('_', '-')] = request.environ[k]
return dict(headers)
def pulsar_info(self, request):
return request.get('pulsar.connection').info()
class HttpBin(BaseRouter):
'''The main :class:`.Router` for the HttpBin application
'''
def get(self, request):
'''The home page of this router'''
ul = Html('ul')
for router in sorted(self.routes, key=lambda r: r.creation_count):
a = router.link(escape(router.route.path))
a.addClass(router.name)
for method in METHODS:
if router.getparam(method):
a.addClass(method)
li = Html('li', a, ' %s' % router.getparam('title', ''))
ul.append(li)
title = 'Pulsar'
html = request.html_document
html.head.title = title
html.head.links.append('httpbin.css')
html.head.links.append('favicon.ico', rel="icon", type='image/x-icon')
html.head.scripts.append('httpbin.js')
ul = ul.render(request)
templ, _, _ = asset('template.html')
body = templ % (title, JAPANESE, CHINESE, version, pyversion, ul)
html.body.append(body)
return html.http_response(request)
@route(title='Returns GET data')
def get_get(self, request):
return self.info_data_response(request)
@route(title='Returns POST data')
def post_post(self, request):
return self.info_data_response(request)
@route(title='Returns PATCH data')
def patch_patch(self, request):
return self.info_data_response(request)
@route(title='Returns PUT data')
def put_put(self, request):
return self.info_data_response(request)
@route(title='Returns DELETE data')
def delete_delete(self, request):
return self.info_data_response(request)
@route('redirect/<int(min=1,max=10):times>', defaults={'times': 5},
title='302 Redirect n times')
def redirect(self, request):
num = request.urlargs['times'] - 1
if num:
raise HttpRedirect('/redirect/%s' % num)
else:
raise HttpRedirect('/get')
@route('getsize/<int(min=1,max=8388608):size>', defaults={'size': 150000},
title='Returns a preset size of data (limit at 8MB)')
def getsize(self, request):
size = request.urlargs['size']
data = {'size': size, 'data': 'd' * size}
return self.info_data_response(request, **data)
@route(title='Returns gzip encoded data')
def gzip(self, request):
response = self.info_data_response(request, gzipped=True)
return GZipMiddleware(10)(request.environ, response)
@route(title='Returns cookie data')
def cookies(self, request):
cookies = request.cookies
d = dict(((c.key, c.value) for c in cookies.values()))
return Json({'cookies': d}).http_response(request)
@route('cookies/set/<name>/<value>', title='Sets a simple cookie',
defaults={'name': 'package', 'value': 'pulsar'})
def request_cookies_set(self, request):
key = request.urlargs['name']
value = request.urlargs['value']
request.response.set_cookie(key, value=value)
request.response.status_code = 302
request.response.headers['location'] = '/cookies'
return request.response
@route('status/<int(min=100,max=505):status>',
title='Returns given HTTP Status code',
defaults={'status': 418})
def status(self, request):
request.response.content_type = 'text/html'
raise HttpException(status=request.urlargs['status'])
@route(title='Returns response headers')
def response_headers(self, request):
class Gen:
headers = None
def __call__(self, server, **kw):
self.headers = server.headers
def generate(self):
# yield a byte so that headers are sent
yield b''
# we must have the headers now
yield json.dumps(dict(self.headers))
gen = Gen()
self.bind_server_event(request, 'on_headers', gen)
request.response.content = gen.generate()
request.response.content_type = 'application/json'
return request.response
@route('basic-auth/<username>/<password>',
title='Challenges HTTPBasic Auth',
defaults={'username': 'username', 'password': 'password'})
def challenge_auth(self, request):
auth = request.get('http.authorization')
if auth and auth.authenticated(request.environ, **request.urlargs):
return Json({'authenticated': True,
'username': auth.username}).http_response(request)
raise wsgi.HttpAuthenticate('basic')
@route('digest-auth/<username>/<password>/<qop>',
title='Challenges HTTP Digest Auth',
defaults={'username': 'username',
'password': 'password',
'qop': 'auth'})
def challenge_digest_auth(self, request):
auth = request.get('http.authorization')
if auth and auth.authenticated(request.environ, **request.urlargs):
return Json({'authenticated': True,
'username': auth.username}).http_response(request)
raise wsgi.HttpAuthenticate('digest', qop=[request.urlargs['qop']])
@route('stream/<int(min=1):m>/<int(min=1):n>',
title='Stream m chunk of data n times',
defaults={'m': 300, 'n': 20})
def request_stream(self, request):
m = request.urlargs['m']
n = request.urlargs['n']
request.response.content_type = 'text/plain'
request.response.content = repeat(b'a' * m, n)
return request.response
@route(title='A web socket graph')
def websocket(self, request):
data = open(os.path.join(os.path.dirname(__file__),
'assets', 'websocket.html')).read()
scheme = 'wss' if request.is_secure else 'ws'
host = request.get('HTTP_HOST')
data = data % {'address': '%s://%s/graph-data' % (scheme, host)}
request.response.content_type = 'text/html'
request.response.content = data
return request.response
@route(title='Live server statistics')
def stats(self, request):
'''Live stats for the server.
Try sending lots of requests
'''
# scheme = 'wss' if request.is_secure else 'ws'
# host = request.get('HTTP_HOST')
# address = '%s://%s/stats' % (scheme, host)
doc = HtmlDocument(title='Live server stats', media_path='/assets/')
# docs.head.scripts
return doc.http_response(request)
@route('clip/<int(min=256,max=16777216):chunk_size>',
defaults={'chunk_size': 4096},
title='Show a video clip')
def clip(self, request):
c = request.urlargs['chunk_size']
data, ct, encoding = asset('clip.mp4', 'rb', chunk_size=c)
response = request.response
response.content_type = ct
response.encoding = encoding
response.content = data
return response
########################################################################
# BENCHMARK ROUTES
@route()
def json(self, request):
return Json({'message': "Hello, World!"}).http_response(request)
@route()
def plaintext(self, request):
return AsyncString('Hello, World!').http_response(request)
class ExpectFail(BaseRouter):
def post(self, request):
chunk = request.get('wsgi.input')
if not chunk.done():
chunk.fail()
else:
return self.info_data_response(request)
class Graph(ws.WS):
def on_message(self, websocket, msg):
websocket.write(json.dumps([(i, random()) for i in range(100)]))
class Site(wsgi.LazyWsgi):
def setup(self, environ):
router = HttpBin('/')
return wsgi.WsgiHandler([ExpectFail('expect'),
wsgi.wait_for_body_middleware,
wsgi.clean_path_middleware,
wsgi.authorization_middleware,
wsgi.MediaRouter('media', ASSET_DIR,
show_indexes=True),
ws.WebSocket('/graph-data', Graph()),
router],
async=True)
def server(description=None, **kwargs):
description = description or 'Pulsar HttpBin'
return wsgi.WSGIServer(Site(), description=description, **kwargs)
if __name__ == '__main__': # pragma nocover
server().start()
| bsd-3-clause |
s20121035/rk3288_android5.1_repo | external/lldb/test/functionalities/inferior-changed/TestInferiorChanged.py | 2 | 3689 | """Test lldb reloads the inferior after it was changed during the session."""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
class ChangedInferiorTestCase(TestBase):
mydir = os.path.join("functionalities", "inferior-changed")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_inferior_crashing_dsym(self):
"""Test lldb reloads the inferior after it was changed during the session."""
self.buildDsym()
self.inferior_crashing()
self.cleanup()
d = {'C_SOURCES': 'main2.c'}
self.buildDsym(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.inferior_not_crashing()
def test_inferior_crashing_dwarf(self):
"""Test lldb reloads the inferior after it was changed during the session."""
self.buildDwarf()
self.inferior_crashing()
self.cleanup()
# lldb needs to recognize the inferior has changed. If lldb needs to check the
# new module timestamp, make sure it is not the same as the old one, so add a
# 1 second delay.
time.sleep(1)
d = {'C_SOURCES': 'main2.c'}
self.buildDwarf(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.inferior_not_crashing()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number of the crash.
self.line1 = line_number('main.c', '// Crash here.')
self.line2 = line_number('main2.c', '// Not crash here.')
def inferior_crashing(self):
"""Inferior crashes upon launching; lldb should catch the event and stop."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.runCmd("run", RUN_SUCCEEDED)
if sys.platform.startswith("darwin"):
stop_reason = 'stop reason = EXC_BAD_ACCESS'
else:
stop_reason = 'stop reason = invalid address'
# The stop reason of the thread should be a bad access exception.
self.expect("thread list", STOPPED_DUE_TO_EXC_BAD_ACCESS,
substrs = ['stopped',
stop_reason])
# And it should report the correct line number.
self.expect("thread backtrace all",
substrs = [stop_reason,
'main.c:%d' % self.line1])
def inferior_not_crashing(self):
"""Test lldb reloads the inferior after it was changed during the session."""
self.runCmd("process kill")
self.runCmd("run", RUN_SUCCEEDED)
self.runCmd("process status")
if sys.platform.startswith("darwin"):
stop_reason = 'EXC_BAD_ACCESS'
else:
stop_reason = 'invalid address'
if stop_reason in self.res.GetOutput():
self.fail("Inferior changed, but lldb did not perform a reload")
# Break inside the main.
lldbutil.run_break_set_by_file_and_line (self, "main2.c", self.line2, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
self.runCmd("frame variable int_ptr")
self.expect("frame variable *int_ptr",
substrs = ['= 7'])
self.expect("expression *int_ptr",
substrs = ['= 7'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| gpl-3.0 |
EliotBerriot/django | django/utils/inspect.py | 323 | 4195 | from __future__ import absolute_import
import inspect
from django.utils import six
def getargspec(func):
if six.PY2:
return inspect.getargspec(func)
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty
] or None
return args, varargs, varkw, defaults
def get_func_args(func):
if six.PY2:
argspec = inspect.getargspec(func)
return argspec.args[1:] # ignore 'self'
sig = inspect.signature(func)
return [
arg_name for arg_name, param in sig.parameters.items()
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
def get_func_full_args(func):
"""
Return a list of (argument name, default value) tuples. If the argument
does not have a default value, omit it in the tuple. Arguments such as
*args and **kwargs are also included.
"""
if six.PY2:
argspec = inspect.getargspec(func)
args = argspec.args[1:] # ignore 'self'
defaults = argspec.defaults or []
# Split args into two lists depending on whether they have default value
no_default = args[:len(args) - len(defaults)]
with_default = args[len(args) - len(defaults):]
# Join the two lists and combine it with default values
args = [(arg,) for arg in no_default] + zip(with_default, defaults)
# Add possible *args and **kwargs and prepend them with '*' or '**'
varargs = [('*' + argspec.varargs,)] if argspec.varargs else []
kwargs = [('**' + argspec.keywords,)] if argspec.keywords else []
return args + varargs + kwargs
sig = inspect.signature(func)
args = []
for arg_name, param in sig.parameters.items():
name = arg_name
# Ignore 'self'
if name == 'self':
continue
if param.kind == inspect.Parameter.VAR_POSITIONAL:
name = '*' + name
elif param.kind == inspect.Parameter.VAR_KEYWORD:
name = '**' + name
if param.default != inspect.Parameter.empty:
args.append((name, param.default))
else:
args.append((name,))
return args
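# Expected output shape (illustrative self-check only):
def _example_full_args():
    def sample(a, b=1, *args, **kwargs):
        pass
    if not six.PY2:  # the Python 2 branch strips the first argument as 'self'
        assert get_func_full_args(sample) == [('a',), ('b', 1), ('*args',), ('**kwargs',)]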
def func_accepts_kwargs(func):
if six.PY2:
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(func)
except TypeError:
try:
argspec = inspect.getargspec(func.__call__)
except (TypeError, AttributeError):
argspec = None
return not argspec or argspec[2] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_KEYWORD
)
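# Quick examples (illustrative only):
#   func_accepts_kwargs(lambda **kw: None) -> True
#   func_accepts_kwargs(lambda a, b: None) -> False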
def func_accepts_var_args(func):
"""
Return True if function 'func' accepts positional arguments *args.
"""
if six.PY2:
return inspect.getargspec(func)[1] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_POSITIONAL
)
def func_has_no_args(func):
args = inspect.getargspec(func)[0] if six.PY2 else [
p for p in inspect.signature(func).parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD
]
return len(args) == 1
def func_supports_parameter(func, parameter):
if six.PY3:
return parameter in inspect.signature(func).parameters
else:
args, varargs, varkw, defaults = inspect.getargspec(func)
return parameter in args
| bsd-3-clause |
fluxw42/youtube-dl | youtube_dl/extractor/laola1tv.py | 14 | 6314 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
urlencode_postdata,
xpath_element,
xpath_text,
urljoin,
update_url_query,
)
class Laola1TvEmbedIE(InfoExtractor):
IE_NAME = 'laola1tv:embed'
_VALID_URL = r'https?://(?:www\.)?laola1\.tv/titanplayer\.php\?.*?\bvideoid=(?P<id>\d+)'
_TEST = {
# flashvars.premium = "false";
'url': 'https://www.laola1.tv/titanplayer.php?videoid=708065&type=V&lang=en&portal=int&customer=1024',
'info_dict': {
'id': '708065',
'ext': 'mp4',
'title': 'MA Long CHN - FAN Zhendong CHN',
'uploader': 'ITTF - International Table Tennis Federation',
'upload_date': '20161211',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
flash_vars = self._search_regex(
r'(?s)flashvars\s*=\s*({.+?});', webpage, 'flash vars')
def get_flashvar(x, *args, **kwargs):
flash_var = self._search_regex(
r'%s\s*:\s*"([^"]+)"' % x,
flash_vars, x, default=None)
if not flash_var:
flash_var = self._search_regex([
r'flashvars\.%s\s*=\s*"([^"]+)"' % x,
r'%s\s*=\s*"([^"]+)"' % x],
webpage, x, *args, **kwargs)
return flash_var
hd_doc = self._download_xml(
'http://www.laola1.tv/server/hd_video.php', video_id, query={
'play': get_flashvar('streamid'),
'partner': get_flashvar('partnerid'),
'portal': get_flashvar('portalid'),
'lang': get_flashvar('sprache'),
'v5ident': '',
})
_v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k)
title = _v('title', fatal=True)
token_url = None
premium = get_flashvar('premium', default=None)
if premium:
token_url = update_url_query(
_v('url', fatal=True), {
'timestamp': get_flashvar('timestamp'),
'auth': get_flashvar('auth'),
})
else:
data_abo = urlencode_postdata(
dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(','))))
token_url = self._download_json(
'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access',
video_id, query={
'videoId': _v('id'),
'target': self._search_regex(r'vs_target = (\d+);', webpage, 'vs target'),
'label': _v('label'),
'area': _v('area'),
}, data=data_abo)['data']['stream-access'][0]
token_doc = self._download_xml(
token_url, video_id, 'Downloading token',
headers=self.geo_verification_headers())
token_attrib = xpath_element(token_doc, './/token').attrib
if token_attrib['status'] != '0':
raise ExtractorError(
'Token error: %s' % token_attrib['comment'], expected=True)
formats = self._extract_akamai_formats(
'%s?hdnea=%s' % (token_attrib['url'], token_attrib['auth']),
video_id)
self._sort_formats(formats)
categories_str = _v('meta_sports')
categories = categories_str.split(',') if categories_str else []
is_live = _v('islive') == 'true'
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'upload_date': unified_strdate(_v('time_date')),
'uploader': _v('meta_organisation'),
'categories': categories,
'is_live': is_live,
'formats': formats,
}
class Laola1TvIE(InfoExtractor):
IE_NAME = 'laola1tv'
_VALID_URL = r'https?://(?:www\.)?laola1\.tv/[a-z]+-[a-z]+/[^/]+/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
'info_dict': {
'id': '227883',
'display_id': 'straubing-tigers-koelner-haie',
'ext': 'flv',
'title': 'Straubing Tigers - Kölner Haie',
'upload_date': '20140912',
'is_live': False,
'categories': ['Eishockey'],
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie',
'info_dict': {
'id': '464602',
'display_id': 'straubing-tigers-koelner-haie',
'ext': 'flv',
'title': 'Straubing Tigers - Kölner Haie',
'upload_date': '20160129',
'is_live': False,
'categories': ['Eishockey'],
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.laola1.tv/de-de/livestream/2016-03-22-belogorie-belgorod-trentino-diatec-lde',
'info_dict': {
'id': '487850',
'display_id': '2016-03-22-belogorie-belgorod-trentino-diatec-lde',
'ext': 'flv',
'title': 'Belogorie BELGOROD - TRENTINO Diatec',
'upload_date': '20160322',
'uploader': 'CEV - Europäischer Volleyball Verband',
'is_live': True,
'categories': ['Volleyball'],
},
'params': {
'skip_download': True,
},
'skip': 'This live stream has already finished.',
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
if 'Dieser Livestream ist bereits beendet.' in webpage:
raise ExtractorError('This live stream has already finished.', expected=True)
iframe_url = urljoin(url, self._search_regex(
r'<iframe[^>]*?id="videoplayer"[^>]*?src="([^"]+)"',
webpage, 'iframe url'))
return {
'_type': 'url',
'display_id': display_id,
'url': iframe_url,
'ie_key': 'Laola1TvEmbed',
}
| unlicense |
maciekcc/tensorflow | tensorflow/python/estimator/export/export.py | 19 | 7374 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration and utilities for receiving inputs at serving time."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.util import compat
_SINGLE_FEATURE_DEFAULT_NAME = 'feature'
_SINGLE_RECEIVER_DEFAULT_NAME = 'input'
class ServingInputReceiver(collections.namedtuple('ServingInputReceiver',
['features',
'receiver_tensors'])):
"""A return type for a serving_input_receiver_fn.
The expected return values are:
features: A dict of string to `Tensor` or `SparseTensor`, specifying the
features to be passed to the model.
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes where this receiver expects to be fed. Typically, this is a
single placeholder expecting serialized `tf.Example` protos.
"""
# TODO(soergel): add receiver_alternatives when supported in serving.
def __new__(cls, features, receiver_tensors):
if features is None:
raise ValueError('features must be defined.')
if not isinstance(features, dict):
features = {_SINGLE_FEATURE_DEFAULT_NAME: features}
for name, tensor in features.items():
if not isinstance(name, six.string_types):
raise ValueError('feature keys must be strings: {}.'.format(name))
if not (isinstance(tensor, ops.Tensor)
or isinstance(tensor, sparse_tensor.SparseTensor)):
raise ValueError(
'feature {} must be a Tensor or SparseTensor.'.format(name))
if receiver_tensors is None:
raise ValueError('receiver_tensors must be defined.')
if not isinstance(receiver_tensors, dict):
receiver_tensors = {_SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
for name, tensor in receiver_tensors.items():
if not isinstance(name, six.string_types):
raise ValueError(
'receiver_tensors keys must be strings: {}.'.format(name))
if not isinstance(tensor, ops.Tensor):
raise ValueError(
'receiver_tensor {} must be a Tensor.'.format(name))
return super(ServingInputReceiver, cls).__new__(
cls, features=features, receiver_tensors=receiver_tensors)
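# Hedged sketch (added for illustration, not part of the original module):
# a bare Tensor passed for either argument is wrapped into a dict under the
# default keys defined above ('feature' and 'input').
def _example_single_tensor_receiver():
  x = array_ops.placeholder(dtypes.float32, shape=[None, 3], name='example_x')
  receiver = ServingInputReceiver(features=x, receiver_tensors=x)
  assert 'feature' in receiver.features
  assert 'input' in receiver.receiver_tensors
  return receiver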
def build_parsing_serving_input_receiver_fn(feature_spec,
default_batch_size=None):
"""Build a serving_input_receiver_fn expecting fed tf.Examples.
Creates a serving_input_receiver_fn that expects a serialized tf.Example fed
into a string placeholder. The function parses the tf.Example according to
the provided feature_spec, and returns all parsed Tensors as features.
Args:
feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
A serving_input_receiver_fn suitable for use in serving.
"""
def serving_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = parsing_ops.parse_example(serialized_tf_example, feature_spec)
return ServingInputReceiver(features, receiver_tensors)
return serving_input_receiver_fn
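# Hedged usage sketch (added; the feature name 'x' is illustrative): builds a
# receiver fn for one fixed-length float feature and calls it, which creates
# the 'examples' string placeholder plus the parsed feature Tensors.
def _example_parsing_receiver_fn():
  feature_spec = {'x': parsing_ops.FixedLenFeature(shape=[1],
                                                   dtype=dtypes.float32)}
  receiver_fn = build_parsing_serving_input_receiver_fn(feature_spec)
  receiver = receiver_fn()
  assert list(receiver.receiver_tensors.keys()) == ['examples']
  return receiver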
def build_raw_serving_input_receiver_fn(features, default_batch_size=None):
"""Build a serving_input_receiver_fn expecting feature Tensors.
  Creates a serving_input_receiver_fn that expects all features to be fed
directly.
Args:
features: a dict of string to `Tensor`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
A serving_input_receiver_fn.
"""
def serving_input_receiver_fn():
"""A serving_input_receiver_fn that expects features to be fed directly."""
receiver_tensors = {}
for name, t in features.items():
shape_list = t.get_shape().as_list()
shape_list[0] = default_batch_size
shape = tensor_shape.TensorShape(shape_list)
# Reuse the feature tensor name for the placeholder, excluding the index
placeholder_name = t.name.split(':')[0]
receiver_tensors[name] = array_ops.placeholder(dtype=t.dtype,
shape=shape,
name=placeholder_name)
# TODO(b/34885899): remove the unnecessary copy
# The features provided are simply the placeholders, but we defensively copy
# the dict because it may be mutated.
return ServingInputReceiver(receiver_tensors, receiver_tensors.copy())
return serving_input_receiver_fn
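# Hedged usage sketch (added; names are illustrative): feeds a feature Tensor
# directly, with no tf.Example parsing; the receiver tensors mirror the
# features dict.
def _example_raw_receiver_fn():
  x = array_ops.placeholder(dtypes.float32, shape=[None, 2], name='raw_x')
  receiver_fn = build_raw_serving_input_receiver_fn({'x': x})
  receiver = receiver_fn()
  assert set(receiver.features.keys()) == set(['x'])
  return receiver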
### Below utilities are specific to SavedModel exports.
def build_all_signature_defs(receiver_tensors, export_outputs):
"""Build `SignatureDef`s for all export outputs."""
if not isinstance(receiver_tensors, dict):
receiver_tensors = {'receiver': receiver_tensors}
if export_outputs is None or not isinstance(export_outputs, dict):
raise ValueError('export_outputs must be a dict.')
signature_def_map = {
'{}'.format(output_key or 'None'):
export_output.as_signature_def(receiver_tensors)
for output_key, export_output in export_outputs.items()}
return signature_def_map
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
"""
export_timestamp = int(time.time())
export_dir = os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(str(export_timestamp)))
return export_dir
| apache-2.0 |
sridevikoushik31/openstack | nova/availability_zones.py | 3 | 3309 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability zone helper functions."""
from oslo.config import cfg
from nova import db
from nova.openstack.common import log as logging
availability_zone_opts = [
cfg.StrOpt('internal_service_availability_zone',
default='internal',
help='availability_zone to show internal services under'),
cfg.StrOpt('default_availability_zone',
# deprecated in Grizzly release
deprecated_name='node_availability_zone',
default='nova',
help='default compute node availability_zone'),
]
CONF = cfg.CONF
CONF.register_opts(availability_zone_opts)
LOG = logging.getLogger(__name__)
def set_availability_zones(context, services):
# Makes sure services isn't a sqlalchemy object
services = [dict(service.iteritems()) for service in services]
metadata = db.aggregate_host_get_by_metadata_key(context,
key='availability_zone')
for service in services:
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
az = u','.join(list(metadata[service['host']]))
else:
az = CONF.default_availability_zone
service['availability_zone'] = az
return services
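# Illustrative sketch (added comment, not part of the original module): for a
# compute service whose host belongs to no 'availability_zone' aggregate the
# default zone applies, while non-compute services always get the internal
# zone. With hypothetical service rows:
#     services = set_availability_zones(ctxt,
#         [{'topic': 'compute', 'host': 'node1'},
#          {'topic': 'scheduler', 'host': 'ctrl1'}])
#     # -> node1: CONF.default_availability_zone ('nova')
#     #    ctrl1: CONF.internal_service_availability_zone ('internal')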
def get_host_availability_zone(context, host, conductor_api=None):
if conductor_api:
metadata = conductor_api.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
else:
metadata = db.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
if 'availability_zone' in metadata:
return list(metadata['availability_zone'])[0]
else:
return CONF.default_availability_zone
def get_availability_zones(context):
"""Return available and unavailable zones."""
enabled_services = db.service_get_all(context, False)
disabled_services = db.service_get_all(context, True)
enabled_services = set_availability_zones(context, enabled_services)
disabled_services = set_availability_zones(context, disabled_services)
available_zones = []
for zone in [service['availability_zone'] for service
in enabled_services]:
if zone not in available_zones:
available_zones.append(zone)
not_available_zones = []
    zones = [service['availability_zone'] for service in disabled_services
             if service['availability_zone'] not in available_zones]
for zone in zones:
if zone not in not_available_zones:
not_available_zones.append(zone)
return (available_zones, not_available_zones)
| apache-2.0 |
orekyuu/intellij-community | python/lib/Lib/site-packages/django/contrib/flatpages/admin.py | 250 | 1089 | from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$',
help_text = _("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_message = _("This value must contain only letters, numbers,"
" dots, underscores, dashes, slashes or tildes."))
class Meta:
model = FlatPage
class FlatPageAdmin(admin.ModelAdmin):
form = FlatpageForm
fieldsets = (
(None, {'fields': ('url', 'title', 'content', 'sites')}),
(_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
)
list_display = ('url', 'title')
list_filter = ('sites', 'enable_comments', 'registration_required')
search_fields = ('url', 'title')
admin.site.register(FlatPage, FlatPageAdmin)
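# Illustrative note (added): the URL regex above, r'^[-\w/\.~]+$', accepts
# values such as '/about/contact/' or '/docs/faq-1/' and rejects anything
# containing spaces, query strings or other punctuation.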
| apache-2.0 |
mohseniaref/adore-doris | gui/snaphuConfigEditor.py | 2 | 11609 | #!/usr/bin/env python
# example basictreeview.py
import pygtk
pygtk.require('2.0')
import gtk
import os
import dialogs
class SnaphuConfigEditor:
def snaphuParser(self, set=None, setFile=None):
if setFile is None:
setFile=self.setFile;
if set is None:
set=self.set;
f=open(setFile, 'r')
for l in f:
wl=l.split('#')[0].strip() #remove comments
if wl!='': #skip empty lines
key=wl.split()[0].strip() #get the keyword
val=''.join(wl.split()[1:]) #get value
#print [key, val]
set[key]=val
f.close()
def advancedChkBtnToggled(self, widget, liststore):
#widget.get_active()
liststore.clear()
self.displayOptions(self.setFile, liststore);
def displayOptions(self, setFile, liststore):
# self.set.read(setFile)
# we'll add some data now - 4 rows with 3 child rows each
#for section in self.set.sections():
# sectionId = self.liststore.append(None, (False,section, ''))
# for option,value in self.set.items(section):
# if "_rel_" in option and not self.advancedChkBtn.get_active():
# continue;
# self.liststore.append(sectionId, (False,option,value))
k=0;
if os.path.exists(self.setFile):
f=open(self.setFile, 'r')
for l in f:
wl=l.split('#')[0].strip() #remove comments
if wl!='': #skip empty lines
k=k+1;
key=wl.split()[0].strip() #get the keyword
val=''.join(wl.split()[1:]) #get value
#print [key, val]
self.liststore.append((False, key, val))
f.close()
self.window.set_title(str('%d settings: %s' % (k, self.setFile) ))
def chkbx_toggled_cb(self, cell, path, liststore):
liststore[path][0]=not liststore[path][0]
return
# Handle edited value
def edited_cb2(self, cell, path, new_text, liststore):
#print path
#print new_text
#print liststore
liststore[path][2] = new_text
liststore[path][0] = True
self.window.set_title(str('! %s' % ( self.setFile) ))
return
# def row_inserted(self, widget, path, iter):
# print widget
# print path
# print iter
#
# self.treeview.set_cursor(path, focus_column=self.tvcolumn2, start_editing=True)
# close the window and quit
def delete_event(self, widget, event, data=None):
#gtk.main_quit()
del self.set
del self.liststore
self.window.destroy()
return False
def saveAsButtonClicked(self, widget, liststore):
chooser = gtk.FileChooserDialog(title=None,action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename=chooser.get_filename();
chooser.destroy()
self.setFile=filename
f=open(self.setFile, 'w')
for row in liststore:
f.write(str('%s\t%s\n' %(row[1], row[2])))
f.close()
self.window.set_title(str('%s' % ( self.setFile) ))
def saveButtonClicked(self, widget, liststore):
f=open(self.setFile, 'w')
for row in liststore:
f.write(str('%s\t%s\n' %(row[1], row[2])))
f.close()
self.window.set_title(str('%s' % ( self.setFile) ))
#Let's see if this will stop the constant crashing
#self.window.destroy();
def addButtonClicked(self, widget, liststore):
dropdownlist=self.set.keys();
for row in liststore:
if row[1] in dropdownlist:
dropdownlist.remove(row[1]);
if len(dropdownlist)>0:
response,param=dialogs.dropdown(dropdownlist, '<b>Add</b>');
if response == gtk.RESPONSE_OK:
liststore.prepend((False, param, self.set[param]))
self.window.set_title(str('! %s' % ( self.setFile) ))
return
else:
dialogs.error('No more keywords to add.')
return
def removeButtonClicked(self, widget, liststore):
for row in liststore:
if row[0] == True:
liststore.remove(row.iter)
self.window.set_title(str('! %s' % (self.setFile) ))
def openButtonClicked(self, widget, liststore):
liststore.clear()
chooser = gtk.FileChooserDialog(title=None,action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename=chooser.get_filename();
chooser.destroy()
self.setFile=filename
self.displayOptions(self.setFile, liststore);
def __init__(self,mainWindow):
#Load settings
#self.set=ConfigParser.ConfigParser()
self.set={}
#Make settings case sensitive
#self.set.optionxform = str
#
mainWindow.readSet();
self.confFull=os.path.join(mainWindow.set.get('adore','ADOREFOLDER').strip('"'),'set/snaphu.conf.full')
self.snaphuParser(setFile=self.confFull); #Initialize the set object.
self.setFile=os.path.join(mainWindow.set.get('adore','outputFolder').strip('"'),'snaphu.conf')
self.runcmd=mainWindow.runcmd;
# self.set=ConfigParser.ConfigParser()
# self.set.read(setFile)
# Create a new window
self.window = gtk.Window()#hadjustment=None, vadjustment=None)
self.swindow = gtk.ScrolledWindow(hadjustment=None, vadjustment=None)
self.swindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.window.set_title("AGOOEY Snaphu Configuration Editor")
self.window.set_size_request(500, 600)
self.window.connect("delete_event", self.delete_event)
self.vbox = gtk.VBox(homogeneous=False, spacing=0);
self.hbox = gtk.HBox(homogeneous=False, spacing=0);
# create a TreeStore with one string column to use as the model
self.liststore = gtk.ListStore(bool, str, str)
##### SET THE HBOX #####
self.saveButton=gtk.Button(label='Save', stock=None, use_underline=True);
self.saveButton.connect("clicked", self.saveButtonClicked, self.liststore)
self.saveButton.set_flags(gtk.CAN_DEFAULT);
self.saveButton.show();
self.saveAsButton=gtk.Button(label='Save As', stock=None, use_underline=True);
self.saveAsButton.connect("clicked", self.saveAsButtonClicked, self.liststore)
self.saveAsButton.set_flags(gtk.CAN_DEFAULT);
self.saveAsButton.show();
# self.refreshButton=gtk.Button(label='Refresh', stock=None, use_underline=True);
# self.refreshButton.connect("clicked", self.refreshButtonClicked, self.liststore)
# self.refreshButton.set_flags(gtk.CAN_DEFAULT);
# self.refreshButton.show();
self.openButton=gtk.Button(label='Open', stock=None, use_underline=True);
self.openButton.connect("clicked", self.openButtonClicked, self.liststore)
self.openButton.set_flags(gtk.CAN_DEFAULT);
self.openButton.show();
self.addButton=gtk.Button(label='Add', stock=None, use_underline=True);
self.addButton.connect("clicked", self.addButtonClicked, self.liststore)
self.addButton.set_flags(gtk.CAN_DEFAULT);
self.addButton.show();
self.removeButton=gtk.Button(label='Remove', stock=None, use_underline=True);
self.removeButton.connect("clicked", self.removeButtonClicked, self.liststore)
self.removeButton.set_flags(gtk.CAN_DEFAULT);
self.removeButton.show();
# self.advancedChkBtn=gtk.CheckButton("Advanced");
# self.advancedChkBtn.connect("toggled", self.advancedChkBtnToggled, self.liststore)
# self.advancedChkBtn.show();
self.hbox.pack_start(self.openButton, expand = False, fill = False, padding = 5);
self.hbox.pack_start(self.saveButton, expand = False, fill = False, padding = 5);
self.hbox.pack_start(self.saveAsButton, expand = False, fill = False, padding = 5);
self.hbox.pack_start(self.addButton, expand = False, fill = False, padding = 5);
self.hbox.pack_start(self.removeButton, expand = False, fill = False, padding = 5);
# self.hbox.pack_start(self.refreshButton, expand = False, fill = False, padding = 5);
# self.hbox.pack_start(self.advancedChkBtn, expand = False, fill = False, padding = 20);
##### SET THE VBOX #####
# adj = gtk.Adjustment(0.0, 0.0, 100.0, 1.0, 10.0, 0.0)
# scrollbar = gtk.HScale(adj)
# self.vbox.pack_start(scrollbar, False, False, 0)
#Add some data now
self.displayOptions(self.setFile, self.liststore);
# create the TreeView using liststore
self.treeview = gtk.TreeView(self.liststore)
# create a CellRendererText to render the data
self.chkbx= gtk.CellRendererToggle();
self.cell = gtk.CellRendererText()
self.cell2 = gtk.CellRendererText()
#Make chkbox col activatable
self.chkbx.set_property('activatable', True)
#Make col1 editable
self.cell2.set_property('editable', True)
# connect the edit handling function
self.cell2.connect('edited', self.edited_cb2, self.liststore)
self.chkbx.connect('toggled', self.chkbx_toggled_cb, self.liststore)
#self.liststore.connect('row_inserted', self.row_inserted)
# create the TreeViewColumn to display the data
self.tvcolumn0 = gtk.TreeViewColumn('Remove', self.chkbx)
self.tvcolumn1 = gtk.TreeViewColumn('Settings', self.cell, text=1)
self.tvcolumn2 = gtk.TreeViewColumn('Values', self.cell2, text=2)
# add tvcolumn to treeview
self.treeview.append_column(self.tvcolumn0)
self.treeview.append_column(self.tvcolumn1)
self.treeview.append_column(self.tvcolumn2)
# add the cell to the tvcolumn and allow it to expand
#self.tvcolumn.pack_start(self.cell, True)
#self.tvcolumn2.pack_start(self.cell2, True)
# set the cell "text" attribute to column 0 - retrieve text
# from that column in liststore
self.tvcolumn0.add_attribute(self.chkbx, 'active', 0)
self.tvcolumn1.add_attribute(self.cell, 'text', 1)
self.tvcolumn2.add_attribute(self.cell2, 'text', 2)
# make it searchable
self.treeview.set_search_column(1)
# Allow sorting on the column
self.tvcolumn1.set_sort_column_id(1)
# Allow drag and drop reordering of rows
self.treeview.set_reorderable(True)
self.treeview.show()
self.vbox.pack_start(self.hbox);
self.vbox.pack_end(self.treeview);
self.window.set_default(self.saveButton);
self.swindow.add_with_viewport(self.vbox)
self.window.add(self.swindow)
#self.vbox.show()
self.window.show_all();
def main():
gtk.main()
return 0
if __name__ == "__main__":
se = SnaphuConfigEditor()
main()
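# Hedged note (added for illustration, not part of ADORE): snaphuParser reads
# plain "KEYWORD value  # comment" lines, so a snaphu.conf containing
#     STATCOSTMODE  SMOOTH    # statistical-cost mode
#     INFILE        adore.int
# is parsed into {'STATCOSTMODE': 'SMOOTH', 'INFILE': 'adore.int'}.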
| gpl-2.0 |
ldtp/pyatom | atomac/ldtpd/text.py | 2 | 16670 | # Copyright (c) 2012 VMware, Inc. All Rights Reserved.
# This file is part of ATOMac.
#@author: Nagappan Alagappan <[email protected]>
#@copyright: Copyright (c) 2009-12 Nagappan Alagappan
#http://ldtp.freedesktop.org
# ATOMac is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation version 2 and no later version.
# ATOMac is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License version 2
# for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# St, Fifth Floor, Boston, MA 02110-1301 USA.
"""Text class."""
import re
import fnmatch
import atomac.Clipboard as Clipboard
from utils import Utils
from keypress_actions import KeyComboAction, KeyPressAction, KeyReleaseAction
from server_exception import LdtpServerException
class Text(Utils):
def generatekeyevent(self, data):
"""
        Generates a key event on the system; this simulates user-like
        interaction via the keyboard.
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
KeyComboAction(data)
return 1
def keypress(self, data):
"""
        Press key. NOTE: keyrelease should be called afterwards.
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
try:
window=self._get_front_most_window()
except (IndexError, ):
window=self._get_any_window()
key_press_action = KeyPressAction(window, data)
return 1
def keyrelease(self, data):
"""
Release key. NOTE: keypress should be called before this
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
try:
window=self._get_front_most_window()
except (IndexError, ):
window=self._get_any_window()
key_release_action = KeyReleaseAction(window, data)
return 1
def enterstring(self, window_name, object_name='', data=''):
"""
Type string sequence.
@param window_name: Window name to focus on, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to focus on, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
if not object_name and not data:
return self.generatekeyevent(window_name)
else:
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
self._grabfocus(object_handle)
object_handle.sendKeys(data)
return 1
def settextvalue(self, window_name, object_name, data):
"""
Type string sequence.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
object_handle.AXValue=data
return 1
def gettextvalue(self, window_name, object_name, startPosition=0, endPosition=0):
"""
Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
return object_handle.AXValue
def inserttext(self, window_name, object_name, position, data):
"""
Insert string sequence in given position.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param position: position where text has to be entered.
@type data: int
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
existing_data=object_handle.AXValue
size=len(existing_data)
if position < 0:
position=0
if position > size:
position=size
object_handle.AXValue=existing_data[:position] + data + \
existing_data[position:]
return 1
def verifypartialmatch(self, window_name, object_name, partial_text):
"""
Verify partial text
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param partial_text: Partial text to match
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
try:
if re.search(fnmatch.translate(partial_text),
self.gettextvalue(window_name,
object_name)):
return 1
except:
pass
return 0
def verifysettext(self, window_name, object_name, text):
"""
Verify text is set correctly
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param text: text to match
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
try:
return int(re.match(fnmatch.translate(text),
self.gettextvalue(window_name,
object_name)))
except:
return 0
def istextstateenabled(self, window_name, object_name):
"""
Verifies text state enabled or not
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success 0 on failure.
@rtype: integer
"""
try:
object_handle=self._get_object_handle(window_name, object_name)
if object_handle.AXEnabled:
return 1
except LdtpServerException:
pass
return 0
def getcharcount(self, window_name, object_name):
"""
Get character count
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
return object_handle.AXNumberOfCharacters
def appendtext(self, window_name, object_name, data):
"""
Append string sequence.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
object_handle.AXValue += data
return 1
def getcursorposition(self, window_name, object_name):
"""
Get cursor position
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: Cursor position on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
return object_handle.AXSelectedTextRange.loc
def setcursorposition(self, window_name, object_name, cursor_position):
"""
Set cursor position
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param cursor_position: Cursor position to be set
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
object_handle.AXSelectedTextRange.loc=cursor_position
return 1
def cuttext(self, window_name, object_name, start_position, end_position=-1):
"""
cut text from start position to end position
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param start_position: Start position
@type object_name: integer
@param end_position: End position, default -1
Cut all the text from start position till end
@type object_name: integer
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
size=object_handle.AXNumberOfCharacters
if end_position == -1 or end_position > size:
end_position=size
if start_position < 0:
start_position=0
data=object_handle.AXValue
Clipboard.copy(data[start_position:end_position])
object_handle.AXValue=data[:start_position] + data[end_position:]
return 1
def copytext(self, window_name, object_name, start_position, end_position=-1):
"""
copy text from start position to end position
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param start_position: Start position
@type object_name: integer
@param end_position: End position, default -1
Copy all the text from start position till end
@type object_name: integer
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
size=object_handle.AXNumberOfCharacters
if end_position == -1 or end_position > size:
end_position=size
if start_position < 0:
start_position=0
data=object_handle.AXValue
Clipboard.copy(data[start_position:end_position])
return 1
def deletetext(self, window_name, object_name, start_position, end_position=-1):
"""
delete text from start position to end position
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param start_position: Start position
@type object_name: integer
@param end_position: End position, default -1
Delete all the text from start position till end
@type object_name: integer
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
size=object_handle.AXNumberOfCharacters
if end_position == -1 or end_position > size:
end_position=size
if start_position < 0:
start_position=0
data=object_handle.AXValue
object_handle.AXValue=data[:start_position] + data[end_position:]
return 1
def pastetext(self, window_name, object_name, position=0):
"""
        Paste clipboard text at the given position.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param position: Position to paste the text, default 0
@type object_name: integer
@return: 1 on success.
@rtype: integer
"""
object_handle=self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
size=object_handle.AXNumberOfCharacters
if position > size:
position=size
if position < 0:
position=0
clipboard=Clipboard.paste()
data=object_handle.AXValue
object_handle.AXValue=data[:position] + clipboard + data[position:]
return 1
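# Hedged usage sketch (added; not part of ATOMac). These methods are normally
# invoked through an LDTP client over XML-RPC; the window and object names
# below are purely illustrative:
#     ldtp.settextvalue('*gedit', 'txtbody', 'hello world')
#     ldtp.appendtext('*gedit', 'txtbody', '!')
#     ldtp.getcharcount('*gedit', 'txtbody')   # -> 12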
| gpl-2.0 |
initzx/aobot | commands/private/changepic.py | 1 | 1135 | import aiohttp
from registrar import AbstractCommand, bot_command
@bot_command
class Command(AbstractCommand):
""" Template for bot command classes. """
_name = 'changepic'
_aliases = ['changepic']
_enabled = True
tags = {'misc': ['private'], 'args': {'req': ['pic'], 'nreq': []}}
@staticmethod
async def execute(client, msg, **kwargs):
""" Executes this command.
:type client: bot.Bot
:type msg: discord.Message
"""
with aiohttp.Timeout(10):
async with aiohttp.get(kwargs.get('pic')) as res:
await client.edit_profile(avatar=await res.read())
@property
def name(self):
""" The name of this command """
return self._name
@property
def aliases(self):
""" The aliases that can be used to call this command """
return self._aliases
@property
def enabled(self):
""" Controls whether the command is allowed to be executed. """
return self._enabled
@enabled.setter
def enabled(self, value):
""" Setter for `enabled` """
self.enabled = value
| gpl-3.0 |
aimscare/angular-test | node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
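# Hedged sketch (added): the first element is treated as the program name and
# only normalized, while every remaining argument goes through QuoteForRspFile.
def _example_encode_rsp_file_list():
  assert EncodeRspFileList([]) == ''
  assert (EncodeRspFileList(['cl.exe', '/nologo', 'a b.cc']) ==
          'cl.exe "/nologo" "a b.cc"')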
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
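# Hedged sketch (added): path lookup over nested dicts, falling back to the
# default as soon as any key on the path is missing.
def _example_generic_retrieve():
  tree = {'VCCLCompilerTool': {'Optimization': '2'}}
  assert _GenericRetrieve(tree, None, ['VCCLCompilerTool', 'Optimization']) == '2'
  assert _GenericRetrieve(tree, 'none', ['VCLinkerTool', 'OutputFile']) == 'none'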
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correpond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
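# Illustrative sketch (added; not part of the original gyp module). It shows
# how ExpandMacros behaves given a small expansions dict; the macro name and
# value below are made up.
def _ExampleExpandMacros():
  expansions = {'$(OutDir)': 'out\\Release\\'}  # hypothetical mapping
  # Returns 'out\\Release\\foo.dll'.
  return ExpandMacros('$(OutDir)foo.dll', expansions)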
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
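# Illustrative sketch (added; not part of gyp): the string below mimics the
# kind of `set` dump that _ExtractImportantEnvironment parses. All paths are
# hypothetical; only variables the function cares about are included.
def _ExampleExtractImportantEnvironment():
  sample_set_output = (
      'INCLUDE=C:\\vs\\include\n'
      'LIB=C:\\vs\\lib\n'
      'PATH=C:\\vs\\bin\n'
      'SYSTEMROOT=C:\\Windows\n'
      'TEMP=C:\\Temp\n'
      'TMP=C:\\Temp\n')
  # Returns a dict keyed by upper-cased names, e.g. env['INCLUDE']; the
  # directory of the running python is prepended to PATH.
  return _ExtractImportantEnvironment(sample_set_output)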
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
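# Illustrative sketch (added; not part of gyp): the resulting block is a run
# of 'KEY=value\0' entries followed by one extra '\0', which is the layout
# CreateProcess expects for its environment parameter. The variables below
# are made up, and entry order depends on dict iteration order.
def _ExampleFormatAsEnvironmentBlock():
  return _FormatAsEnvironmentBlock({'PATH': 'C:\\tools', 'TEMP': 'C:\\Temp'})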
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
If this procedure for generating environment files does not meet your
requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and supply custom environment files that you prepare yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators when run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| mit |
TheTimmy/spack | var/spack/repos/builtin/packages/gflags/package.py | 3 | 1900 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gflags(CMakePackage):
"""The gflags package contains a C++ library that implements
commandline flags processing. It includes built-in support for
standard types such as string and the ability to define flags
in the source file in which they are used. Online documentation
available at: https://gflags.github.io/gflags/"""
homepage = "https://gflags.github.io/gflags"
url = "https://github.com/gflags/gflags/archive/v2.1.2.tar.gz"
version('2.1.2', 'ac432de923f9de1e9780b5254884599f')
depends_on('cmake@2.8.12:', type='build')
def cmake_args(self):
return ['-DBUILD_SHARED_LIBS=ON']
| lgpl-2.1 |
data-exp-lab/girder | girder/utility/search.py | 4 | 3228 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from functools import partial
from girder.models.model_base import ModelImporter
from girder.exceptions import GirderException
_allowedSearchMode = {}
def getSearchModeHandler(mode):
"""
Get the handler function for a search mode
:param mode: A search mode identifier.
:type mode: str
:returns: A search mode handler function, or None.
:rtype: function or None
"""
return _allowedSearchMode.get(mode)
def addSearchMode(mode, handler):
"""
Register a search mode.
New searches made for the registered mode will call the handler function. The handler function
must take parameters: `query`, `types`, `user`, `level`, `limit`, `offset`, and return the
search results.
:param mode: A search mode identifier.
:type mode: str
:param handler: A search mode handler function.
:type handler: function
"""
if _allowedSearchMode.get(mode) is not None:
raise GirderException('A search mode %r already exists.' % mode)
_allowedSearchMode[mode] = handler
def removeSearchMode(mode):
"""
Remove a search mode.
This will fail gracefully (returning `False`) if no search mode `mode` was registered.
:param mode: A search mode identifier.
:type mode: str
:returns: Whether the search mode was actually removed.
:rtype: bool
"""
return _allowedSearchMode.pop(mode, None) is not None
def _commonSearchModeHandler(mode, query, types, user, level, limit, offset):
"""
The common handler for `text` and `prefix` search modes.
"""
# Avoid circular import
from girder.api.v1.resource import allowedSearchTypes
method = '%sSearch' % mode
results = {}
for modelName in types:
if modelName not in allowedSearchTypes:
continue
if '.' in modelName:
name, plugin = modelName.rsplit('.', 1)
model = ModelImporter.model(name, plugin)
else:
model = ModelImporter.model(modelName)
if model is not None:
results[modelName] = [
model.filter(d, user) for d in getattr(model, method)(
query=query, user=user, limit=limit, offset=offset, level=level)
]
return results
# Add dynamically the default search mode
addSearchMode('text', partial(_commonSearchModeHandler, mode='text'))
addSearchMode('prefix', partial(_commonSearchModeHandler, mode='prefix'))
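# Illustrative sketch only (not part of Girder): registering a custom search
# mode. The mode name and handler below are hypothetical; the handler simply
# follows the signature documented in addSearchMode and returns an empty
# result list for every requested type.
def _exampleSearchHandler(query, types, user=None, level=None, limit=0, offset=0):
    return {modelName: [] for modelName in types}

# Usage (commented out so importing this module does not register the mode):
# addSearchMode('example', _exampleSearchHandler)
# removeSearchMode('example')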
| apache-2.0 |
Manuel4131/youtube-dl | youtube_dl/extractor/onionstudios.py | 109 | 2787 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import determine_ext
class OnionStudiosIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?onionstudios\.com/(?:videos/[^/]+-|embed\?.*\bid=)(?P<id>\d+)(?!-)'
_TESTS = [{
'url': 'http://www.onionstudios.com/videos/hannibal-charges-forward-stops-for-a-cocktail-2937',
'md5': 'd4851405d31adfadf71cd7a487b765bb',
'info_dict': {
'id': '2937',
'ext': 'mp4',
'title': 'Hannibal charges forward, stops for a cocktail',
'description': 'md5:545299bda6abf87e5ec666548c6a9448',
'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'The A.V. Club',
'uploader_id': 'TheAVClub',
},
}, {
'url': 'http://www.onionstudios.com/embed?id=2855&autoplay=true',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?onionstudios\.com/embed.+?)\1', webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.onionstudios.com/embed?id=%s' % video_id, video_id)
formats = []
for src in re.findall(r'<source[^>]+src="([^"]+)"', webpage):
if determine_ext(src) != 'm3u8': # m3u8 always results in 403
formats.append({
'url': src,
})
self._sort_formats(formats)
title = self._search_regex(
r'share_title\s*=\s*(["\'])(?P<title>[^\1]+?)\1',
webpage, 'title', group='title')
description = self._search_regex(
r'share_description\s*=\s*(["\'])(?P<description>[^\1]+?)\1',
webpage, 'description', default=None, group='description')
thumbnail = self._search_regex(
r'poster\s*=\s*(["\'])(?P<thumbnail>[^\1]+?)\1',
webpage, 'thumbnail', default=False, group='thumbnail')
uploader_id = self._search_regex(
r'twitter_handle\s*=\s*(["\'])(?P<uploader_id>[^\1]+?)\1',
webpage, 'uploader id', fatal=False, group='uploader_id')
uploader = self._search_regex(
r'window\.channelName\s*=\s*(["\'])Embedded:(?P<uploader>[^\1]+?)\1',
webpage, 'uploader', default=False, group='uploader')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats,
}
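# Illustrative sketch (added; not part of youtube-dl): _extract_url is a
# static helper that scans arbitrary page HTML for an Onion Studios embed.
# The markup below is a minimal made-up example.
def _example_extract_embed_url():
    webpage = '<iframe src="http://www.onionstudios.com/embed?id=2855"></iframe>'
    # Returns 'http://www.onionstudios.com/embed?id=2855'.
    return OnionStudiosIE._extract_url(webpage)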
| unlicense |
Mashape/dd-agent | tests/core/test_histogram.py | 40 | 4851 | # stdlib
import unittest
# project
from aggregator import Histogram, MetricsAggregator
from config import get_histogram_aggregates, get_histogram_percentiles
class TestHistogram(unittest.TestCase):
def test_default(self):
stats = MetricsAggregator('myhost')
for i in xrange(20):
stats.submit_packets('myhistogram:{0}|h'.format(i))
metrics = stats.flush()
self.assertEquals(len(metrics), 5, metrics)
value_by_type = {}
for k in metrics:
value_by_type[k['metric'][len('myhistogram')+1:]] = k['points'][0][1]
self.assertEquals(
sorted(value_by_type.keys()),
['95percentile', 'avg', 'count', 'max', 'median'], value_by_type
)
self.assertEquals(value_by_type['max'], 19, value_by_type)
self.assertEquals(value_by_type['median'], 9, value_by_type)
self.assertEquals(value_by_type['avg'], 9.5, value_by_type)
self.assertEquals(value_by_type['count'], 20.0, value_by_type)
self.assertEquals(value_by_type['95percentile'], 18, value_by_type)
def test_custom_single_percentile(self):
configstr = '0.40'
stats = MetricsAggregator(
'myhost',
histogram_percentiles=get_histogram_percentiles(configstr)
)
self.assertEquals(
stats.metric_config[Histogram]['percentiles'],
[0.40],
stats.metric_config[Histogram]
)
for i in xrange(20):
stats.submit_packets('myhistogram:{0}|h'.format(i))
metrics = stats.flush()
self.assertEquals(len(metrics), 5, metrics)
value_by_type = {}
for k in metrics:
value_by_type[k['metric'][len('myhistogram')+1:]] = k['points'][0][1]
self.assertEquals(value_by_type['40percentile'], 7, value_by_type)
def test_custom_multiple_percentile(self):
configstr = '0.4, 0.65, 0.999'
stats = MetricsAggregator(
'myhost',
histogram_percentiles=get_histogram_percentiles(configstr)
)
self.assertEquals(
stats.metric_config[Histogram]['percentiles'],
[0.4, 0.65, 0.99],
stats.metric_config[Histogram]
)
for i in xrange(20):
stats.submit_packets('myhistogram:{0}|h'.format(i))
metrics = stats.flush()
self.assertEquals(len(metrics), 7, metrics)
value_by_type = {}
for k in metrics:
value_by_type[k['metric'][len('myhistogram')+1:]] = k['points'][0][1]
self.assertEquals(value_by_type['40percentile'], 7, value_by_type)
self.assertEquals(value_by_type['65percentile'], 12, value_by_type)
self.assertEquals(value_by_type['99percentile'], 19, value_by_type)
def test_custom_invalid_percentile(self):
configstr = '1.2342'
stats = MetricsAggregator(
'myhost',
histogram_percentiles=get_histogram_percentiles(configstr)
)
self.assertEquals(
stats.metric_config[Histogram]['percentiles'],
[],
stats.metric_config[Histogram]
)
def test_custom_invalid_percentile2(self):
configstr = 'aoeuoeu'
stats = MetricsAggregator(
'myhost',
histogram_percentiles=get_histogram_percentiles(configstr)
)
self.assertEquals(
stats.metric_config[Histogram]['percentiles'],
[],
stats.metric_config[Histogram]
)
def test_custom_invalid_percentile3skip(self):
configstr = 'aoeuoeu, 2.23, 0.8, 23'
stats = MetricsAggregator(
'myhost',
histogram_percentiles=get_histogram_percentiles(configstr)
)
self.assertEquals(
stats.metric_config[Histogram]['percentiles'],
[0.8],
stats.metric_config[Histogram]
)
def test_custom_aggregate(self):
configstr = 'median, max'
stats = MetricsAggregator(
'myhost',
histogram_aggregates=get_histogram_aggregates(configstr)
)
self.assertEquals(
sorted(stats.metric_config[Histogram]['aggregates']),
['max', 'median'],
stats.metric_config[Histogram]
)
for i in xrange(20):
stats.submit_packets('myhistogram:{0}|h'.format(i))
metrics = stats.flush()
self.assertEquals(len(metrics), 3, metrics)
value_by_type = {}
for k in metrics:
value_by_type[k['metric'][len('myhistogram')+1:]] = k['points'][0][1]
self.assertEquals(value_by_type['median'], 9, value_by_type)
self.assertEquals(value_by_type['max'], 19, value_by_type)
self.assertEquals(value_by_type['95percentile'], 18, value_by_type)
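# Added illustration (a sketch, not the agent's actual implementation): one
# simple indexing convention that reproduces the percentile values asserted
# above for 20 samples 0..19 (40th -> 7, 65th -> 12, 95th -> 18, 99th -> 19).
def _example_percentile_index(n_samples, percentile):
    return int(round(percentile * n_samples - 1))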
| bsd-3-clause |
lxwvictor/flask-ask | samples/session/session.py | 2 | 1928 | import logging
import os
from flask import Flask, json, render_template
from flask_ask import Ask, request, session, question, statement
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
COLOR_KEY = "COLOR"
@ask.launch
def launch():
card_title = render_template('card_title')
question_text = render_template('welcome')
reprompt_text = render_template('welcome_reprompt')
return question(question_text).reprompt(reprompt_text).simple_card(card_title, question_text)
@ask.intent('MyColorIsIntent', mapping={'color': 'Color'})
def my_color_is(color):
card_title = render_template('card_title')
if color is not None:
session.attributes[COLOR_KEY] = color
question_text = render_template('known_color', color=color)
reprompt_text = render_template('known_color_reprompt')
else:
question_text = render_template('unknown_color')
reprompt_text = render_template('unknown_color_reprompt')
return question(question_text).reprompt(reprompt_text).simple_card(card_title, question_text)
@ask.intent('WhatsMyColorIntent')
def whats_my_color():
card_title = render_template('card_title')
color = session.attributes.get(COLOR_KEY)
if color is not None:
statement_text = render_template('known_color_bye', color=color)
return statement(statement_text).simple_card(card_title, statement_text)
else:
question_text = render_template('unknown_color_reprompt')
return question(question_text).reprompt(question_text).simple_card(card_title, question_text)
@ask.session_ended
def session_ended():
return "{}", 200
if __name__ == '__main__':
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True)
| apache-2.0 |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.py | 23 | 18482 | """
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
from __future__ import division, print_function, absolute_import
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
from scipy._lib.six import string_types
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
pass
class LineOverflow(Warning):
pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
@classmethod
def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
"""Create a HBInfo instance from an existing sparse matrix.
Parameters
----------
m : sparse matrix
the HBInfo instance will derive its parameters from m
title : str
Title to put in the HB header
key : str
Key
mxtype : HBMatrixType
type of the input matrix
fmt : dict
not implemented
Returns
-------
hb_info : HBInfo instance
"""
pointer = m.indptr
indices = m.indices
values = m.data
nrows, ncols = m.shape
nnon_zeros = m.nnz
if fmt is None:
# +1 because HB use one-based indexing (Fortran), and we will write
# the indices /pointer as such
pointer_fmt = IntFormat.from_number(np.max(pointer+1))
indices_fmt = IntFormat.from_number(np.max(indices+1))
if values.dtype.kind in np.typecodes["AllFloat"]:
values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
elif values.dtype.kind in np.typecodes["AllInteger"]:
values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
else:
raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
else:
raise NotImplementedError("fmt argument not supported yet.")
if mxtype is None:
if not np.isrealobj(values):
raise ValueError("Complex values not supported yet")
if values.dtype.kind in np.typecodes["AllInteger"]:
tp = "integer"
elif values.dtype.kind in np.typecodes["AllFloat"]:
tp = "real"
else:
raise NotImplementedError("type %s for values not implemented"
% values.dtype)
mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
else:
raise ValueError("mxtype argument not handled yet.")
def _nlines(fmt, size):
nlines = size // fmt.repeat
if nlines * fmt.repeat != size:
nlines += 1
return nlines
pointer_nlines = _nlines(pointer_fmt, pointer.size)
indices_nlines = _nlines(indices_fmt, indices.size)
values_nlines = _nlines(values_fmt, values.size)
total_nlines = pointer_nlines + indices_nlines + values_nlines
return cls(title, key,
total_nlines, pointer_nlines, indices_nlines, values_nlines,
mxtype, nrows, ncols, nnon_zeros,
pointer_fmt.fortran_format, indices_fmt.fortran_format,
values_fmt.fortran_format)
@classmethod
def from_file(cls, fid):
"""Create a HBInfo instance from a file object containg a matrix in the
HB format.
Parameters
----------
fid : file-like matrix
File or file-like object containing a matrix in the HB format.
Returns
-------
hb_info : HBInfo instance
"""
# First line
line = fid.readline().strip("\n")
if not len(line) > 72:
raise ValueError("Expected at least 72 characters for first line, "
"got: \n%s" % line)
title = line[:72]
key = line[72:]
# Second line
line = fid.readline().strip("\n")
if not len(line.rstrip()) >= 56:
raise ValueError("Expected at least 56 characters for second line, "
"got: \n%s" % line)
total_nlines = _expect_int(line[:14])
pointer_nlines = _expect_int(line[14:28])
indices_nlines = _expect_int(line[28:42])
values_nlines = _expect_int(line[42:56])
rhs_nlines = line[56:72].strip()
if rhs_nlines == '':
rhs_nlines = 0
else:
rhs_nlines = _expect_int(rhs_nlines)
if not rhs_nlines == 0:
raise ValueError("Only files without right hand side supported for "
"now.")
# Third line
line = fid.readline().strip("\n")
if not len(line) >= 70:
raise ValueError("Expected at least 72 character for third line, got:\n"
"%s" % line)
mxtype_s = line[:3].upper()
if not len(mxtype_s) == 3:
raise ValueError("mxtype expected to be 3 characters long")
mxtype = HBMatrixType.from_fortran(mxtype_s)
if mxtype.value_type not in ["real", "integer"]:
raise ValueError("Only real or integer matrices supported for "
"now (detected %s)" % mxtype)
if not mxtype.structure == "unsymmetric":
raise ValueError("Only unsymmetric matrices supported for "
"now (detected %s)" % mxtype)
if not mxtype.storage == "assembled":
raise ValueError("Only assembled matrices supported for now")
if not line[3:14] == " " * 11:
raise ValueError("Malformed data for third line: %s" % line)
nrows = _expect_int(line[14:28])
ncols = _expect_int(line[28:42])
nnon_zeros = _expect_int(line[42:56])
nelementals = _expect_int(line[56:70])
if not nelementals == 0:
raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
% nelementals)
# Fourth line
line = fid.readline().strip("\n")
ct = line.split()
if not len(ct) == 3:
raise ValueError("Expected 3 formats, got %s" % ct)
return cls(title, key,
total_nlines, pointer_nlines, indices_nlines, values_nlines,
mxtype, nrows, ncols, nnon_zeros,
ct[0], ct[1], ct[2],
rhs_nlines, nelementals)
def __init__(self, title, key,
total_nlines, pointer_nlines, indices_nlines, values_nlines,
mxtype, nrows, ncols, nnon_zeros,
pointer_format_str, indices_format_str, values_format_str,
right_hand_sides_nlines=0, nelementals=0):
"""Do not use this directly, but the class ctrs (from_* functions)."""
self.title = title
self.key = key
if title is None:
title = "No Title"
if len(title) > 72:
raise ValueError("title cannot be > 72 characters")
if key is None:
key = "|No Key"
if len(key) > 8:
warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
self.total_nlines = total_nlines
self.pointer_nlines = pointer_nlines
self.indices_nlines = indices_nlines
self.values_nlines = values_nlines
parser = FortranFormatParser()
pointer_format = parser.parse(pointer_format_str)
if not isinstance(pointer_format, IntFormat):
raise ValueError("Expected int format for pointer format, got %s"
% pointer_format)
indices_format = parser.parse(indices_format_str)
if not isinstance(indices_format, IntFormat):
raise ValueError("Expected int format for indices format, got %s" %
indices_format)
values_format = parser.parse(values_format_str)
if isinstance(values_format, ExpFormat):
if mxtype.value_type not in ["real", "complex"]:
raise ValueError("Inconsistency between matrix type %s and "
"value type %s" % (mxtype, values_format))
values_dtype = np.float64
elif isinstance(values_format, IntFormat):
if mxtype.value_type not in ["integer"]:
raise ValueError("Inconsistency between matrix type %s and "
"value type %s" % (mxtype, values_format))
# XXX: fortran int -> dtype association ?
values_dtype = np.int
else:
raise ValueError("Unsupported format for values %r" % (values_format,))
self.pointer_format = pointer_format
self.indices_format = indices_format
self.values_format = values_format
self.pointer_dtype = np.int32
self.indices_dtype = np.int32
self.values_dtype = values_dtype
self.pointer_nlines = pointer_nlines
self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
self.indices_nlines = indices_nlines
self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
self.values_nlines = values_nlines
self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
self.nrows = nrows
self.ncols = ncols
self.nnon_zeros = nnon_zeros
self.nelementals = nelementals
self.mxtype = mxtype
def dump(self):
"""Gives the header corresponding to this instance as a string."""
header = [self.title.ljust(72) + self.key.ljust(8)]
header.append("%14d%14d%14d%14d" %
(self.total_nlines, self.pointer_nlines,
self.indices_nlines, self.values_nlines))
header.append("%14s%14d%14d%14d%14d" %
(self.mxtype.fortran_format.ljust(14), self.nrows,
self.ncols, self.nnon_zeros, 0))
pffmt = self.pointer_format.fortran_format
iffmt = self.indices_format.fortran_format
vffmt = self.values_format.fortran_format
header.append("%16s%16s%20s" %
(pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=np.int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=np.int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
def write_array(f, ar, nlines, fmt):
# ar_nlines is the number of full lines, n is the number of items per
# line, ffmt the fortran format
pyfmt = fmt.python_format
pyfmt_full = pyfmt * fmt.repeat
# for each array to write, we first write the full lines, and special
# case for partial line
full = ar[:(nlines - 1) * fmt.repeat]
for row in full.reshape((nlines-1, fmt.repeat)):
f.write(pyfmt_full % tuple(row) + "\n")
nremain = ar.size - full.size
if nremain > 0:
f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
fid.write(header.dump())
fid.write("\n")
# +1 is for fortran one-based indexing
write_array(fid, m.indptr+1, header.pointer_nlines,
header.pointer_format)
write_array(fid, m.indices+1, header.indices_nlines,
header.indices_format)
write_array(fid, m.data, header.values_nlines,
header.values_format)
class HBMatrixType(object):
"""Class to hold the matrix type."""
# q2f* translates qualified names to fortran character
_q2f_type = {
"real": "R",
"complex": "C",
"pattern": "P",
"integer": "I",
}
_q2f_structure = {
"symmetric": "S",
"unsymmetric": "U",
"hermitian": "H",
"skewsymmetric": "Z",
"rectangular": "R"
}
_q2f_storage = {
"assembled": "A",
"elemental": "E",
}
_f2q_type = dict([(j, i) for i, j in _q2f_type.items()])
_f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()])
_f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()])
@classmethod
def from_fortran(cls, fmt):
if not len(fmt) == 3:
raise ValueError("Fortran format for matrix type should be 3 "
"characters long")
try:
value_type = cls._f2q_type[fmt[0]]
structure = cls._f2q_structure[fmt[1]]
storage = cls._f2q_storage[fmt[2]]
return cls(value_type, structure, storage)
except KeyError:
raise ValueError("Unrecognized format %s" % fmt)
def __init__(self, value_type, structure, storage="assembled"):
self.value_type = value_type
self.structure = structure
self.storage = storage
if value_type not in self._q2f_type:
raise ValueError("Unrecognized type %s" % value_type)
if structure not in self._q2f_structure:
raise ValueError("Unrecognized structure %s" % structure)
if storage not in self._q2f_storage:
raise ValueError("Unrecognized storage %s" % storage)
@property
def fortran_format(self):
return self._q2f_type[self.value_type] + \
self._q2f_structure[self.structure] + \
self._q2f_storage[self.storage]
def __repr__(self):
return "HBMatrixType(%s, %s, %s)" % \
(self.value_type, self.structure, self.storage)
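# Illustrative sketch (added): the three-character Harwell-Boeing code is
# value type / structure / storage, so 'RUA' denotes a real, unsymmetric,
# assembled matrix.
def _example_hb_matrix_type():
    mtype = HBMatrixType.from_fortran("RUA")
    assert mtype.value_type == "real"
    assert mtype.structure == "unsymmetric"
    assert mtype.fortran_format == "RUA"
    return mtype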
class HBFile(object):
def __init__(self, file, hb_info=None):
"""Create a HBFile instance.
Parameters
----------
file : file-object
StringIO work as well
hb_info : HBInfo, optional
Should be given as an argument for writing, in which case the file
should be writable.
"""
self._fid = file
if hb_info is None:
self._hb_info = HBInfo.from_file(file)
else:
#raise IOError("file %s is not writable, and hb_info "
# "was given." % file)
self._hb_info = hb_info
@property
def title(self):
return self._hb_info.title
@property
def key(self):
return self._hb_info.key
@property
def type(self):
return self._hb_info.mxtype.value_type
@property
def structure(self):
return self._hb_info.mxtype.structure
@property
def storage(self):
return self._hb_info.mxtype.storage
def read_matrix(self):
return _read_hb_data(self._fid, self._hb_info)
def write_matrix(self, m):
return _write_data(m, self._fid, self._hb_info)
def hb_read(file):
"""Read HB-format file.
Parameters
----------
file : str-like or file-like
If a string-like object, file is the name of the file to read. If a
file-like object, the data are read from it.
Returns
-------
data : scipy.sparse.csc_matrix instance
The data read from the HB file as a sparse matrix.
Notes
-----
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
def _get_matrix(fid):
hb = HBFile(fid)
return hb.read_matrix()
if isinstance(file, string_types):
fid = open(file)
try:
return _get_matrix(fid)
finally:
fid.close()
else:
return _get_matrix(file)
def hb_write(file, m, hb_info=None):
"""Write HB-format file.
Parameters
----------
file : str-like or file-like
If a string-like object, file is the name of the file to write to. If a
file-like object, the data are written to it.
m : sparse-matrix
the sparse matrix to write
hb_info : HBInfo
contains the meta-data for write
Returns
-------
None
Notes
-----
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
if hb_info is None:
hb_info = HBInfo.from_data(m)
def _set_matrix(fid):
hb = HBFile(fid, hb_info)
return hb.write_matrix(m)
if isinstance(file, string_types):
fid = open(file, "w")
try:
return _set_matrix(fid)
finally:
fid.close()
else:
return _set_matrix(file)
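# Illustrative sketch (added; not part of scipy): write a small CSC matrix to
# a Harwell-Boeing file and read it back. The file name is arbitrary; np and
# csc_matrix are the module-level imports used above.
def _example_hb_roundtrip():
    m = csc_matrix(np.eye(3))
    hb_write("example.rb", m, HBInfo.from_data(m))
    return hb_read("example.rb")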
| bsd-3-clause |
danielreed/python-hpOneView | hpOneView/resources/networking/logical_downlinks.py | 1 | 5419 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
__title__ = 'logical-downlinks'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
class LogicalDownlinks(object):
URI = '/rest/logical-downlinks'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of logical downlinks. The collection is based on
optional sorting and filtering, and constrained by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all the items.
The actual number of items in the response may differ from the requested
count if the sum of start and count exceed the total number of items.
filter:
A general filter/query string to narrow the list of items returned. The
default is no filter - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time, with the oldest entry first.
Returns:
list: A list of logical downlinks.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
def get(self, id_or_uri):
"""
Gets a logical downlink by ID or by uri.
Args:
id_or_uri: Could be either the logical downlink id or the logical downlink uri.
Returns:
dict: The logical downlink.
"""
return self._client.get(id_or_uri)
def get_by(self, field, value):
"""
Get all logical downlinks that match the filter.
The search is case insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: A list of logical downlinks.
"""
return self._client.get_by(field, value)
def get_all_without_ethernet(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of logical downlinks without ethernet. The collection is
based on optional sorting and filtering, and constrained by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all the items.
The actual number of items in the response may differ from the requested
count if the sum of start and count exceeds the total number of items.
filter:
A general filter/query string to narrow the list of items returned. The
default is no filter - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time, with the oldest entry first.
Returns:
dict
"""
without_ethernet_client = ResourceClient(
self._connection, "/rest/logical-downlinks/withoutEthernet")
return without_ethernet_client.get_all(start, count, filter=filter, sort=sort)
def get_without_ethernet(self, id):
"""
Gets the logical downlink with the specified ID without ethernet.
Args:
id: ID of logical downlink.
Returns:
dict
"""
uri = "/rest/logical-downlinks/%s/withoutEthernet" % (id)
return self._client.get(uri)
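# Hypothetical usage sketch (not part of the library): 'connection' stands in
# for an authenticated OneView connection object, and the sort expression is
# only an example.
def _example_list_logical_downlinks(connection):
    downlinks = LogicalDownlinks(connection)
    return downlinks.get_all(start=0, count=10, sort='name:ascending')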
| mit |
sholloway/Jitterbug | ext/vendor/glm-0.9.2.3/util/gen_external_templates.py | 18 | 5678 |
__author__ = "eloraiby"
__date__ = "$5-Sep-2010 9:35:29 PM$"
atomic_types = ["unsigned char", "unsigned short", "unsigned int",
"signed char", "signed short", "signed int",
"float", "double"]
glsl_vector_types = ["tvec2", "tvec3", "tvec4"]
glsl_matrix_types = ["tmat2x2", "tmat2x3", "tmat2x4",
"tmat3x2", "tmat3x3", "tmat3x4",
"tmat4x2", "tmat4x3", "tmat4x4"]
glsl_matrix_member_operators = ["+=", "-=", "*=", "/="]
glsl_matrix_out_op_dic = {
"tmat2x2":"tmat2x2",
"tmat2x3":"tmat3x3",
"tmat2x4":"tmat4x4",
"tmat3x2":"tmat2x2",
"tmat3x3":"tmat3x3",
"tmat3x4":"tmat4x4",
"tmat4x2":"tmat2x2",
"tmat4x3":"tmat3x3",
"tmat4x4":"tmat4x4",
}
glsl_matrix_right_op_dic = {
"tmat2x2":"tmat2x2",
"tmat2x3":"tmat3x2",
"tmat2x4":"tmat4x2",
"tmat3x2":"tmat2x3",
"tmat3x3":"tmat3x3",
"tmat3x4":"tmat4x3",
"tmat4x2":"tmat2x4",
"tmat4x3":"tmat3x4",
"tmat4x4":"tmat4x4",
}
def gen_vectors():
for v in glsl_vector_types:
print
print "//"
print "// " + v + " type explicit instantiation"
print "//"
for a in atomic_types:
print "template struct " + v + "<" + a + ">;"
print
def gen_matrices_member_operators():
for m in glsl_matrix_types:
print
print "//"
print "// " + m + " type member operator instantiation"
print "//"
for a in atomic_types:
#print "template " + m + "<" + a + ">::col_type;"
#print "template " + m + "<" + a + ">::row_type;"
for c in atomic_types:
if a != c:
print "template " + m + "<" + a + ">::" + m + "(" + m + "<" + c + "> const &m);"
"""for b in glsl_matrix_member_operators:
for cm in atomic_types:
print "template " + m + "<" + a + ">& " + m + "<" + a + ">::operator " + b + "( " + m + "<" + cm + "> const &m);"
print "template " + m + "<" + a + ">& " + m + "<" + a + ">::operator " + b + "( " + cm + " const &s);"
"""
print
print "//"
print "// Binary operators"
print "//"
print "template " + m + "<" + a + "> operator + (" + m + "<" + a + "> const &m, " + a + " const &s);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator + (" + a + " const &s, " + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> operator + (" + m + "<" + a + "> const &m1, " + m + "<" + a + "> const &m2);"
print "template " + m + "<" + a + "> operator - (" + m + "<" + a + "> const &m, " + a + " const &s);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator - (" + a + " const &s, " + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> operator - (" + m + "<" + a + "> const &m1, " + m + "<" + a + "> const &m2);"
out_op = glsl_matrix_out_op_dic[m]
right_op = glsl_matrix_right_op_dic[m]
print "template " + m + "<" + a + "> operator * (" + m + "<" + a + "> const &m, " + a + " const &s);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator * ( " + a + " const &s, " + m + "<" + a + "> const &m);"
print "template " + out_op + "<" + a + "> operator * (" + m + "<" + a + "> const &m1, " + right_op + "<" + a + "> const &m2);"
print "template " + m + "<" + a + ">::col_type" + " operator * ( " + m + "<" + a + "> const &m, " + m + "<" + a + ">::row_type" + " const &s);"
print "template " + m + "<" + a + ">::row_type" + " operator * ( " + m + "<" + a + ">::col_type const &s, " + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> operator / (" + m + "<" + a + "> const &m, " + a + " const &s);"
#print "template " + right_op + "<" + a + "> operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
#print "template " + m + "<" + a + "> operator / (" + m + "<" + a + "> const &m1, " + m + "<" + a + "> const &m2);"
else:
print "template " + m + "<" + a + "> operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
#print "template " + m + "<" + a + ">" + " operator / ( " + m + "<" + a + "> const &m, " + a + " const &s);"
#print "template " + m + "<" + a + ">" + " operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
print
print "//"
print "// Unary constant operators"
print "//"
print "template " + m + "<" + a + "> const operator -(" + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> const operator --(" + m + "<" + a + "> const &m, int);"
print "template " + m + "<" + a + "> const operator ++(" + m + "<" + a + "> const &m, int);"
print
def gen_matrices():
for m in glsl_matrix_types:
print
print "//"
print "// " + m + " type explicit instantiation"
print "//"
for a in atomic_types:
print "template struct " + m + "<" + a + ">;"
print
if __name__ == "__main__":
print "//"
print "// GLM External templates generator script version 0.1 for GLM core"
print "//"
print "// atomic types:", atomic_types
print "// GLSL vector types:", glsl_vector_types;
print "// GLSL matrix types:", glsl_matrix_types;
print "//"
print
print "#include <glm/glm.hpp>"
print
print "namespace glm {"
print "namespace detail {"
gen_vectors()
gen_matrices()
gen_matrices_member_operators()
print "} // namespace detail"
print "} // namespace glm"
| mit |
googleapis/python-speech | setup.py | 1 | 2804 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-speech"
description = "Google Cloud Speech API client library"
version = "2.5.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
"google-api-core[grpc] >= 1.26.0, < 2.0.0dev",
"libcst >= 0.2.5",
"proto-plus >= 1.4.0",
"packaging >= 14.3",
]
extras = {}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package
for package in setuptools.PEP420PackageFinder.find()
if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/googleapis/python-speech",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
python_requires=">=3.6",
scripts=[
"scripts/fixup_speech_v1_keywords.py",
"scripts/fixup_speech_v1p1beta1_keywords.py",
],
include_package_data=True,
zip_safe=False,
)
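# Build/install sketch (standard setuptools/pip workflow, not specific to this
# package): from a source checkout one would typically run
#   pip install .
# or, to build distributable archives,
#   python setup.py sdist bdist_wheel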
| apache-2.0 |
asm-products/movie-database-service | ani/lib/python2.7/site-packages/django/db/models/sql/query.py | 13 | 84272 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.utils.tree import Node
from django.utils import six
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models.constants import LOOKUP_SEP
from django.db.models.aggregates import refs_aggregate
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.related import PathInfo
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
ORDER_PATTERN, JoinInfo, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR, EmptyWhere)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.aggregate_select = {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %r>" % (self.sql % tuple(self.params))
def _execute_query(self):
self.cursor = connections[self.using].cursor()
self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# type they are. The key is the alias of the joined table (possibly
# the table name) and the value is JoinInfo from constants.py.
self.alias_map = {}
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
# Select and related select clauses as SelectInfo instances.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), annotate(),
# subqueries...)
self.select = []
# The related_select_cols is used for columns needed for
# select_related - this is populated in compile stage.
self.related_select_cols = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
# SQL aggregate-related attributes
self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
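# Illustrative sketch (model and SQL are hypothetical): for a queryset such as
# Entry.objects.filter(pk=1), str(qs.query) interpolates the parameters and
# yields something like
#   SELECT "app_entry"."id", ... FROM "app_entry" WHERE "app_entry"."id" = 1
# while sql_with_params() returns the placeholder SQL and the params separately.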
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.select = self.select[:]
obj.related_select_cols = []
obj.tables = self.tables[:]
obj.where = self.where.clone()
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = self.having.clone()
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.related_select_cols = []
obj.aggregates = self.aggregates.copy()
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
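# Illustrative behaviour (values are hypothetical): an ordinal aggregate such
# as Count resolves None -> 0 and '3' -> 3; a computed aggregate such as Avg
# resolves Decimal('2.5') -> 2.5; anything else falls through to
# convert_values() for field-specific handling.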
def get_aggregation(self, using):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregate_select[alias] = aggregate
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self.extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_for_update = False
query.select_related = False
query.related_select_cols = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict([
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
])
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
# add_subquery evaluates the query, if it's an EmptyResultSet
# then there can be no results, and therefore the
# count is obviously 0
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def has_results(self, using):
q = self.clone()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in the AND
# case a single row can't fulfill a condition like:
#     revrel__col=1 & revrel__col=2
# but there might be two different related rows matching this
# condition. In the OR case a single True is enough, so a single row
# is enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.tables)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
# Now, add the joins from rhs query into the new query (skipping base
# table).
for alias in rhs.tables[1:]:
table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias]
promote = (join_type == self.LOUTER)
# If the left side of the join was already relabeled, use the
# updated alias.
lhs = change_map.get(lhs, lhs)
new_alias = self.join(
(lhs, table, join_cols), reuse=reuse,
outer_if_first=not conjunction, nullable=nullable,
join_field=join_field)
if promote:
self.promote_joins([new_alias])
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
# So that we don't exclude valid results in an OR query combination,
# all joins exclusive to either the lhs or the rhs must be converted
# to an outer join. RHS joins were already set to outer joins above,
# so check which joins were used only in the lhs query.
if not conjunction:
rhs_used_joins = set(change_map.values())
to_promote = [alias for alias in self.tables
if alias not in rhs_used_joins]
self.promote_joins(to_promote, True)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = rhs.where.clone()
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col, field in rhs.select:
if isinstance(col, (list, tuple)):
new_col = change_map.get(col[0], col[0]), col[1]
self.select.append(SelectInfo(new_col, field))
else:
new_col = col.relabeled_clone(change_map)
self.select.append(SelectInfo(new_col, field))
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
if is_reverse_o2o(source):
cur_model = source.model
else:
cur_model = source.rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in six.iteritems(seen):
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in six.iteritems(must_include):
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in six.iteritems(workset):
callback(target, model, values)
else:
for model, values in six.iteritems(must_include):
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in six.iteritems(seen):
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
current = self.table_map.get(table_name)
if not create and current:
alias = current[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if current:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
current.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
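# Illustrative sketch (table name is hypothetical): the first request for
# "app_entry" returns ("app_entry", True), reusing the table name as the
# alias; a later call with create=True returns something like ("T3", True),
# built from alias_prefix and the current size of alias_map.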
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases, unconditional=False):
"""
Recursively promotes the join type of the given aliases and their children to
an outer join. If 'unconditional' is False, the join is only promoted if
it is nullable or the parent join is an outer join.
Note about join promotion: When promoting any alias, we make sure all
joins which start from that alias are promoted, too. When adding a join
in join(), we make sure any join added to already existing LOUTER join
is generated as LOUTER. This ensures we don't ever have broken join
chains which contain first a LOUTER join, then an INNER JOIN, that is
this kind of join should never be generated: a LOUTER b INNER c. The
reason for avoiding this type of join chain is that the INNER after
the LOUTER will effectively remove any effect the LOUTER had.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_cols[0][1] is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
parent_alias = self.alias_map[alias].lhs_alias
parent_louter = (parent_alias
and self.alias_map[parent_alias].join_type == self.LOUTER)
already_louter = self.alias_map[alias].join_type == self.LOUTER
if ((unconditional or self.alias_map[alias].nullable
or parent_louter) and not already_louter):
data = self.alias_map[alias]._replace(join_type=self.LOUTER)
self.alias_map[alias] = data
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map.keys()
if (self.alias_map[join].lhs_alias == alias
and join not in aliases))
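# Illustrative sketch (aliases are hypothetical): starting from
#   t1 INNER t2 INNER t3
# promoting t2 (assuming it is nullable or unconditional=True) turns it into a
# LOUTER join and then re-examines t3, whose lhs is t2; because t3's parent is
# now LOUTER, t3 is promoted as well, giving t1 LOUTER t2 LOUTER t3 and
# avoiding a broken LOUTER-then-INNER chain.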
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the value passed in :param to_counts:.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def promote_disjunction(self, aliases_before, alias_usage_counts,
num_childs):
"""
This method is to be used for promoting joins in ORed filters.
The principle for promotion is: any alias which is used (it is in
alias_usage_counts), is not used by every child of the ORed filter,
and isn't pre-existing needs to be promoted to a LOUTER join.
"""
for alias, use_count in alias_usage_counts.items():
if use_count < num_childs and alias not in aliases_before:
self.promote_joins([alias])
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
def relabel_column(col):
if isinstance(col, (list, tuple)):
old_alias = col[0]
return (change_map.get(old_alias, old_alias), col[1])
else:
return col.relabeled_clone(change_map)
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
if self.group_by:
self.group_by = [relabel_column(col) for col in self.group_by]
self.select = [SelectInfo(relabel_column(s.col), s.field)
for s in self.select]
self.aggregates = SortedDict(
(key, relabel_column(col)) for key, col in self.aggregates.items())
# 2. Rename the alias in the internal table/alias datastructures.
for ident, aliases in self.join_map.items():
del self.join_map[ident]
aliases = tuple([change_map.get(a, a) for a in aliases])
ident = (change_map.get(ident[0], ident[0]),) + ident[1:]
self.join_map[ident] = aliases
for old_alias, new_alias in six.iteritems(change_map):
alias_data = self.alias_map[old_alias]
alias_data = alias_data._replace(rhs_alias=new_alias)
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = alias_data
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in six.iteritems(self.alias_map):
lhs = data.lhs_alias
if lhs in change_map:
data = data._replace(lhs_alias=change_map[lhs])
self.alias_map[alias] = data
# 4. Update the temporary _lookup_joins list
if hasattr(self, '_lookup_joins'):
self._lookup_joins = [change_map.get(lj, lj) for lj in self._lookup_joins]
def bump_prefix(self, exceptions=()):
"""
Changes the alias prefix to the next letter in the alphabet and
relabels all the aliases. Even tables that previously had no alias will
get an alias after this call (it's mostly used for nested queries and
the outer query will already be using the non-aliased table name).
Subclasses who create their own prefix should override this method to
produce a similar result (a new prefix and relabelled aliases).
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
current = ord(self.alias_prefix)
assert current < ord('Z')
prefix = chr(current + 1)
self.alias_prefix = prefix
change_map = SortedDict()
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
new_alias = '%s%d' % (prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, connection, reuse=None, outer_if_first=False,
nullable=False, join_field=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, join_cols) where 'lhs' is either an existing
table alias or a table name. 'join_cols' is a tuple of tuples containing
columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
to the SQL equivalent of::
lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
The 'reuse' parameter can be either None which means all joins
(matching the connection) are reusable, or it can be a set containing
the aliases that can be reused.
If 'outer_if_first' is True and a new join is created, it will have the
LOUTER join type.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure we do not generate chains like t1 LOUTER t2 INNER t3.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
The 'join_field' is the field we are joining along (if any).
"""
lhs, table, join_cols = connection
assert lhs is None or join_field is not None
existing = self.join_map.get(connection, ())
if reuse is None:
reuse = existing
else:
reuse = [a for a in existing if a in reuse]
for alias in reuse:
if join_field and self.alias_map[alias].join_field != join_field:
# The join_map doesn't contain join_field (mainly because
# fields in Query structs are problematic in pickling), so
# check that the existing join was created using the same
# join_field as the join we are currently adding.
continue
self.ref_alias(alias)
return alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(table, True)
if not lhs:
# Not all tables need to be joined to anything. No join type
# means the later columns are ignored.
join_type = None
elif outer_if_first or self.alias_map[lhs].join_type == self.LOUTER:
# We need to use LOUTER join if asked by outer_if_first or if the
# LHS table is left-joined in the query.
join_type = self.LOUTER
else:
join_type = self.INNER
join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
join_field)
self.alias_map[alias] = join
if connection in self.join_map:
self.join_map[connection] += (alias,)
else:
self.join_map[connection] = (alias,)
return alias
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
opts = self.get_meta()
root_alias = self.tables[0]
seen = {None: root_alias}
for field, model in opts.get_fields_with_model():
if model not in seen:
self.join_parent_model(opts, model, root_alias, seen)
self.included_inherited_models = seen
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if chain is None:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
return seen[int_model]
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
_, _, _, joins, _ = self.setup_joins(
[link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = joins[-1]
return alias or seen[None]
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def add_aggregate(self, aggregate, model, alias, is_summary):
"""
Adds a single aggregate expression to the Query
"""
opts = model._meta
field_list = aggregate.lookup.split(LOOKUP_SEP)
if len(field_list) == 1 and aggregate.lookup in self.aggregates:
# Aggregate is over an annotation
field_name = field_list[0]
col = field_name
source = self.aggregates[field_name]
if not is_summary:
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
aggregate.name, field_name, field_name))
elif ((len(field_list) > 1) or
(field_list[0] not in [i.name for i in opts.fields]) or
self.group_by is None or
not is_summary):
# If:
# - the field descriptor has more than one part (foo__bar), or
# - the field descriptor is referencing an m2m/m2o field, or
# - this is a reference to a model field (possibly inherited), or
# - this is an annotation over a model field
# then we need to explore the joins that are required.
field, sources, opts, join_list, path = self.setup_joins(
field_list, opts, self.get_initial_alias())
# Process the join chain to see if it can be trimmed
targets, _, join_list = self.trim_joins(sources, join_list, path)
# If the aggregate references a model or field that requires a join,
# those joins must be LEFT OUTER - empty join rows must be returned
# in order for zeros to be returned for those aggregates.
self.promote_joins(join_list, True)
col = targets[0].column
source = sources[0]
col = (join_list[-1], col)
else:
# The simplest cases. No joins required -
# just reference the provided column alias.
field_name = field_list[0]
source = opts.get_field(field_name)
col = field_name
# Add the aggregate to the query
aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
to this Query. Query.add_q() will then add this filter to the where
or having Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself; that is done
higher up in the call chain by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
lookup_type = 'exact' # Default lookup type
num_parts = len(parts)
if (len(parts) > 1 and parts[-1] in self.query_terms
and arg not in self.aggregates):
# Traverse the lookup query to distinguish related fields from
# lookup types.
lookup_model = self.model
for counter, field_name in enumerate(parts):
try:
lookup_field = lookup_model._meta.get_field(field_name)
except FieldDoesNotExist:
# Not a field. Bail out.
lookup_type = parts.pop()
break
# Unless we're at the end of the list of lookups, let's attempt
# to continue traversing relations.
if (counter + 1) < num_parts:
try:
lookup_model = lookup_field.rel.to
except AttributeError:
# Not a related field. Bail out.
lookup_type = parts.pop()
break
clause = self.where_class()
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
elif isinstance(value, ExpressionNode):
# If value is a query expression, evaluate it
value = SQLEvaluator(value, self, reuse=can_reuse)
# For Oracle '' is equivalent to null. The check needs to be done
# at this stage because join promotion can't be done at compiler
# stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
# can do here. Similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup_type == 'exact' and value == ''):
value = True
lookup_type = 'isnull'
for alias, aggregate in self.aggregates.items():
if alias in (parts[0], LOOKUP_SEP.join(parts)):
clause.add((aggregate, lookup_type, value), AND)
return clause
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated
try:
field, sources, opts, join_list, path = self.setup_joins(
parts, opts, alias, can_reuse, allow_many,
allow_explicit_fk=True)
if can_reuse is not None:
can_reuse.update(join_list)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_list
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
if (lookup_type == 'isnull' and value is True and not current_negated and
len(join_list) > 1):
# If the comparison is against NULL, we may need to use some left
# outer joins when creating the join chain. This is only done when
# needed, as it's less efficient at the database level.
self.promote_joins(join_list)
# Process the join list to see if we can remove any inner joins from
# the far end (fewer tables in a query is better). Note that join
# promotion must happen before join trimming to have the join type
# information available when reusing joins.
targets, alias, join_list = self.trim_joins(sources, join_list, path)
if hasattr(field, 'get_lookup_constraint'):
constraint = field.get_lookup_constraint(self.where_class, alias, targets, sources,
lookup_type, value)
else:
constraint = (Constraint(alias, targets[0].column, field), lookup_type, value)
clause.add(constraint, AND)
if current_negated and (lookup_type != 'isnull' or value is False):
self.promote_joins(join_list)
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == self.LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
clause.add((Constraint(alias, targets[0].column, None), 'isnull', False), AND)
return clause
def add_filter(self, filter_clause):
self.where.add(self.build_filter(filter_clause), 'AND')
def need_having(self, obj):
"""
Returns whether or not all elements of this q_object need to be put
together in the HAVING clause.
"""
if not isinstance(obj, Node):
return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)
or (hasattr(obj[1], 'contains_aggregate')
and obj[1].contains_aggregate(self.aggregates)))
return any(self.need_having(c) for c in obj.children)
def split_having_parts(self, q_object, negated=False):
"""
Returns a list of q_objects which need to go into the having clause
instead of the where clause. Removes the split-out nodes from the
given q_object. Note that the q_object is altered, so cloning it is
needed.
"""
having_parts = []
for c in q_object.children[:]:
# When constructing the having nodes we need to take care to
# preserve the negation status from the upper parts of the tree
if isinstance(c, Node):
# For each negated child, flip the in_negated flag.
in_negated = c.negated ^ negated
if c.connector == OR and self.need_having(c):
# A subtree starting from an OR clause must go into the having clause
# as a whole if any part of that tree references an aggregate.
q_object.children.remove(c)
having_parts.append(c)
c.negated = in_negated
else:
having_parts.extend(
self.split_having_parts(c, in_negated)[1])
elif self.need_having(c):
q_object.children.remove(c)
new_q = self.where_class(children=[c], negated=negated)
having_parts.append(new_q)
return q_object, having_parts
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for
splitting the given q_object into where and having parts and
setting up some internal variables.
"""
if not self.need_having(q_object):
where_part, having_parts = q_object, []
else:
where_part, having_parts = self.split_having_parts(
q_object.clone(), q_object.negated)
used_aliases = self.used_aliases
clause = self._add_q(where_part, used_aliases)
self.where.add(clause, AND)
for hp in having_parts:
clause = self._add_q(hp, used_aliases)
self.having.add(clause, AND)
if self.filter_is_sticky:
self.used_aliases = used_aliases
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False):
"""
Adds a Q-object to the current filter.
"""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
# Treat case NOT (a AND b) like case ((NOT a) OR (NOT b)) for join
# promotion. See ticket #21748.
effective_connector = connector
if current_negated:
effective_connector = OR if effective_connector == AND else AND
if effective_connector == OR:
alias_usage_counts = dict()
aliases_before = set(self.tables)
for child in q_object.children:
if effective_connector == OR:
refcounts_before = self.alias_refcount.copy()
if isinstance(child, Node):
child_clause = self._add_q(
child, used_aliases, branch_negated,
current_negated)
else:
child_clause = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated)
target_clause.add(child_clause, connector)
if effective_connector == OR:
used = alias_diff(refcounts_before, self.alias_refcount)
for alias in used:
alias_usage_counts[alias] = alias_usage_counts.get(alias, 0) + 1
if effective_connector == OR:
self.promote_disjunction(aliases_before, alias_usage_counts,
len(q_object.children))
return target_clause
def names_to_path(self, names, opts, allow_many, allow_explicit_fk):
"""
Walks the names path and turns them into PathInfo tuples. Note that a
single name in 'names' can generate multiple PathInfos (m2m for
example).
'names' is the path of names to traverse, 'opts' is the model Options we
start the name resolving from, 'allow_many' and 'allow_explicit_fk'
are as for setup_joins().
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
available = opts.get_all_field_names() + list(self.aggregate_select)
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(available)))
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model:
# The field lives on a base class of the current model.
# Skip the chain of proxy models up to the concrete proxied model.
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.rel.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
cur_names_with_path[1].append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
break
if pos != len(names) - 1:
if pos == len(names) - 2:
raise FieldError(
"Join on field %r not permitted. Did you misspell %r for "
"the lookup type?" % (name, names[pos + 1]))
else:
raise FieldError("Join on field %r not permitted." % name)
return path, final_field, targets
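# Illustrative sketch (model and field names are hypothetical): for
# names=['author', 'name'] starting from Entry's Options, the path holds one
# PathInfo for the Entry -> Author foreign key, final_field is Author's 'name'
# field and targets is ('name' field,); a multi-valued hop with
# allow_many=False raises MultiJoin instead.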
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
allow_explicit_fk=False, outer_if_first=False):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
The 'allow_explicit_fk' controls if field.attname is allowed in the
lookups.
Returns the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. The final
field can be something different, for example a foreign key pointing to
that value. The final field is needed in some value conversions (for
example, converting 'obj' in fk__id=obj to a pk value using the foreign
key field).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets = self.names_to_path(
names, opts, allow_many, allow_explicit_fk)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for pos, join in enumerate(path):
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = alias, opts.db_table, join.join_field.get_joining_columns()
reuse = can_reuse if join.m2m else None
alias = self.join(
connection, reuse=reuse, nullable=nullable, join_field=join.join_field,
outer_if_first=outer_if_first)
joins.append(alias)
if hasattr(final_field, 'field'):
final_field = final_field.field
return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
"""
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = set(t.column for t in info.join_field.foreign_related_fields)
cur_targets = set(t.column for t in targets)
if not cur_targets.issubset(join_targets):
break
targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
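# Illustrative sketch (hypothetical fields): for a filter on author__id the
# needed value already lives in the entry table's author_id column, so the
# trailing direct join to the author table is unreffed and dropped and the
# constraint targets the foreign key column instead; reverse joins in the
# path are never trimmed.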
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.where.add(query.build_filter(filter_expr), AND)
query.bump_prefix()
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
query.remove_inherited_models()
# Add an extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
if self.is_nullable(query.select[0].field):
alias, col = query.select[0].col
query.where.add((Constraint(alias, col, query.select[0].field), 'isnull', False), AND)
condition = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition
def set_empty(self):
self.where = EmptyWhere()
self.having = EmptyWhere()
def is_empty(self):
return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
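# Illustrative sketch: slicing a queryset as qs[5:10] ends up calling
# set_limits(5, 10) on a fresh clone, giving low_mark=5 and high_mark=10,
# which the compiler later renders roughly as LIMIT 5 OFFSET 5 (the exact SQL
# depends on the backend).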
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
self.select = []
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_aggregate_mask(())
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
field, targets, u2, joins, path = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, None, allow_m2m,
allow_explicit_fk=True, outer_if_first=True)
# Trim last join if possible
targets, final_alias, remaining_joins = self.trim_joins(targets, joins[-2:], path)
joins = joins[:-2] + remaining_joins
self.promote_joins(joins[1:])
for target in targets:
self.select.append(SelectInfo((final_alias, target.column), target))
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(opts.get_all_field_names() + list(self.extra)
+ list(self.aggregate_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for col, _ in self.select:
self.group_by.append(col)
def add_count_column(self):
"""
Converts the query to do count(...) or count(distinct(pk)) in order to
get its size.
"""
if not self.distinct:
if not self.select:
count = self.aggregates_module.Count('*', is_summary=True)
else:
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select': %r" % self.select
count = self.aggregates_module.Count(self.select[0].col)
else:
opts = self.get_meta()
if not self.select:
count = self.aggregates_module.Count(
(self.join((None, opts.db_table, None)), opts.pk.column),
is_summary=True, distinct=True)
else:
# Because of SQL portability issues, multi-column, distinct
# counts need a sub-query -- see get_count() for details.
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select'."
count = self.aggregates_module.Count(self.select[0].col, distinct=True)
# Distinct handling is done in Count(), so don't do it at this
# level.
self.distinct = False
# Set only aggregate to be the count column.
# Clear out the select cache to reflect the new unmasked aggregates.
self.aggregates = {None: count}
self.set_aggregate_mask(None)
self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
field_dict = {}
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = SortedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is a SortedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_loaded_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
We don't actually remove them from the Query since they might be used
later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). Also returns
whether the joins in the prefix contain a LEFT OUTER join.
"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == self.LOUTER:
contains_louter = True
self.unref_alias(lookup_tables[trimmed_paths])
# The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != self.LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
and field.empty_strings_allowed):
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = set([value])
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
"""
return not hasattr(field, 'rel') and field.field.unique
def alias_diff(refcounts_before, refcounts_after):
"""
Given the before and after copies of refcounts, works out which aliases
have been added to the after copy.
"""
# Use -1 as default value so that any join that is created, then trimmed
# is seen as added.
return set(t for t in refcounts_after
if refcounts_after[t] > refcounts_before.get(t, -1))
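# Illustrative behaviour of the module-level helpers above (hypothetical
# interactive session; the results follow directly from the code):
#
#   >>> get_order_dir('-created')
#   ('created', 'DESC')
#   >>> get_order_dir('created')      # no prefix sorts by the default, ASC
#   ('created', 'ASC')
#   >>> data = {}
#   >>> add_to_dict(data, 'tags', 1)
#   >>> add_to_dict(data, 'tags', 2)
#   >>> data
#   {'tags': set([1, 2])}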
| agpl-3.0 |
tectronics/py-lepton | lepton/pygame_renderer.py | 6 | 3955 | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Pygame particle renderers.
(Obviously) requires pygame
"""
__version__ = '$Id$'
from pygame.transform import rotozoom
from math import degrees
class FillRenderer:
"""Renders particles to a pygame surface using simple fills"""
def __init__(self, surface, flags=None):
"""
surface -- pygame surface to render particles to
flags -- Special fill flags (pygame 1.8+ required)
"""
self.surface = surface
self.flags = flags
def draw(self, group):
fill = self.surface.fill
if self.flags is None:
for p in group:
fill(p.color.clamp(0, 255),
(p.position.x, p.position.y, p.size.x, p.size.y))
else:
flags = self.flags
for p in group:
fill(p.color.clamp(0, 255),
(p.position.x, p.position.y, p.size.x, p.size.y), flags)
class Cache:
"""Simple, fast, bounded cache that gives approximate MRU behavior"""
def __init__(self, max_size, load_factor=0.85):
self.max_size = max_size
self.max_recent_size = int(max_size * load_factor)
self._recent = {} # Recently accessed bucket
self._aged = {} # Less recently accessed bucket
self.accesses = 0
self.misses = 0
self.adds = 0
self.flips = 0
self.purged = 0
def __getitem__(self, key):
#self.accesses += 1
try:
try:
return self._recent[key]
except KeyError:
# Promote aged element to "recent"
value = self._aged.pop(key)
self._recent[key] = value
return value
except KeyError:
#self.misses += 1
raise
def __len__(self):
return len(self._recent) + len(self._aged)
def __contains__(self, key):
return key in self._recent or key in self._aged
def __setitem__(self, key, value):
assert value is not None
#self.adds += 1
if key in self._aged:
del self._aged[key]
if len(self._recent) >= self.max_recent_size:
# Flip the cache discarding aged entries
#self.flips += 1
#print self.flips, 'cache flips in', self.adds, ' adds. ',
#print self.misses, 'misses in', self.accesses, 'accesses (',
#print (self.accesses - self.misses) * 100 / self.accesses, '% hit rate) ',
#print 'with', self.purged, 'purged'
self._aged = self._recent
self._recent = {}
self._recent[key] = value
while self._aged and len(self) > self.max_size:
# Over max size, purge aged entries
#self.purged += 1
self._aged.popitem()
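# Illustrative usage sketch for the Cache above (hypothetical values):
# entries are promoted to the "recent" bucket on access; once the recent
# bucket reaches max_size * load_factor the buckets flip, and aged entries
# are purged when the total size exceeds max_size.
#
#   cache = Cache(3)        # max_recent_size == int(3 * 0.85) == 2
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3          # recent bucket was full -> 'a' and 'b' become aged
#   _ = cache['a']          # access promotes 'a' back into the recent bucket
#   cache['d'] = 4          # over max_size -> one aged entry is purged
#   len(cache) == 3         # True; which aged entry was dropped is arbitrary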
class BlitRenderer:
"""Renders particles by blitting to a pygame surface"""
surf_cache = Cache(200)
def __init__(self, surface, particle_surface, rotate_and_scale=False):
"""
surface -- pygame surface to render particles to
particle_surface -- surface blit to draw each particle.
rotate_and_scale -- If true, the particles surfaces are rotated and scaled
before blitting.
"""
self.surface = surface
self.particle_surface = particle_surface
self.rotate_and_scale = rotate_and_scale
def draw(self, group):
blit = self.surface.blit
psurface = self.particle_surface
if not self.rotate_and_scale:
for p in group:
blit(psurface, (p.position.x, p.position.y))
else:
cache = self.surf_cache
surfid = id(psurface)
for p in group:
size = int(p.size.x)
rot = int(p.rotation.x)
cachekey = (surfid, size, rot)
try:
surface = cache[cachekey]
except KeyError:
scale = p.size.x / psurface.get_width()
surface = cache[cachekey] = rotozoom(psurface, rot, scale)
blit(surface, (p.position.x, p.position.y))
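# Illustrative wiring sketch (assumes a pygame display is initialised and
# "group" is a lepton ParticleGroup -- or any iterable of particles exposing
# .color.clamp(), .position.x/.y and .size.x/.y, as the draw() methods above
# expect):
#
#   import pygame
#   screen = pygame.display.set_mode((640, 480))
#   renderer = FillRenderer(screen)
#   # per frame:
#   #   renderer.draw(group)
#   #   pygame.display.flip()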
| mit |
takaaptech/sky_engine | build/linux/unbundle/replace_gyp_files.py | 40 | 2929 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces gyp files in tree with files from here that
make the build use system libraries.
"""
import optparse
import os.path
import shutil
import sys
REPLACEMENTS = {
'use_system_expat': 'third_party/expat/expat.gyp',
'use_system_ffmpeg': 'third_party/ffmpeg/ffmpeg.gyp',
'use_system_flac': 'third_party/flac/flac.gyp',
'use_system_harfbuzz': 'third_party/harfbuzz-ng/harfbuzz.gyp',
'use_system_icu': 'third_party/icu/icu.gyp',
'use_system_jsoncpp': 'third_party/jsoncpp/jsoncpp.gyp',
'use_system_libevent': 'third_party/libevent/libevent.gyp',
'use_system_libjpeg': 'third_party/libjpeg/libjpeg.gyp',
'use_system_libpng': 'third_party/libpng/libpng.gyp',
'use_system_libusb': 'third_party/libusb/libusb.gyp',
'use_system_libvpx': 'third_party/libvpx/libvpx.gyp',
'use_system_libwebp': 'third_party/libwebp/libwebp.gyp',
'use_system_libxml': 'third_party/libxml/libxml.gyp',
'use_system_libxnvctrl' : 'third_party/libXNVCtrl/libXNVCtrl.gyp',
'use_system_libxslt': 'third_party/libxslt/libxslt.gyp',
'use_system_opus': 'third_party/opus/opus.gyp',
'use_system_protobuf': 'third_party/protobuf/protobuf.gyp',
'use_system_re2': 'third_party/re2/re2.gyp',
'use_system_snappy': 'third_party/snappy/snappy.gyp',
'use_system_speex': 'third_party/speex/speex.gyp',
'use_system_sqlite': 'third_party/sqlite/sqlite.gyp',
'use_system_v8': 'v8/tools/gyp/v8.gyp',
'use_system_zlib': 'third_party/zlib/zlib.gyp',
}
def DoMain(argv):
my_dirname = os.path.dirname(__file__)
source_tree_root = os.path.abspath(
os.path.join(my_dirname, '..', '..', '..'))
parser = optparse.OptionParser()
# Accept arguments in gyp command-line syntax, so that the caller can re-use
# command-line for this script and gyp.
parser.add_option('-D', dest='defines', action='append')
parser.add_option('--undo', action='store_true')
options, args = parser.parse_args(argv)
for flag, path in REPLACEMENTS.items():
if '%s=1' % flag not in options.defines:
continue
if options.undo:
# Restore original file, and also remove the backup.
# This is meant to restore the source tree to its original state.
os.rename(os.path.join(source_tree_root, path + '.orig'),
os.path.join(source_tree_root, path))
else:
# Create a backup copy for --undo.
shutil.copyfile(os.path.join(source_tree_root, path),
os.path.join(source_tree_root, path + '.orig'))
# Copy the gyp file from directory of this script to target path.
shutil.copyfile(os.path.join(my_dirname, os.path.basename(path)),
os.path.join(source_tree_root, path))
return 0
if __name__ == '__main__':
sys.exit(DoMain(sys.argv))
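# Example invocations (hypothetical flags; they reuse gyp's -D syntax and the
# flag names come from REPLACEMENTS above):
#
#   replace_gyp_files.py -D use_system_libxml=1 -D use_system_zlib=1
#   replace_gyp_files.py --undo -D use_system_libxml=1 -D use_system_zlib=1
#
# The first call backs up each in-tree gyp file as *.orig and copies the
# unbundling replacement over it; --undo restores the backups.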
| bsd-3-clause |
fabiobrandespim/flat-earth | flat_earth.py | 1 | 4983 | """
Author: Fabio Brandespim
Email: [email protected]
Location: Brazil - Goiania
Date: 09-19-2016
"""
#!C:/Python27_32/python.exe
import pygame
import math
import particle
import titulos
import email_py
#import time
from threading import Thread
from pygame.locals import *
img = pygame.image.load("terra_plana.bmp")
if not pygame.font:
print 'Attention, no fonts found.'
if not pygame.mixer:
print "Attention, there's no sound."
pygame.init()
vermelho = (255, 0, 0)
amarelo = (255, 255, 0)
preto = (0, 0, 0)
branco2 = (255, 64, 64)
branco = (255, 255, 255)
azul = (0, 0, 255)
verde = (0, 255, 0)
pi = 3.141592653
comprimento_ecra = 820
altura_ecra = 820
ecra = pygame.display.set_mode((comprimento_ecra, altura_ecra))
xpos = (comprimento_ecra)/2
ypos = (altura_ecra)/2
raio_circulo = 15
raio = 130
raio2 = 130
#=================================
def g2rad(graus):
radianos = (graus * pi) / 180;
return radianos;
#================================
def sun(raio, pontocentral,graus):
rad = g2rad(graus);
x = (math.cos(rad) * raio) + pontocentral;
x = int(x)
y = (math.sin(rad) * raio) + pontocentral;
y = int(y)
return (x, y)
#===================================
def moon(raio, pontocentral, graus):
rad = g2rad(graus);
x = (math.cos(rad) * raio) + pontocentral;
x = int(x)
y = (math.sin(rad) * raio) + pontocentral;
y = int(y)
return (x, y)
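# Quick sanity check of the polar-coordinate helpers above (worked out from
# the formulas; int() truncates, so values extremely close to an integer can
# come out one lower):
#
#   sun(130, 410, 0)     # -> (540, 410): 0 degrees lies on the +x axis
#   sun(130, 410, 180)   # -> (280, 410): half a turn around the centre point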
#=========================================
def chama_classe_email(subject, mensagem):
e = email_py.SendEmail('[email protected]','Fabio123','[email protected]',subject,mensagem)
e.sendnow()
#circulo = pygame.draw.circle(ecra, amarelo, (410, 410), 100,1)
#circulo = pygame.draw.circle(ecra, amarelo, (410, 410), 200,1)
#circulo = pygame.draw.circle(ecra, amarelo, (410, 410), 300,1)
pygame.display.set_caption('Flat Earth by Fabio Brandespim 03-19-2016 +55 62 91909935')
pygame.display.flip()
pygame.key.set_repeat(100, 100)
graus = 0
graus2 = 0
subindo = True
subindo2 = True
volta = 0
while True:
for event in pygame.event.get():
pass
#if event.type == pygame.QUIT:
# pygame.quit()
# sys.exit()
tecla_pressionada = pygame.key.get_pressed()
if tecla_pressionada[K_ESCAPE]:
break
#===================================
graus += 10
if graus > 360:
graus = 1
if subindo:
if raio < 270:
raio += 10
volta = volta + 1
#if volta > 30:
# volta = 1
print(volta)
else:
volta = volta + 1
print(volta)
subindo = False
else:
if raio > 130:
raio -= 10
volta = volta + 1
#if volta > 30:
# volta = 1
print(volta)
else:
volta = volta + 1
print(volta)
subindo = True
x1, y1 = sun(raio, 410, graus)
#===================================
graus2 += 9.7055555
if graus2 > 360:
graus2 = 1
if subindo2:
if raio2 < 270:
raio2 += 10
else:
subindo2 = False
else:
if raio2 > 130:
raio2 -= 10
else:
subindo2 = True
x2, y2 = moon(raio2, 410, graus2)
#sun_shadow = pygame.draw.circle(ecra, amarelo, (x1, y1), 135,1)
sun2 = pygame.draw.circle(ecra, amarelo, (x1, y1), raio_circulo)
#moon_shadow = pygame.draw.circle(ecra, branco, (x2, y2), 135,1)
moon2 = pygame.draw.circle(ecra, branco, (x2, y2), raio_circulo)
pygame.display.flip()
#pygame.time.delay(1)
#ecra.fill((white))
# Background image
ecra.blit(img,(0,0))
# Create lines
pygame.draw.line(ecra, branco, [410, 0], [410, 820], 1)
pygame.draw.line(ecra, branco, [0, 410], [820, 410], 1)
# Create circles
tropico_capricornio = particle.Particle((410, 410), 270)
tropico_capricornio.display()
equador = particle.Particle((410, 410), 200)
equador.display()
tropico_cancer = particle.Particle((410, 410), 130)
tropico_cancer.display()
polo_norte = particle.Particle((410, 410), 5)
polo_norte.display()
# Display Labels
titulo1 = titulos.titulo("South Pole",30)
titulo1.display()
titulo2 = titulos.titulo("Capricornio",130)
titulo2.display()
titulo3 = titulos.titulo("Equador",200)
titulo3.display()
titulo4 = titulos.titulo("Cancer",270)
titulo4.display()
titulo5 = titulos.titulo("North Pole",395)
titulo5.display()
titulo6 = titulos.titulo("South Pole",780)
titulo6.display()
# send email using a thread
if (x1==x2) and (y1==y2):
print('Eclipse')
th = Thread(target=chama_classe_email, args = ('Eclipse no dia: '+str(volta), "dia: "+str(volta),))
th.start()
| gpl-3.0 |
Karm/qpid-proton | proton-j/src/main/resources/cmessenger.py | 17 | 5177 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from org.apache.qpid.proton import Proton
from org.apache.qpid.proton.messenger import Messenger, Status
from org.apache.qpid.proton import InterruptException, TimeoutException
from cerror import *
# from proton/messenger.h
PN_STATUS_UNKNOWN = 0
PN_STATUS_PENDING = 1
PN_STATUS_ACCEPTED = 2
PN_STATUS_REJECTED = 3
PN_STATUS_RELEASED = 4
PN_STATUS_MODIFIED = 5
PN_STATUS_ABORTED = 6
PN_STATUS_SETTLED = 7
PN_CUMULATIVE = 1
class pn_messenger_wrapper:
def __init__(self, impl):
self.impl = impl
self.error = pn_error(0, None)
def pn_messenger(name):
if name is None:
return pn_messenger_wrapper(Proton.messenger())
else:
return pn_messenger_wrapper(Proton.messenger(name))
def pn_messenger_error(m):
return m.error
def pn_messenger_set_timeout(m, t):
m.impl.setTimeout(t)
return 0
def pn_messenger_set_blocking(m, b):
m.impl.setBlocking(b)
return 0
def pn_messenger_set_certificate(m, c):
m.impl.setCertificate(c)
return 0
def pn_messenger_set_private_key(m, p):
m.impl.setPrivateKey(p)
return 0
def pn_messenger_set_password(m, p):
m.impl.setPassword(p)
return 0
def pn_messenger_set_trusted_certificates(m, t):
m.impl.setTrustedCertificates(t)
return 0
def pn_messenger_set_incoming_window(m, w):
m.impl.setIncomingWindow(w)
return 0
def pn_messenger_set_outgoing_window(m, w):
m.impl.setOutgoingWindow(w)
return 0
def pn_messenger_start(m):
m.impl.start()
return 0
# XXX: ???
def pn_messenger_work(m, t):
try:
if m.impl.work(t):
return 1
else:
return PN_TIMEOUT
except InterruptException, e:
return PN_INTR
class pn_subscription:
def __init__(self):
pass
def pn_messenger_subscribe(m, source):
m.impl.subscribe(source)
return pn_subscription()
def pn_messenger_route(m, pattern, address):
m.impl.route(pattern, address)
return 0
def pn_messenger_rewrite(m, pattern, address):
m.impl.rewrite(pattern, address)
return 0
def pn_messenger_interrupt(m):
m.impl.interrupt()
return 0
def pn_messenger_buffered(m, t):
raise Skipped()
from org.apache.qpid.proton.engine import TransportException
def pn_messenger_stop(m):
m.impl.stop()
return 0
def pn_messenger_stopped(m):
return m.impl.stopped()
def pn_messenger_put(m, msg):
msg.pre_encode()
m.impl.put(msg.impl)
return 0
def pn_messenger_outgoing_tracker(m):
return m.impl.outgoingTracker()
def pn_messenger_send(m, n):
try:
m.impl.send(n)
return 0
except InterruptException, e:
return PN_INTR
except TimeoutException, e:
return PN_TIMEOUT
def pn_messenger_recv(m, n):
try:
m.impl.recv(n)
return 0
except InterruptException, e:
return PN_INTR
except TimeoutException, e:
return PN_TIMEOUT
def pn_messenger_receiving(m):
return m.impl.receiving()
def pn_messenger_incoming(m):
return m.impl.incoming()
def pn_messenger_outgoing(m):
return m.impl.outgoing()
def pn_messenger_get(m, msg):
mimpl = m.impl.get()
if msg:
msg.decode(mimpl)
return 0
def pn_messenger_incoming_tracker(m):
return m.impl.incomingTracker()
def pn_messenger_accept(m, tracker, flags):
if flags:
m.impl.accept(tracker, Messenger.CUMULATIVE)
else:
m.impl.accept(tracker, 0)
return 0
def pn_messenger_reject(m, tracker, flags):
if flags:
m.impl.reject(tracker, Messenger.CUMULATIVE)
else:
m.impl.reject(tracker, 0)
return 0
def pn_messenger_settle(m, tracker, flags):
if flags:
m.impl.settle(tracker, Messenger.CUMULATIVE)
else:
m.impl.settle(tracker, 0)
return 0
STATUS_P2J = {
PN_STATUS_UNKNOWN: Status.UNKNOWN,
PN_STATUS_PENDING: Status.PENDING,
PN_STATUS_ACCEPTED: Status.ACCEPTED,
PN_STATUS_REJECTED: Status.REJECTED,
PN_STATUS_RELEASED: Status.RELEASED,
PN_STATUS_MODIFIED: Status.MODIFIED,
PN_STATUS_ABORTED: Status.ABORTED,
PN_STATUS_SETTLED: Status.SETTLED
}
STATUS_J2P = {
Status.UNKNOWN: PN_STATUS_UNKNOWN,
Status.PENDING: PN_STATUS_PENDING,
Status.ACCEPTED: PN_STATUS_ACCEPTED,
Status.REJECTED: PN_STATUS_REJECTED,
Status.RELEASED: PN_STATUS_RELEASED,
Status.MODIFIED: PN_STATUS_MODIFIED,
Status.ABORTED: PN_STATUS_ABORTED,
Status.SETTLED: PN_STATUS_SETTLED
}
def pn_messenger_status(m, tracker):
return STATUS_J2P[m.impl.getStatus(tracker)]
def pn_messenger_set_passive(m, passive):
raise Skipped()
def pn_messenger_selectable(m):
raise Skipped()
| apache-2.0 |
kumar303/zamboni | mkt/tags/tests/test_models.py | 17 | 2143 | from nose.tools import eq_, ok_
import mkt.site.tests
from mkt.site.utils import app_factory
from mkt.tags.models import attach_tags, Tag
from mkt.websites.utils import website_factory
class TestTagManager(mkt.site.tests.TestCase):
def test_not_blocked(self):
"""Make sure Tag Manager filters right for not blocked tags."""
tag1 = Tag(tag_text='abc', blocked=False)
tag1.save()
tag2 = Tag(tag_text='swearword', blocked=True)
tag2.save()
eq_(Tag.objects.all().count(), 2)
eq_(Tag.objects.not_blocked().count(), 1)
eq_(Tag.objects.not_blocked()[0], tag1)
class TestAttachTags(mkt.site.tests.TestCase):
def test_attach_tags_apps(self):
tag1 = Tag.objects.create(tag_text='abc', blocked=False)
tag2 = Tag.objects.create(tag_text='xyz', blocked=False)
tag3 = Tag.objects.create(tag_text='swearword', blocked=True)
app1 = app_factory()
app1.tags.add(tag1)
app1.tags.add(tag2)
app1.tags.add(tag3)
app2 = app_factory()
app2.tags.add(tag2)
app2.tags.add(tag3)
app3 = app_factory()
ok_(not hasattr(app1, 'tags_list'))
attach_tags([app3, app2, app1])
eq_(app1.tags_list, ['abc', 'xyz'])
eq_(app2.tags_list, ['xyz'])
ok_(not hasattr(app3, 'tags_list'))
def test_attach_tags_websites(self):
tag1 = Tag.objects.create(tag_text='abc', blocked=False)
tag2 = Tag.objects.create(tag_text='xyz', blocked=False)
tag3 = Tag.objects.create(tag_text='swearword', blocked=True)
website1 = website_factory()
website1.keywords.add(tag1)
website1.keywords.add(tag2)
website1.keywords.add(tag3)
website2 = website_factory()
website2.keywords.add(tag2)
website2.keywords.add(tag3)
website3 = website_factory()
ok_(not hasattr(website1, 'keywords_list'))
attach_tags([website3, website2, website1])
eq_(website1.keywords_list, ['abc', 'xyz'])
eq_(website2.keywords_list, ['xyz'])
ok_(not hasattr(website3, 'keywords_list'))
| bsd-3-clause |
pschmitt/home-assistant | homeassistant/components/keba/__init__.py | 16 | 8405 | """Support for KEBA charging stations."""
import asyncio
import logging
from keba_kecontact.connection import KebaKeContact
import voluptuous as vol
from homeassistant.const import CONF_HOST
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "keba"
SUPPORTED_COMPONENTS = ["binary_sensor", "sensor", "lock", "notify"]
CONF_RFID = "rfid"
CONF_FS = "failsafe"
CONF_FS_TIMEOUT = "failsafe_timeout"
CONF_FS_FALLBACK = "failsafe_fallback"
CONF_FS_PERSIST = "failsafe_persist"
CONF_FS_INTERVAL = "refresh_interval"
MAX_POLLING_INTERVAL = 5 # in seconds
MAX_FAST_POLLING_COUNT = 4
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_RFID, default="00845500"): cv.string,
vol.Optional(CONF_FS, default=False): cv.boolean,
vol.Optional(CONF_FS_TIMEOUT, default=30): cv.positive_int,
vol.Optional(CONF_FS_FALLBACK, default=6): cv.positive_int,
vol.Optional(CONF_FS_PERSIST, default=0): cv.positive_int,
vol.Optional(CONF_FS_INTERVAL, default=5): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
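# A matching configuration.yaml entry would look roughly like this (the host
# value is hypothetical; omitted keys fall back to the defaults above):
#
#   keba:
#     host: 192.168.1.50
#     rfid: "00845500"
#     failsafe: true
#     failsafe_timeout: 30
#     failsafe_fallback: 6
#     refresh_interval: 5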
_SERVICE_MAP = {
"request_data": "async_request_data",
"set_energy": "async_set_energy",
"set_current": "async_set_current",
"authorize": "async_start",
"deauthorize": "async_stop",
"enable": "async_enable_ev",
"disable": "async_disable_ev",
"set_failsafe": "async_set_failsafe",
}
async def async_setup(hass, config):
"""Check connectivity and version of KEBA charging station."""
host = config[DOMAIN][CONF_HOST]
rfid = config[DOMAIN][CONF_RFID]
refresh_interval = config[DOMAIN][CONF_FS_INTERVAL]
keba = KebaHandler(hass, host, rfid, refresh_interval)
hass.data[DOMAIN] = keba
# Wait for KebaHandler setup complete (initial values loaded)
if not await keba.setup():
_LOGGER.error("Could not find a charging station at %s", host)
return False
# Set failsafe mode at start up of Home Assistant
failsafe = config[DOMAIN][CONF_FS]
timeout = config[DOMAIN][CONF_FS_TIMEOUT] if failsafe else 0
fallback = config[DOMAIN][CONF_FS_FALLBACK] if failsafe else 0
persist = config[DOMAIN][CONF_FS_PERSIST] if failsafe else 0
try:
hass.loop.create_task(keba.set_failsafe(timeout, fallback, persist))
except ValueError as ex:
_LOGGER.warning("Could not set failsafe mode %s", ex)
# Register services to hass
async def execute_service(call):
"""Execute a service to KEBA charging station.
This must be a member function as we need access to the keba
object here.
"""
function_name = _SERVICE_MAP[call.service]
function_call = getattr(keba, function_name)
await function_call(call.data)
for service in _SERVICE_MAP:
hass.services.async_register(DOMAIN, service, execute_service)
# Load components
for domain in SUPPORTED_COMPONENTS:
hass.async_create_task(
discovery.async_load_platform(hass, domain, DOMAIN, {}, config)
)
# Start periodic polling of charging station data
keba.start_periodic_request()
return True
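# Example service call once setup has completed (data keys follow the handler
# methods below; the current value is hypothetical):
#
#   await hass.services.async_call(DOMAIN, "set_current", {"current": 16})
#   # -> dispatched to KebaHandler.async_set_current via _SERVICE_MAP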
class KebaHandler(KebaKeContact):
"""Representation of a KEBA charging station connection."""
def __init__(self, hass, host, rfid, refresh_interval):
"""Initialize charging station connection."""
super().__init__(host, self.hass_callback)
self._update_listeners = []
self._hass = hass
self.rfid = rfid
self.device_name = "keba" # correct device name will be set in setup()
self.device_id = "keba_wallbox_" # correct device id will be set in setup()
# Ensure at least MAX_POLLING_INTERVAL seconds delay
self._refresh_interval = max(MAX_POLLING_INTERVAL, refresh_interval)
self._fast_polling_count = MAX_FAST_POLLING_COUNT
self._polling_task = None
def start_periodic_request(self):
"""Start periodic data polling."""
self._polling_task = self._hass.loop.create_task(self._periodic_request())
async def _periodic_request(self):
"""Send periodic update requests."""
await self.request_data()
if self._fast_polling_count < MAX_FAST_POLLING_COUNT:
self._fast_polling_count += 1
_LOGGER.debug("Periodic data request executed, now wait for 2 seconds")
await asyncio.sleep(2)
else:
_LOGGER.debug(
"Periodic data request executed, now wait for %s seconds",
self._refresh_interval,
)
await asyncio.sleep(self._refresh_interval)
_LOGGER.debug("Periodic data request rescheduled")
self._polling_task = self._hass.loop.create_task(self._periodic_request())
async def setup(self, loop=None):
"""Initialize KebaHandler object."""
await super().setup(loop)
# Request initial values and extract serial number
await self.request_data()
if (
self.get_value("Serial") is not None
and self.get_value("Product") is not None
):
self.device_id = f"keba_wallbox_{self.get_value('Serial')}"
self.device_name = self.get_value("Product")
return True
return False
def hass_callback(self, data):
"""Handle component notification via callback."""
# Inform entities about updated values
for listener in self._update_listeners:
listener()
_LOGGER.debug("Notifying %d listeners", len(self._update_listeners))
def _set_fast_polling(self):
_LOGGER.debug("Fast polling enabled")
self._fast_polling_count = 0
self._polling_task.cancel()
self._polling_task = self._hass.loop.create_task(self._periodic_request())
def add_update_listener(self, listener):
"""Add a listener for update notifications."""
self._update_listeners.append(listener)
# initial data is already loaded, thus update the component
listener()
async def async_request_data(self, param):
"""Request new data in async way."""
await self.request_data()
_LOGGER.debug("New data from KEBA wallbox requested")
async def async_set_energy(self, param):
"""Set energy target in async way."""
try:
energy = param["energy"]
await self.set_energy(float(energy))
self._set_fast_polling()
except (KeyError, ValueError) as ex:
_LOGGER.warning("Energy value is not correct. %s", ex)
async def async_set_current(self, param):
"""Set current maximum in async way."""
try:
current = param["current"]
await self.set_current(float(current))
# No fast polling as this function might be called regularly
except (KeyError, ValueError) as ex:
_LOGGER.warning("Current value is not correct. %s", ex)
async def async_start(self, param=None):
"""Authorize EV in async way."""
await self.start(self.rfid)
self._set_fast_polling()
async def async_stop(self, param=None):
"""De-authorize EV in async way."""
await self.stop(self.rfid)
self._set_fast_polling()
async def async_enable_ev(self, param=None):
"""Enable EV in async way."""
await self.enable(True)
self._set_fast_polling()
async def async_disable_ev(self, param=None):
"""Disable EV in async way."""
await self.enable(False)
self._set_fast_polling()
async def async_set_failsafe(self, param=None):
"""Set failsafe mode in async way."""
try:
timeout = param[CONF_FS_TIMEOUT]
fallback = param[CONF_FS_FALLBACK]
persist = param[CONF_FS_PERSIST]
await self.set_failsafe(int(timeout), float(fallback), bool(persist))
self._set_fast_polling()
except (KeyError, ValueError) as ex:
_LOGGER.warning(
"failsafe_timeout, failsafe_fallback and/or "
"failsafe_persist value are not correct. %s",
ex,
)
| apache-2.0 |
ludmilamarian/invenio | invenio/base/setuptools/__init__.py | 21 | 1323 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import setuptools
class InvenioManageCommand(setuptools.Command):
"""
Setuptools command for running ```inveniomanage <command>```
"""
description = "run inveniomanage commands."
user_options = [
('manage-command=', 'c',
'inveniomanage command to run.'),
]
def initialize_options(self):
""" Default values for options """
self.manage_command = None
def finalize_options(self):
pass
def run(self):
cmd = ['inveniomanage', self.manage_command]
self.spawn(cmd)
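# Sketch of how such a command is typically wired up and invoked; the cmdclass
# registration and the command name used below are assumptions, not shown in
# this module:
#
#   # setup.py
#   setup(
#       ...,
#       cmdclass={'inveniomanage': InvenioManageCommand},
#   )
#
#   $ python setup.py inveniomanage --manage-command=collect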
| gpl-2.0 |
elit3ge/SickRage | sickbeard/notifiers/synologynotifier.py | 12 | 2585 | # Author: Nyaran <[email protected]>
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import sickbeard
from sickbeard import logger
from sickbeard import common
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
class synologyNotifier:
def notify_snatch(self, ep_name):
if sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH:
self._send_synologyNotifier(ep_name, common.notifyStrings[common.NOTIFY_SNATCH])
def notify_download(self, ep_name):
if sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD:
self._send_synologyNotifier(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD:
self._send_synologyNotifier(ep_name + ": " + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def notify_git_update(self, new_version = "??"):
if sickbeard.USE_SYNOLOGYNOTIFIER:
update_text=common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title=common.notifyStrings[common.NOTIFY_GIT_UPDATE]
self._send_synologyNotifier(update_text + new_version, title)
def _send_synologyNotifier(self, message, title):
synodsmnotify_cmd = ["/usr/syno/bin/synodsmnotify", "@administrators", title, message]
logger.log(u"Executing command " + str(synodsmnotify_cmd))
logger.log(u"Absolute path to command: " + ek(os.path.abspath, synodsmnotify_cmd[0]), logger.DEBUG)
try:
p = subprocess.Popen(synodsmnotify_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
cwd=sickbeard.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
logger.log(u"Script result: " + str(out), logger.DEBUG)
except OSError, e:
logger.log(u"Unable to run synodsmnotify: " + ex(e))
notifier = synologyNotifier
| gpl-3.0 |
AdrianoMaron/kWantera | argparse.py | 490 | 87791 | # Author: Steven J. Bethard <[email protected]>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.2.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
set
except NameError:
# for python < 2.4 compatibility (sets module is there since 2.3):
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
sorted
except NameError:
# for python < 2.4 compatibility:
def sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
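# For illustration only: the formatter classes above are chosen by passing
# formatter_class= when constructing a parser. A minimal, hedged sketch of
# typical usage (program and option names are made up):
#
#     parser = ArgumentParser(prog='demo',
#                             formatter_class=ArgumentDefaultsHelpFormatter)
#     parser.add_argument('--retries', type=int, default=3,
#                         help='number of retries')
#     parser.print_help()   # help for --retries now ends with "(default: 3)"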
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
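# For illustration only: user code usually subclasses Action and overrides
# __call__ to control how values are stored. A minimal, hedged sketch
# (the class and option names are illustrative, not part of this module):
#
#     class UpperAction(Action):
#         def __call__(self, parser, namespace, values, option_string=None):
#             setattr(namespace, self.dest, values.upper())
#
#     parser.add_argument('--name', action=UpperAction)
#     # parse_args(['--name', 'bob']) would store 'BOB' in namespace.name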
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
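# For illustration only: _SubParsersAction is normally reached through
# ArgumentParser.add_subparsers(). A hedged sketch of typical usage
# (the command name and URL are made up):
#
#     parser = ArgumentParser(prog='tool')
#     subparsers = parser.add_subparsers(dest='command')
#     fetch = subparsers.add_parser('fetch', help='fetch a resource')
#     fetch.add_argument('url')
#     args = parser.parse_args(['fetch', 'http://example.com'])
#     # args.command == 'fetch' and args.url == 'http://example.com'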
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r' % self._mode)
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
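# For illustration only: FileType instances are passed as the type= argument
# so the parser opens the named files, with '-' mapping to stdin/stdout as
# described above. A hedged sketch (file names are placeholders):
#
#     parser.add_argument('infile', type=FileType('r'))
#     parser.add_argument('outfile', type=FileType('w'))
#     args = parser.parse_args(['notes.txt', '-'])
#     # args.infile is an open file object, args.outfile is sys.stdout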
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
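# For illustration only: a pre-populated Namespace can be handed to
# parse_args() so existing attributes act as defaults. A hedged sketch:
#
#     ns = Namespace(verbose=False)
#     parser.parse_args([], namespace=ns)
#     # ns.verbose stays False unless an option on the command line sets it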
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % action_class)
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % type_func)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning)
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if '-' in prefix_chars:
default_prefix = '-'
else:
default_prefix = prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
default_prefix+'v', default_prefix*2+'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate type and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
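    # For illustration only: subclasses commonly override
    # convert_arg_line_to_args so that arguments read from files (see
    # fromfile_prefix_chars) may contain several space-separated tokens per
    # line. A hedged sketch:
    #
    #     class WordSplittingParser(ArgumentParser):
    #         def convert_arg_line_to_args(self, arg_line):
    #             return arg_line.split()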
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
        # for everything but PARSER, REMAINDER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
| gpl-2.0 |
aptrishu/coala-bears | tests/haskell/HaskellLintBearTest.py | 8 | 1995 | from queue import Queue
from bears.haskell.HaskellLintBear import HaskellLintBear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.testing.BearTestHelper import generate_skip_decorator
from coalib.settings.Section import Section
good_single_line_file = """
myconcat = (++)
""".splitlines()
bad_single_line_file = """
myconcat a b = ((++) a b)
""".splitlines()
good_multiple_line_file = """
import qualified Data.ByteString.Char8 as BS
main :: IO()
main =
return $ BS.concat
[ BS.pack "I am being tested by hlint!"
, "String dummy"
, "Another String dummy"
]
""".splitlines()
bad_multiple_line_file = """
import qualified Data.ByteString.Char8 as BS
main :: IO()
main =
return $ BS.concat $
[ BS.pack $ "I am being tested by hlint!"
, "String dummy"
, "Another String dummy"
]
""".splitlines()
@generate_skip_decorator(HaskellLintBear)
class HaskellLintBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = HaskellLintBear(self.section, Queue())
def test_valid(self):
self.check_validity(self.uut, good_single_line_file,
tempfile_kwargs={'suffix': '.hs'})
self.check_validity(self.uut, good_multiple_line_file,
tempfile_kwargs={'suffix': '.hs'})
def test_invalid(self):
results = self.check_invalidity(self.uut, bad_single_line_file,
tempfile_kwargs={'suffix': '.hs'})
self.assertEqual(len(results), 1, str(results))
self.assertIn('Redundant bracket',
results[0].message)
results = self.check_invalidity(self.uut, bad_multiple_line_file,
tempfile_kwargs={'suffix': '.hs'})
self.assertEqual(len(results), 2, str(results))
self.assertIn('Redundant $',
results[0].message)
| agpl-3.0 |
salivatears/ansible | lib/ansible/plugins/cache/base.py | 124 | 1479 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from six import with_metaclass
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class BaseCacheModule(with_metaclass(ABCMeta, object)):
display = display
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def keys(self):
pass
@abstractmethod
def contains(self, key):
pass
@abstractmethod
def delete(self, key):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def copy(self):
pass
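# For illustration only: a concrete cache plugin implements every abstract
# method declared above. A minimal in-memory sketch (the class name is
# hypothetical and not shipped with Ansible):
#
#     class MemoryCacheModule(BaseCacheModule):
#         def __init__(self, *args, **kwargs):
#             self._cache = {}
#         def get(self, key):
#             return self._cache[key]
#         def set(self, key, value):
#             self._cache[key] = value
#         def keys(self):
#             return self._cache.keys()
#         def contains(self, key):
#             return key in self._cache
#         def delete(self, key):
#             del self._cache[key]
#         def flush(self):
#             self._cache = {}
#         def copy(self):
#             return self._cache.copy()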
| gpl-3.0 |
alxgu/ansible | lib/ansible/modules/identity/keycloak/keycloak_client.py | 27 | 32548 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Eike Frost <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: keycloak_client
short_description: Allows administration of Keycloak clients via Keycloak API
version_added: "2.5"
description:
- This module allows the administration of Keycloak clients via the Keycloak REST API. It
requires access to the REST API via OpenID Connect; the user connecting and the client being
used must have the requisite access rights. In a default Keycloak installation, admin-cli
and an admin user would work, as would a separate client definition with the scope tailored
to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the
Keycloak API and its documentation at U(http://www.keycloak.org/docs-api/3.3/rest-api/).
Aliases are provided so camelCased versions can be used as well.
- The Keycloak API does not always sanity check inputs e.g. you can set
SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
If you do not specify a setting, usually a sensible default is chosen.
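# For illustration only, a hedged sketch of a task using options documented
# below; the realm, client id and redirect URI are placeholders, and the
# authentication options from the shared keycloak documentation fragment are
# assumed and omitted here:
#
#   - keycloak_client:
#       realm: master
#       client_id: my-app
#       redirect_uris:
#         - https://my-app.example.com/*
#       state: present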
options:
state:
description:
- State of the client
- On C(present), the client will be created (or updated if it exists already).
- On C(absent), the client will be removed if it exists
choices: ['present', 'absent']
default: 'present'
realm:
description:
- The realm to create the client in.
client_id:
description:
- Client id of client to be worked on. This is usually an alphanumeric name chosen by
you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
This is 'clientId' in the Keycloak REST API.
aliases:
- clientId
id:
description:
- Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
is required. If you specify both, this takes precedence.
name:
description:
- Name of the client (this is not the same as I(client_id))
description:
description:
- Description of the client in Keycloak
root_url:
description:
- Root URL appended to relative URLs for this client
This is 'rootUrl' in the Keycloak REST API.
aliases:
- rootUrl
admin_url:
description:
- URL to the admin interface of the client
This is 'adminUrl' in the Keycloak REST API.
aliases:
- adminUrl
base_url:
description:
- Default URL to use when the auth server needs to redirect or link back to the client
This is 'baseUrl' in the Keycloak REST API.
aliases:
- baseUrl
enabled:
description:
- Is this client enabled or not?
type: bool
client_authenticator_type:
description:
- How do clients authenticate with the auth server? Either C(client-secret) or
C(client-jwt) can be chosen. When using C(client-secret), the module parameter
I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
to configure its behavior.
This is 'clientAuthenticatorType' in the Keycloak REST API.
choices: ['client-secret', 'client-jwt']
aliases:
- clientAuthenticatorType
secret:
description:
- When using I(client_authenticator_type) C(client-secret) (the default), you can
specify a secret here (otherwise one will be generated if it does not exit). If
changing this secret, the module will not register a change currently (but the
changed secret will be saved).
registration_access_token:
description:
- The registration access token provides access for clients to the client registration
service.
This is 'registrationAccessToken' in the Keycloak REST API.
aliases:
- registrationAccessToken
default_roles:
description:
- list of default roles for this client. If the client roles referenced do not exist
yet, they will be created.
This is 'defaultRoles' in the Keycloak REST API.
aliases:
- defaultRoles
redirect_uris:
description:
- Acceptable redirect URIs for this client.
This is 'redirectUris' in the Keycloak REST API.
aliases:
- redirectUris
web_origins:
description:
- List of allowed CORS origins.
This is 'webOrigins' in the Keycloak REST API.
aliases:
- webOrigins
not_before:
description:
- Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
This is 'notBefore' in the Keycloak REST API.
aliases:
- notBefore
bearer_only:
description:
- The access type of this client is bearer-only.
This is 'bearerOnly' in the Keycloak REST API.
aliases:
- bearerOnly
type: bool
consent_required:
description:
- If enabled, users have to consent to client access.
This is 'consentRequired' in the Keycloak REST API.
aliases:
- consentRequired
type: bool
standard_flow_enabled:
description:
- Enable standard flow for this client or not (OpenID connect).
This is 'standardFlowEnabled' in the Keycloak REST API.
aliases:
- standardFlowEnabled
type: bool
implicit_flow_enabled:
description:
- Enable implicit flow for this client or not (OpenID connect).
This is 'implicitFlowEnabled' in the Keycloak REST API.
aliases:
- implicitFlowEnabled
type: bool
direct_access_grants_enabled:
description:
- Are direct access grants enabled for this client or not (OpenID connect).
This is 'directAccessGrantsEnabled' in the Keycloak REST API.
aliases:
- directAccessGrantsEnabled
type: bool
service_accounts_enabled:
description:
- Are service accounts enabled for this client or not (OpenID connect).
This is 'serviceAccountsEnabled' in the Keycloak REST API.
aliases:
- serviceAccountsEnabled
type: bool
authorization_services_enabled:
description:
- Are authorization services enabled for this client or not (OpenID connect).
This is 'authorizationServicesEnabled' in the Keycloak REST API.
aliases:
- authorizationServicesEnabled
type: bool
public_client:
description:
- Is the access type for this client public or not.
This is 'publicClient' in the Keycloak REST API.
aliases:
- publicClient
type: bool
frontchannel_logout:
description:
- Is frontchannel logout enabled for this client or not.
This is 'frontchannelLogout' in the Keycloak REST API.
aliases:
- frontchannelLogout
type: bool
protocol:
description:
            - Type of client (either C(openid-connect) or C(saml)).
choices: ['openid-connect', 'saml']
full_scope_allowed:
description:
- Is the "Full Scope Allowed" feature set for this client or not.
This is 'fullScopeAllowed' in the Keycloak REST API.
aliases:
- fullScopeAllowed
type: bool
node_re_registration_timeout:
description:
- Cluster node re-registration timeout for this client.
This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
aliases:
- nodeReRegistrationTimeout
registered_nodes:
description:
- dict of registered cluster nodes (with C(nodename) as the key and last registration
time as the value).
This is 'registeredNodes' in the Keycloak REST API.
aliases:
- registeredNodes
client_template:
description:
- Client template to use for this client. If it does not exist this field will silently
be dropped.
This is 'clientTemplate' in the Keycloak REST API.
aliases:
- clientTemplate
use_template_config:
description:
- Whether or not to use configuration from the I(client_template).
This is 'useTemplateConfig' in the Keycloak REST API.
aliases:
- useTemplateConfig
type: bool
use_template_scope:
description:
- Whether or not to use scope configuration from the I(client_template).
This is 'useTemplateScope' in the Keycloak REST API.
aliases:
- useTemplateScope
type: bool
use_template_mappers:
description:
- Whether or not to use mapper configuration from the I(client_template).
This is 'useTemplateMappers' in the Keycloak REST API.
aliases:
- useTemplateMappers
type: bool
surrogate_auth_required:
description:
- Whether or not surrogate auth is required.
This is 'surrogateAuthRequired' in the Keycloak REST API.
aliases:
- surrogateAuthRequired
type: bool
authorization_settings:
description:
- a data structure defining the authorization settings for this client. For reference,
please see the Keycloak API docs at U(http://www.keycloak.org/docs-api/3.3/rest-api/index.html#_resourceserverrepresentation).
This is 'authorizationSettings' in the Keycloak REST API.
aliases:
- authorizationSettings
protocol_mappers:
description:
- a list of dicts defining protocol mappers for this client.
This is 'protocolMappers' in the Keycloak REST API.
aliases:
- protocolMappers
suboptions:
consentRequired:
description:
- Specifies whether a user needs to provide consent to a client for this mapper to be active.
consentText:
description:
                    - The human-readable name of the consent that the user is presented with and asked to accept.
id:
description:
- Usually a UUID specifying the internal ID of this protocol mapper instance.
name:
description:
- The name of this protocol mapper.
protocol:
description:
                    - This is either C(openid-connect) or C(saml); it specifies for which protocol this
                      protocol mapper is active.
choices: ['openid-connect', 'saml']
protocolMapper:
description:
- The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
impossible to provide since this may be extended through SPIs by the user of Keycloak,
by default Keycloak as of 3.4 ships with at least
- C(docker-v2-allow-all-mapper)
- C(oidc-address-mapper)
- C(oidc-full-name-mapper)
- C(oidc-group-membership-mapper)
- C(oidc-hardcoded-claim-mapper)
- C(oidc-hardcoded-role-mapper)
- C(oidc-role-name-mapper)
- C(oidc-script-based-protocol-mapper)
- C(oidc-sha256-pairwise-sub-mapper)
- C(oidc-usermodel-attribute-mapper)
- C(oidc-usermodel-client-role-mapper)
- C(oidc-usermodel-property-mapper)
- C(oidc-usermodel-realm-role-mapper)
- C(oidc-usersessionmodel-note-mapper)
- C(saml-group-membership-mapper)
- C(saml-hardcode-attribute-mapper)
- C(saml-hardcode-role-mapper)
- C(saml-role-list-mapper)
- C(saml-role-name-mapper)
- C(saml-user-attribute-mapper)
- C(saml-user-property-mapper)
- C(saml-user-session-note-mapper)
- An exhaustive list of available mappers on your installation can be obtained on
the admin console by going to Server Info -> Providers and looking under
'protocol-mapper'.
config:
description:
- Dict specifying the configuration options for the protocol mapper; the
contents differ depending on the value of I(protocolMapper) and are not documented
                      other than by the source of the mappers and their parent class(es). An example is given
below. It is easiest to obtain valid config values by dumping an already-existing
protocol mapper configuration through check-mode in the I(existing) field.
attributes:
description:
- A dict of further attributes for this client. This can contain various configuration
settings; an example is given in the examples section. While an exhaustive list of
              permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak
API does not validate whether a given option is appropriate for the protocol used; if specified
anyway, Keycloak will simply not use it.
suboptions:
saml.authnstatement:
description:
- For SAML clients, boolean specifying whether or not a statement containing method and timestamp
should be included in the login response.
saml.client.signature:
description:
- For SAML clients, boolean specifying whether a client signature is required and validated.
saml.encrypt:
description:
- Boolean specifying whether SAML assertions should be encrypted with the client's public key.
saml.force.post.binding:
description:
- For SAML clients, boolean specifying whether always to use POST binding for responses.
saml.onetimeuse.condition:
description:
- For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
saml.server.signature:
description:
- Boolean specifying whether SAML documents should be signed by the realm.
saml.server.signature.keyinfo.ext:
description:
- For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
of the signing key id in the SAML Extensions element.
saml.signature.algorithm:
description:
- Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
saml.signing.certificate:
description:
- SAML signing key certificate, base64-encoded.
saml.signing.private.key:
description:
- SAML signing key private key, base64-encoded.
saml_assertion_consumer_url_post:
description:
- SAML POST Binding URL for the client's assertion consumer service (login responses).
saml_assertion_consumer_url_redirect:
description:
- SAML Redirect Binding URL for the client's assertion consumer service (login responses).
saml_force_name_id_format:
description:
                    - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and use the configured one instead.
saml_name_id_format:
description:
- For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent))
saml_signature_canonicalization_method:
description:
- SAML signature canonicalization method. This is one of four values, namely
C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
saml_single_logout_service_url_post:
description:
- SAML POST binding url for the client's single logout service.
saml_single_logout_service_url_redirect:
description:
- SAML redirect binding url for the client's single logout service.
user.info.response.signature.alg:
description:
- For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
request.object.signature.alg:
description:
- For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
                      an OIDC request object. One of C(any), C(none), or C(RS256).
use.jwks.url:
description:
- For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
public keys.
jwks.url:
description:
- For OpenID-Connect clients, URL where client keys in JWK are stored.
jwt.credential.certificate:
description:
- For OpenID-Connect clients, client certificate for validating JWT issued by
client and signed by its key, base64-encoded.
extends_documentation_fragment:
- keycloak
author:
- Eike Frost (@eikef)
'''
EXAMPLES = '''
- name: Create or update Keycloak client (minimal example)
local_action:
module: keycloak_client
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
client_id: test
state: present
- name: Delete a Keycloak client
local_action:
module: keycloak_client
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
client_id: test
state: absent
- name: Create or update a Keycloak client (with all the bells and whistles)
local_action:
module: keycloak_client
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
state: present
realm: master
client_id: test
id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
name: this_is_a_test
description: Description of this wonderful client
root_url: https://www.example.com/
admin_url: https://www.example.com/admin_url
base_url: basepath
enabled: True
client_authenticator_type: client-secret
secret: REALLYWELLKEPTSECRET
redirect_uris:
- https://www.example.com/*
- http://localhost:8888/
web_origins:
- https://www.example.com/*
not_before: 1507825725
bearer_only: False
consent_required: False
standard_flow_enabled: True
implicit_flow_enabled: False
direct_access_grants_enabled: False
service_accounts_enabled: False
authorization_services_enabled: False
public_client: False
frontchannel_logout: False
protocol: openid-connect
full_scope_allowed: false
node_re_registration_timeout: -1
client_template: test
use_template_config: False
use_template_scope: false
use_template_mappers: no
registered_nodes:
node01.example.com: 1507828202
registration_access_token: eyJWT_TOKEN
surrogate_auth_required: false
default_roles:
- test01
- test02
protocol_mappers:
- config:
access.token.claim: True
claim.name: "family_name"
id.token.claim: True
jsonType.label: String
user.attribute: lastName
userinfo.token.claim: True
consentRequired: True
consentText: "${familyName}"
name: family name
protocol: openid-connect
protocolMapper: oidc-usermodel-property-mapper
- config:
attribute.name: Role
attribute.nameformat: Basic
single: false
consentRequired: false
name: role list
protocol: saml
protocolMapper: saml-role-list-mapper
attributes:
saml.authnstatement: True
saml.client.signature: True
saml.force.post.binding: True
saml.server.signature: True
saml.signature.algorithm: RSA_SHA256
saml.signing.certificate: CERTIFICATEHERE
saml.signing.private.key: PRIVATEKEYHERE
saml_force_name_id_format: False
saml_name_id_format: username
saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
user.info.response.signature.alg: RS256
request.object.signature.alg: RS256
use.jwks.url: true
jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
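# A minimal sketch (not part of the original examples): configuring C(client-jwt)
# authentication via the I(attributes) parameter, as described in the option
# documentation above. The JWKS URL used here is illustrative only.
- name: Create or update a Keycloak client using client-jwt authentication
  local_action:
    module: keycloak_client
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
    state: present
    client_id: test-jwt
    client_authenticator_type: client-jwt
    attributes:
      use.jwks.url: true
      jwks.url: https://www.example.com/certs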
'''
RETURN = '''
msg:
description: Message as to what action was taken
returned: always
type: str
sample: "Client testclient has been updated"
proposed:
description: client representation of proposed changes to client
returned: always
type: dict
sample: {
clientId: "test"
}
existing:
description: client representation of existing client (sample is truncated)
returned: always
type: dict
sample: {
"adminUrl": "http://www.example.com/admin_url",
"attributes": {
"request.object.signature.alg": "RS256",
}
}
end_state:
description: client representation of client after module execution (sample is truncated)
returned: always
type: dict
sample: {
"adminUrl": "http://www.example.com/admin_url",
"attributes": {
"request.object.signature.alg": "RS256",
}
}
'''
from ansible.module_utils.keycloak import KeycloakAPI, camel, keycloak_argument_spec
from ansible.module_utils.basic import AnsibleModule
def sanitize_cr(clientrep):
""" Removes probably sensitive details from a client representation
:param clientrep: the clientrep dict to be sanitized
:return: sanitized clientrep dict
"""
result = clientrep.copy()
if 'secret' in result:
result['secret'] = 'no_log'
if 'attributes' in result:
if 'saml.signing.private.key' in result['attributes']:
result['attributes']['saml.signing.private.key'] = 'no_log'
return result
def main():
"""
Module execution
:return:
"""
argument_spec = keycloak_argument_spec()
protmapper_spec = dict(
consentRequired=dict(type='bool'),
consentText=dict(type='str'),
id=dict(type='str'),
name=dict(type='str'),
protocol=dict(type='str', choices=['openid-connect', 'saml']),
protocolMapper=dict(type='str'),
config=dict(type='dict'),
)
meta_args = dict(
state=dict(default='present', choices=['present', 'absent']),
realm=dict(type='str', default='master'),
id=dict(type='str'),
client_id=dict(type='str', aliases=['clientId']),
name=dict(type='str'),
description=dict(type='str'),
root_url=dict(type='str', aliases=['rootUrl']),
admin_url=dict(type='str', aliases=['adminUrl']),
base_url=dict(type='str', aliases=['baseUrl']),
surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
enabled=dict(type='bool'),
client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
secret=dict(type='str', no_log=True),
registration_access_token=dict(type='str', aliases=['registrationAccessToken']),
default_roles=dict(type='list', aliases=['defaultRoles']),
redirect_uris=dict(type='list', aliases=['redirectUris']),
web_origins=dict(type='list', aliases=['webOrigins']),
not_before=dict(type='int', aliases=['notBefore']),
bearer_only=dict(type='bool', aliases=['bearerOnly']),
consent_required=dict(type='bool', aliases=['consentRequired']),
standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
public_client=dict(type='bool', aliases=['publicClient']),
frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
protocol=dict(type='str', choices=['openid-connect', 'saml']),
attributes=dict(type='dict'),
full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
registered_nodes=dict(type='dict', aliases=['registeredNodes']),
client_template=dict(type='str', aliases=['clientTemplate']),
use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
)
argument_spec.update(meta_args)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['client_id', 'id']]))
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
# Obtain access token, initialize API
kc = KeycloakAPI(module)
realm = module.params.get('realm')
cid = module.params.get('id')
state = module.params.get('state')
# convert module parameters to client representation parameters (if they belong in there)
client_params = [x for x in module.params
if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
module.params.get(x) is not None]
# See whether the client already exists in Keycloak
if cid is None:
before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
if before_client is not None:
cid = before_client['id']
else:
before_client = kc.get_client_by_id(cid, realm=realm)
if before_client is None:
before_client = dict()
# Build a proposed changeset from parameters given to this module
changeset = dict()
for client_param in client_params:
new_param_value = module.params.get(client_param)
# some lists in the Keycloak API are sorted, some are not.
if isinstance(new_param_value, list):
if client_param in ['attributes']:
try:
new_param_value = sorted(new_param_value)
except TypeError:
pass
# Unfortunately, the ansible argument spec checker introduces variables with null values when
# they are not specified
if client_param == 'protocol_mappers':
new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
changeset[camel(client_param)] = new_param_value
# Whether creating or updating a client, take the before-state and merge the changeset into it
updated_client = before_client.copy()
updated_client.update(changeset)
result['proposed'] = sanitize_cr(changeset)
result['existing'] = sanitize_cr(before_client)
# If the client does not exist yet, before_client is still empty
if before_client == dict():
if state == 'absent':
# do nothing and exit
if module._diff:
result['diff'] = dict(before='', after='')
result['msg'] = 'Client does not exist, doing nothing.'
module.exit_json(**result)
# create new client
result['changed'] = True
if 'clientId' not in updated_client:
module.fail_json(msg='client_id needs to be specified when creating a new client')
if module._diff:
result['diff'] = dict(before='', after=sanitize_cr(updated_client))
if module.check_mode:
module.exit_json(**result)
kc.create_client(updated_client, realm=realm)
after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm)
result['end_state'] = sanitize_cr(after_client)
result['msg'] = 'Client %s has been created.' % updated_client['clientId']
module.exit_json(**result)
else:
if state == 'present':
# update existing client
result['changed'] = True
if module.check_mode:
# We can only compare the current client with the proposed updates we have
if module._diff:
result['diff'] = dict(before=sanitize_cr(before_client),
after=sanitize_cr(updated_client))
result['changed'] = (before_client != updated_client)
module.exit_json(**result)
kc.update_client(cid, updated_client, realm=realm)
after_client = kc.get_client_by_id(cid, realm=realm)
if before_client == after_client:
result['changed'] = False
if module._diff:
result['diff'] = dict(before=sanitize_cr(before_client),
after=sanitize_cr(after_client))
result['end_state'] = sanitize_cr(after_client)
result['msg'] = 'Client %s has been updated.' % updated_client['clientId']
module.exit_json(**result)
else:
# Delete existing client
result['changed'] = True
if module._diff:
result['diff']['before'] = sanitize_cr(before_client)
result['diff']['after'] = ''
if module.check_mode:
module.exit_json(**result)
kc.delete_client(cid, realm=realm)
result['proposed'] = dict()
result['end_state'] = dict()
result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
module.exit_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
fparrel/regepe | wamp-src/cgi-bin/mathutil.py | 2 | 13205 |
from math import sqrt,cos,sin,atan2,ceil,floor,log10,pi,atan,tan
from log import Warn
try:
from math import fsum
except ImportError:
from mymath import fsum
## MISC MATH FUNCTIONS ##
def Mean(numbers):
"Returns the arithmetic mean of a numeric list."
return fsum(numbers) / len(numbers)
def InBounds(x,a,b):
"Returns x if x belongs to [a,b] else return the closest bound."
if x<a:
return a
elif x>b:
return b
else:
return x
def ApplyThreshold(x,threshold):
"Apply threshold on x"
if abs(x)>threshold:
if x<0:
return -threshold
else:
return threshold
else:
return x
def IdentIfPositive(x):
"If x>0 return x else return 0"
if x>0:
return x
else:
return 0
def sign(x):
"Returns x/abs(x)"
if x<0:
return -1
if x>0:
return 1
return 0
def Filter(data,FilterFunc,halfsize):
"Apply a filter function on a list of data."
maxid = len(data)-1
return [FilterFunc(data[InBounds(x-halfsize,0,maxid):InBounds(x+halfsize,0,maxid)]) for x in range(0,len(data))]
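# Illustrative sketch (not part of the original code): passing Mean as FilterFunc
# turns Filter into a simple moving average over a window of roughly 2*halfsize
# samples (note that the upper slice bound is exclusive), e.g.
#   Filter([1.0, 2.0, 3.0, 4.0], Mean, 1)  ->  [1.0, 1.5, 2.5, 3.5]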
def MeanXY(datax,datay):
"2 dimension Mean for using with a filter"
#return (datax[0],Mean(datay))
return (Mean(datax),Mean(datay))
def FilterXY(datax,datay,FilterFunc,xsize):
"Apply 2 dimension filter on data"
j = 0
outx = []
outy = []
for i in range(1,len(datax)):
if datax[i]-datax[j]>=xsize or i==len(datax)-1:
(x,y) = FilterFunc(datax[j:i+1],datay[j:i+1])
if j==0:
x = datax[0]
if i==len(datax)-1:
x = datax[len(datax)-1]
outx.append(x)
outy.append(y)
j = i
#print((outx,outy))
return (outx,outy)
def FindLocalExtremums(y):
"Find local extremums from a list of floats, return two lists of [x,y[x]] (localmins and localmaxs)"
d = 0 # variation of function: 0 if stable, +1 if increasing, -1 if decreasing
localmins = [] # list of [id,value] of local minimums found
localmaxs = [] # local maximums found
for x in range(0,len(y)-1):
if y[x+1]>y[x] and d!=1:
# \/ or _/-> local minimum
localmins.append([x,y[x]])
d = 1
if y[x+1]<y[x] and d!=-1:
# _
# /\ or \-> local maximum
localmaxs.append([x,y[x]])
d = -1
if y[x+1]==y[x] and d!=0:
if d==-1:
# \_ -> local minimum
localmins.append([x,y[x]])
if d==1:
# _
# / -> local maximum
localmaxs.append([x,y[x]])
d = 0
return (localmins,localmaxs)
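# Illustrative sketch (not part of the original code):
#   FindLocalExtremums([1, 3, 1]) returns ([[0, 1]], [[1, 3]])
# i.e. the first sample is reported as a local minimum and the middle sample as a
# local maximum; the last sample is only compared against, never reported itself.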
def FindLocalExtremums2(y):
"Find local extremums from a list of floats, return two lists of [x,y[x]] (localmins and localmaxs)"
d = 0 # variation of function: 0 if stable, +1 if increasing, -1 if decreasing
locextremums = [] # list of [id,type] of local extremums found
for x in range(0,len(y)-1):
if y[x+1]>y[x] and d!=1:
# \/ or _/-> local minimum
locextremums.append([x,'min'])
d = 1
if y[x+1]<y[x] and d!=-1:
# _
# /\ or \-> local maximum
locextremums.append([x,'max'])
d = -1
if y[x+1]==y[x] and d!=0:
if d==-1:
# \_ -> local minimum
locextremums.append([x,'min'])
if d==1:
# _
# / -> local maximum
locextremums.append([x,'max'])
d = 0
return locextremums
def FindLocalMaximums(points,key,FilterFunc,filterhalfsize):
"Find local maximums from a list of objects given a key, return a list of ids"
y = list(map(key,points))
# Filter input data
if FilterFunc==None:
y_filtered = y
else:
y_filtered = Filter(list(map(key,points)),FilterFunc,filterhalfsize)
# Find local mins and maxs
(localmins,localmaxs) = FindLocalExtremums(y_filtered)
    # Remove duplicates when ___ but not
# / \ /\__/\
#for i in range(0,len(localmax)-1):
# if localmax[i+1][1] == localmax[i][1]:
#
# Remove filter side effect
if FilterFunc!=None:
for i in range(0,len(localmaxs)):
if i==0:
first = localmaxs[i][0]-filterhalfsize
else:
first = max(localmaxs[i][0]-filterhalfsize,localmaxs[i-1][0]+filterhalfsize)
if i==len(localmaxs)-1:
last_plus_1 = localmaxs[i][0]+filterhalfsize+1
else:
last_plus_1 = min(localmaxs[i][0]+filterhalfsize+1,localmaxs[i+1][0]-filterhalfsize)
first = max(len(localmaxs),min(0,first))
last_plus_1 = max(len(localmaxs),min(0,last_plus_1))
xys = [[x,y[x]] for x in range(first,last_plus_1)]
#xys = [[x,y[x]] for x in range(max(0,localmaxs[i][0]-filterhalfsize),min(localmaxs[i][0]+filterhalfsize+1,len(y_notfiltered)))]
if len(xys)>0:
xys.sort(key=lambda xy: xy[1],reverse=True)
localmaxs[i] = xys[0]
else:
x = localmaxs[i][0]
localmaxs[i] = [x,y[x]]
# Sort extremums
localmaxs.sort(key=lambda pt: pt[1],reverse=True)
localmins.sort(key=lambda pt: pt[1],reverse=True)
# Return ids of points matching local max
return [mymax[0] for mymax in localmaxs]
def GeodeticDist(lat1, lng1, lat2, lng2):
return GeodeticDistVincenty(lat1, lng1, lat2, lng2)
def GeodeticDistVincenty(lat1, lng1, lat2, lng2):
# Vincenty formula (taken from geopy) with WGS-84
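    # Descriptive note (not in the original source) on the numeric literals below:
    #   0.0174532925199433        is pi / 180 (degrees to radians)
    #   0.00335281066474748...    is the WGS-84 flattening f = 1 / 298.257223563
    #   6378137.0 / 6356752.3142  are the WGS-84 semi-major / semi-minor axes in metres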
# Convert degrees to radians
lat1 = lat1 * 0.0174532925199433
lng1 = lng1 * 0.0174532925199433
lat2 = lat2 * 0.0174532925199433
lng2 = lng2 * 0.0174532925199433
delta_lng = lng2 - lng1
reduced_lat1 = atan((1 - 0.00335281066474748071984552861852) * tan(lat1))
reduced_lat2 = atan((1 - 0.00335281066474748071984552861852) * tan(lat2))
sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)
sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)
lambda_lng = delta_lng
lambda_prime = 2 * pi
iter_limit = 20 #20 iterations max
i = 0
while abs(lambda_lng - lambda_prime) > 10e-12 and i <= iter_limit:
i += 1
sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)
sin_sigma = sqrt(
(cos_reduced2 * sin_lambda_lng) ** 2 +
(cos_reduced1 * sin_reduced2 -
sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2
)
if sin_sigma == 0:
return 0 # Coincident points
cos_sigma = (
sin_reduced1 * sin_reduced2 +
cos_reduced1 * cos_reduced2 * cos_lambda_lng
)
sigma = atan2(sin_sigma, cos_sigma)
sin_alpha = (
cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma
)
cos_sq_alpha = 1 - sin_alpha ** 2
if cos_sq_alpha != 0:
cos2_sigma_m = cos_sigma - 2 * (
sin_reduced1 * sin_reduced2 / cos_sq_alpha
)
else:
cos2_sigma_m = 0.0 # Equatorial line
C = 0.00335281066474748071984552861852 / 16. * cos_sq_alpha * (4 + 0.00335281066474748071984552861852 * (4 - 3 * cos_sq_alpha))
lambda_prime = lambda_lng
lambda_lng = (
delta_lng + (1 - C) * 0.00335281066474748071984552861852 * sin_alpha * (
sigma + C * sin_sigma * (
cos2_sigma_m + C * cos_sigma * (
-1 + 2 * cos2_sigma_m ** 2
)
)
)
)
if i > iter_limit:
# Vincenty formula failed to converge => use great circle algorithm
Warn("Vincenty formula failed to converge")
return GeodeticDistGreatCircle(lat1, lng1, lat2, lng2)
u_sq = cos_sq_alpha * (6378137.0 ** 2 - 6356752.3142 ** 2) / 6356752.3142 ** 2
A = 1 + u_sq / 16384. * (
4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq))
)
B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))
delta_sigma = (
B * sin_sigma * (
cos2_sigma_m + B / 4. * (
cos_sigma * (
-1 + 2 * cos2_sigma_m ** 2
) - B / 6. * cos2_sigma_m * (
-3 + 4 * sin_sigma ** 2
) * (
-3 + 4 * cos2_sigma_m ** 2
)
)
)
)
s = 6356752.3142 * A * (sigma - delta_sigma)
return s
def GeodeticDistGreatCircleBitSlower(lat1,lon1,lat2,lon2):
lat1 = lat1 * 0.0174532925199433
lon1 = lon1 * 0.0174532925199433
lat2 = lat2 * 0.0174532925199433
lon2 = lon2 * 0.0174532925199433
sin_lat1, cos_lat1 = sin(lat1), cos(lat1)
sin_lat2, cos_lat2 = sin(lat2), cos(lat2)
delta_lng = lon2 - lon1
cos_delta_lng, sin_delta_lng = cos(delta_lng), sin(delta_lng)
d = atan2(sqrt((cos_lat2 * sin_delta_lng) ** 2 +
(cos_lat1 * sin_lat2 -
sin_lat1 * cos_lat2 * cos_delta_lng) ** 2),
sin_lat1 * sin_lat2 + cos_lat1 * cos_lat2 * cos_delta_lng)
return 6372795.0 * d
def GeodeticDistGreatCircle(lat1,lon1,lat2,lon2):
"Compute distance between two points of the earth geoid (approximated to a sphere)"
# convert inputs in degrees to radians
lat1 = lat1 * 0.0174532925199433
lon1 = lon1 * 0.0174532925199433
lat2 = lat2 * 0.0174532925199433
lon2 = lon2 * 0.0174532925199433
# just draw a schema of two points on a sphere and two radius and you'll understand
a = sin((lat2 - lat1)/2)**2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1)/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
    # 6372795.0 m is the Earth's quadratic mean radius (roughly 6371 km)
return 6372795.0 * c
def GeodeticCourse(lat1,lon1,lat2,lon2):
"Compute course from (lat1,lon1) to (lat2,lon2) Input is in degrees and output in degrees"
# convert inputs in degrees to radians
lat1 = lat1 * 0.0174532925199433
lon1 = lon1 * 0.0174532925199433
lat2 = lat2 * 0.0174532925199433
lon2 = lon2 * 0.0174532925199433
y = sin(lon2 - lon1) * cos(lat2)
x = cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(lon2 - lon1)
return (((atan2(y, x) * 180 / pi) + 360) % 360)
def ComputeDiffAfter(data):
"Return derivative of 'data'"
return [data[x+1]-data[x] for x in range(0,len(data)-2)]
def StrangeFilter(y):
"Return a function made of segments linking sucessive extremums from the continuous function 'y'"
(localmins,localmaxs) = FindLocalExtremums(y)
localextremums = localmins + localmaxs
localextremums.append([0,y[0]])
localextremums.append([len(y)-1,y[len(y)-1]])
localextremums.sort(key=lambda pt: pt[0])
val = y[0]
out = []
j = 0
for i in range(0,len(y)):
out.append(val)
if localextremums[j+1][0]>localextremums[j][0]:
val += (localextremums[j+1][1]-localextremums[j][1])/(localextremums[j+1][0]-localextremums[j][0])
if i==localextremums[j+1][0]:
j = j + 1
return out
def GetIndexOfClosestFromOrderedList(value,inputlist):
"Return the id of the item in 'inputlist' closest to 'value'. 'inputlist' must be ordered"
i = 0
# loop until inputlist[i] < value < inputlist[i+1] (or end of inputlist)
while i<len(inputlist) and inputlist[i] < value:
i += 1
if i==len(inputlist):
# all elements of inputlist are lower than value, return last id
out = i-1
elif i>0:
# if prev item is closer than current, return its id
if value-inputlist[i-1]<inputlist[i]-value:
out = i-1
else:
out = i
else:
out = i
assert(out>=0)
assert(out<len(inputlist))
return out
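# Illustrative sketch (not part of the original code):
#   GetIndexOfClosestFromOrderedList(5, [1, 4, 9]) returns 1, since 4 is the list
#   item closest to 5 and the input list is already sorted in ascending order.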
def GetIndexOfClosest(mylist,value):
"Return the index of the item of 'mylist' that is the closest to 'value'"
if len(mylist)<1:
raise IndexError('List is empty')
out_index = 0
min_dist = abs(mylist[out_index]-value)
for current_index in range(0,len(mylist)):
dist = abs(mylist[current_index]-value)
if dist < min_dist:
min_dist = dist
out_index = current_index
return out_index
## UNIT TEST CODE ##
def main():
from timeit import timeit
print(Mean([0.6,0.9,0.7]))
print("great circle 1",GeodeticDistGreatCircleBitSlower(45.0,0.0,46.0,1.0),timeit("GeodeticDistGreatCircleBitSlower(45.0,0.0,46.0,1.0)",setup="from __main__ import GeodeticDistGreatCircleBitSlower"))
print("great circle 2",GeodeticDistGreatCircle(45.0,0.0,46.0,1.0),timeit("GeodeticDistGreatCircle(45.0,0.0,46.0,1.0)",setup="from __main__ import GeodeticDistGreatCircle"))
print("vincenty",GeodeticDistVincenty(45.0,0.0,46.0,1.0),timeit("GeodeticDistVincenty(45.0,0.0,46.0,1.0)",setup="from __main__ import GeodeticDistVincenty"))
print("GeodeticDist",GeodeticDist(45.0,0.0,46.0,1.0))
if __name__ == '__main__':
main()
| gpl-3.0 |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/ctypes/test/test_values.py | 3 | 3597 | """
A testcase which accesses *values* in a dll.
"""
import unittest
from ctypes import *
import _ctypes_test
class ValuesTestCase(unittest.TestCase):
def test_an_integer(self):
# This test checks and changes an integer stored inside the
# _ctypes_test dll/shared lib.
ctdll = CDLL(_ctypes_test.__file__)
an_integer = c_int.in_dll(ctdll, "an_integer")
x = an_integer.value
self.assertEqual(x, ctdll.get_an_integer())
an_integer.value *= 2
self.assertEqual(x*2, ctdll.get_an_integer())
# To avoid test failures when this test is repeated several
# times the original value must be restored
an_integer.value = x
self.assertEqual(x, ctdll.get_an_integer())
def test_undefined(self):
ctdll = CDLL(_ctypes_test.__file__)
self.assertRaises(ValueError, c_int.in_dll, ctdll, "Undefined_Symbol")
class Win_ValuesTestCase(unittest.TestCase):
"""This test only works when python itself is a dll/shared library"""
def test_optimizeflag(self):
        # This test accesses the Py_OptimizeFlag integer, which is
# exported by the Python dll.
        # Its value is set depending on the -O and -OO flags:
# if not given, it is 0 and __debug__ is 1.
# If -O is given, the flag is 1, for -OO it is 2.
# docstrings are also removed in the latter case.
opt = c_int.in_dll(pydll, "Py_OptimizeFlag").value
if __debug__:
self.assertEqual(opt, 0)
elif ValuesTestCase.__doc__ is not None:
self.assertEqual(opt, 1)
else:
self.assertEqual(opt, 2)
def test_frozentable(self):
# Python exports a PyImport_FrozenModules symbol. This is a
# pointer to an array of struct _frozen entries. The end of the
# array is marked by an entry containing a NULL name and zero
# size.
# In standard Python, this table contains a __hello__
# module, and a __phello__ package containing a spam
# module.
class struct_frozen(Structure):
_fields_ = [("name", c_char_p),
("code", POINTER(c_ubyte)),
("size", c_int)]
FrozenTable = POINTER(struct_frozen)
ft = FrozenTable.in_dll(pydll, "PyImport_FrozenModules")
# ft is a pointer to the struct_frozen entries:
items = []
for entry in ft:
# This is dangerous. We *can* iterate over a pointer, but
# the loop will not terminate (maybe with an access
# violation;-) because the pointer instance has no size.
if entry.name is None:
break
items.append((entry.name, entry.size))
import sys
if sys.version_info[:2] >= (2, 3):
expected = [("__hello__", 104), ("__phello__", -104), ("__phello__.spam", 104)]
else:
expected = [("__hello__", 100), ("__phello__", -100), ("__phello__.spam", 100)]
self.assertEqual(items, expected)
from ctypes import _pointer_type_cache
del _pointer_type_cache[struct_frozen]
def test_undefined(self):
self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
beni55/networkx | examples/drawing/giant_component.py | 33 | 2084 | #!/usr/bin/env python
"""
This example illustrates the sudden appearance of a
giant connected component in a binomial random graph.
Requires pygraphviz and matplotlib to draw.
"""
# Copyright (C) 2006-2008
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
try:
import matplotlib.pyplot as plt
except:
raise
import networkx as nx
import math
try:
from networkx import graphviz_layout
layout=nx.graphviz_layout
except ImportError:
print("PyGraphviz not found; drawing with spring layout; will be slow.")
layout=nx.spring_layout
n=150 # 150 nodes
# p value at which giant component (of size log(n) nodes) is expected
p_giant=1.0/(n-1)
# p value at which graph is expected to become completely connected
p_conn=math.log(n)/float(n)
# the following range of p values should be close to the threshold
pvals=[0.003, 0.006, 0.008, 0.015]
region=220 # for pylab 2x2 subplot layout
plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
for p in pvals:
G=nx.binomial_graph(n,p)
pos=layout(G)
region+=1
plt.subplot(region)
plt.title("p = %6.3f"%(p))
nx.draw(G,pos,
with_labels=False,
node_size=10
)
# identify largest connected component
Gcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)
G0=Gcc[0]
nx.draw_networkx_edges(G0,pos,
with_labels=False,
edge_color='r',
width=6.0
)
# show other connected components
for Gi in Gcc[1:]:
if len(Gi)>1:
nx.draw_networkx_edges(Gi,pos,
with_labels=False,
edge_color='r',
alpha=0.3,
width=5.0
)
plt.savefig("giant_component.png")
plt.show() # display
| bsd-3-clause |
execuc/LCInterlocking | panel/hingeswidget.py | 1 | 5079 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2016 execuc *
# * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
from lasercut.hingesproperties import GlobalLivingMaterialProperties
from lasercut.hingesproperties import HingesProperties
from panel.toolwidget import ParamWidget, WidgetValue
class GlobalLivingHingeWidget(ParamWidget):
def __init__(self, global_properties):
self.name = global_properties.name
self.label = global_properties.label
ParamWidget.__init__(self, global_properties)
self.widget_list.extend([WidgetValue(type=float, name="thickness", show_name="Thickness", widget=None),
WidgetValue(type=str, name="new_name", show_name="Flat part name", widget=None),
WidgetValue(type=list, name="hinge_type", show_name="Type", widget=None,
interval_value=[GlobalLivingMaterialProperties.HINGE_TYPE_ALTERNATE_DOUBLE]),
WidgetValue(type=float, name="alternate_nb_hinge", show_name="Nb hinge per column",
widget=None, interval_value=[1, 30], decimals=0, step=1,
parent_name="hinge_type",
parent_value=[GlobalLivingMaterialProperties.HINGE_TYPE_ALTERNATE_DOUBLE]),
WidgetValue(type=float, name="occupancy_ratio", show_name="Hinges occupancy ratio",
widget=None, interval_value=[0.1, 1.], decimals=4, step=0.05,
parent_name="hinge_type",
parent_value=[GlobalLivingMaterialProperties.HINGE_TYPE_ALTERNATE_DOUBLE]),
WidgetValue(type=float, name="link_clearance", show_name="Clearance width",
widget=None, interval_value=[0., 30.], decimals=4, step=0.05),
WidgetValue(type=float, name="laser_beam_diameter", show_name="Laser beam diameter",
widget=None, interval_value=[0., 30.], decimals=4, step=0.05),
WidgetValue(type=bool, name="generate_solid", show_name="Generate solid", widget=None)])
class LivingHingeWidget(ParamWidget):
def __init__(self, hingeProperties):
self.name = hingeProperties.name
ParamWidget.__init__(self, hingeProperties)
self.widget_list.extend([WidgetValue(type=float, name="arc_inner_radius", show_name="Arc radius (inner)", widget=None),
WidgetValue(type=float, name="arc_outer_radius", show_name="Arc radius (outer)", widget=None),
WidgetValue(type=float, name="arc_length", show_name="Arc length", widget=None),
WidgetValue(type=bool, name="reversed_angle", show_name="Reverse Angle", widget=None),
WidgetValue(type=float, name="deg_angle", show_name="Angle (degree)", widget=None),
WidgetValue(type=float, name="min_links_nb", show_name="Min. link", widget=None),
WidgetValue(type=float, name="nb_link", show_name="Number link",
widget=None, interval_value=[2, 300], decimals=0, step=1)
])
| lgpl-2.1 |
junbochen/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
"""
Plots a curve with tangent lines.
Parameters
----------
x : list
List of x coordinates.
Assumed to be sorted into ascending order, so that the tangent
lines occupy 80 percent of the horizontal space between each pair
of points.
y : list
List of y coordinates
s : list
List of slopes
"""
assert isinstance(x, list)
assert isinstance(y, list)
assert isinstance(s, list)
n = len(x)
assert len(y) == n
assert len(s) == n
if pyplot is None:
raise RuntimeError("Could not import pyplot, can't run this code.")
pyplot.plot(x, y, color='b')
if n == 0:
pyplot.show()
return
pyplot.hold(True)
# Add dummy entries so that the for loop can use the same code on every
# entry
if n == 1:
        x = [x[0] - 1.] + x + [x[0] + 1.]
else:
x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
y = [0.] + y + [0]
s = [0.] + s + [0]
for i in xrange(1, n + 1):
ld = 0.4 * (x[i] - x[i - 1])
lx = x[i] - ld
ly = y[i] - ld * s[i]
rd = 0.4 * (x[i + 1] - x[i])
rx = x[i] + rd
ry = y[i] + rd * s[i]
pyplot.plot([lx, rx], [ly, ry], color='g')
pyplot.show()
if __name__ == "__main__":
# Demo by plotting a quadratic function
import numpy as np
x = np.arange(-5., 5., .1)
y = 0.5 * (x ** 2)
x = list(x)
y = list(y)
tangent_plot(x, y, x)
| bsd-3-clause |
goFrendiAsgard/kokoropy | kokoropy/packages/sqlalchemy/dialects/sqlite/pysqlite.py | 23 | 14980 | # sqlite/pysqlite.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: http://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread use the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
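For example (a minimal sketch, not part of the original documentation; the table
and column names are illustrative only)::
    from sqlalchemy import Column, Integer, MetaData, Table, Unicode
    metadata = MetaData()
    users = Table('users', metadata,
        Column('id', Integer, primary_key=True),
        # passing a plain non-unicode string with non-ASCII characters
        # for this column would warn / error as described above
        Column('name', Unicode(50)),
    )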
.. _pysqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL
-------------------------------------------------------
In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly. The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior. In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.
The issue is essentially that the driver attempts to second-guess the user's
intent, failing to start transactions and sometimes ending them prematurely, in
an effort to minimize the SQLite database's file locking behavior, even
though SQLite itself uses "shared" locks for read-only activities.
SQLAlchemy chooses to not alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver attempts to repair these issues, that will be more of a driver towards
defaults for SQLAlchemy.
The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's feature entirely and emitting BEGIN
ourselves. This is achieved using two event listeners::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
Above, we intercept a new pysqlite connection and disable any transactional
integration. Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.
When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at `BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN EXCLUSIVE")
.. seealso::
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_ - on the SQLite site
`sqlite3 SELECT does not BEGIN a transaction <http://bugs.python.org/issue9924>`_ - on the Python bug tracker
`sqlite3 module breaks transactions and potentially corrupts data <http://bugs.python.org/issue10740>`_ - on the Python bug tracker
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
}
)
if not util.py2k:
description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError as e:
try:
from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| mit |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py | 20 | 13116 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self, command_args_sequence, sess, dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_args_sequence: (list of list of str) A list of arguments for the
"run" command.
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_args_sequence = command_args_sequence
self._response_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
command_args = self._command_args_sequence[self._response_pointer]
self._response_pointer += 1
try:
self._run_handler(command_args)
except debugger_cli_common.CommandLineExit as e:
response = e.exit_token
return response
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.Variable(10.0, name="v")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sess = session.Session()
# Initialize variable.
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
# Test command sequence: run; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
# Test command sequence: run -n; run -n; run -n;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], ["-n"]], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunsUnderNonDebugThenDebugMode(self):
# Test command sequence: run -n; run -n; run; run;
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], [], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
# Test command sequence: run -t 3; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
# Test command sequence: run -t 3;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
def testRunMixingDebugModeAndMultpleTimes(self):
# Test command sequence: run -n; run -t 2; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-t", "2"], [], []], self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], []], self.sess, dump_root=self._tmp_dir)
    # Do a run that should lead to a TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
# Test command sequence:
# run -f greater_than_twelve; run -f greater_than_twelve; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-f", "v_greater_than_twelve"], ["-f", "v_greater_than_twelve"], []],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
mcreenan/eve | eve/logging.py | 17 | 1323 | from __future__ import absolute_import
import logging
from flask import request
# TODO right now we are only logging exceptions. We should probably
# add support for some INFO and maybe DEBUG level logging (like, log each time
# an endpoint is hit, etc.)
class RequestFilter(logging.Filter):
""" Adds Flask's request metadata to the log record so handlers can log
this information too.
import logging
handler = logging.FileHandler('app.log')
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(filename)s:%(lineno)d] -- ip: %(clientip)s url: %(url)s'))
app.logger.addHandler(handler)
The above example adds 'clientip' and request 'url' to every log record.
Note that the app.logger can also be used by callback functions.
    def log_a_get(resource, request, payload):
app.logger.info('we just responded to a GET request!')
app = Eve()
app.on_post_GET += log_a_get
.. versionadded:: 0.6
"""
def filter(self, record):
if request:
record.clientip = request.remote_addr
record.url = request.url
record.method = request.method
else:
record.clientip = None
record.url = None
record.method = None
return True
| bsd-3-clause |
vybstat/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
NickDaly/GemRB-MultipleConfigs | gemrb/GUIScripts/pst/Start.py | 2 | 3127 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Start.py - intro and main menu screens
###################################################
import GemRB
from GUIDefines import *
StartWindow = 0
QuitWindow = 0
def OnLoad():
global StartWindow, QuitWindow
skip_videos = GemRB.GetVar ("SkipIntroVideos")
if not skip_videos:
GemRB.PlayMovie ("BISLOGO")
GemRB.PlayMovie ("TSRLOGO")
GemRB.PlayMovie ("OPENING")
GemRB.SetVar ("SkipIntroVideos", 1)
GemRB.LoadWindowPack("START")
#quit subwindow
QuitWindow = GemRB.LoadWindow(3)
QuitTextArea = QuitWindow.GetControl(0)
QuitTextArea.SetText(20582)
ConfirmButton = QuitWindow.GetControl(1)
ConfirmButton.SetText(23787)
ConfirmButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ExitConfirmed)
ConfirmButton.SetFlags(IE_GUI_BUTTON_DEFAULT, OP_OR)
CancelButton = QuitWindow.GetControl(2)
CancelButton.SetText(23789)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ExitCancelled)
CancelButton.SetFlags(IE_GUI_BUTTON_CANCEL, OP_OR)
#main window
StartWindow = GemRB.LoadWindow(0)
NewLifeButton = StartWindow.GetControl(0)
ResumeLifeButton = StartWindow.GetControl(2)
ExitButton = StartWindow.GetControl(3)
NewLifeButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NewLifePress)
ResumeLifeButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ResumeLifePress)
ExitButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ExitPress)
ExitButton.SetFlags(IE_GUI_BUTTON_CANCEL, OP_OR)
StartWindow.CreateLabel(0x0fff0000, 0,415,640,30, "EXOFONT", "", 1)
Label=StartWindow.GetControl(0x0fff0000)
Label.SetText(GEMRB_VERSION)
QuitWindow.SetVisible(WINDOW_INVISIBLE)
StartWindow.SetVisible(WINDOW_VISIBLE)
GemRB.LoadMusicPL("Main.mus")
return
def NewLifePress():
if QuitWindow:
QuitWindow.Unload()
if StartWindow:
StartWindow.Unload()
	#to distinguish between an ingame change and a new life
GemRB.SetVar("PlayMode",0)
GemRB.SetNextScript("NewLife")
return
def ResumeLifePress():
if QuitWindow:
QuitWindow.Unload()
if StartWindow:
StartWindow.Unload()
	#to distinguish between an ingame load and the initial load
GemRB.SetVar("PlayMode",0)
GemRB.SetNextScript("GUILOAD")
return
def ExitPress():
StartWindow.SetVisible(WINDOW_GRAYED)
QuitWindow.SetVisible(WINDOW_VISIBLE)
return
def ExitConfirmed():
GemRB.Quit()
return
def ExitCancelled():
QuitWindow.SetVisible(WINDOW_INVISIBLE)
StartWindow.SetVisible(WINDOW_VISIBLE)
return
| gpl-2.0 |
sander76/home-assistant | homeassistant/components/iaqualink/switch.py | 12 | 1713 | """Support for Aqualink pool feature switches."""
from homeassistant.components.switch import DOMAIN, SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import AqualinkEntity, refresh_system
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered switches."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkSwitch(dev))
async_add_entities(devs, True)
class HassAqualinkSwitch(AqualinkEntity, SwitchEntity):
"""Representation of a switch."""
@property
def name(self) -> str:
"""Return the name of the switch."""
return self.dev.label
@property
def icon(self) -> str:
"""Return an icon based on the switch type."""
if self.name == "Cleaner":
return "mdi:robot-vacuum"
if self.name == "Waterfall" or self.name.endswith("Dscnt"):
return "mdi:fountain"
if self.name.endswith("Pump") or self.name.endswith("Blower"):
return "mdi:fan"
if self.name.endswith("Heater"):
return "mdi:radiator"
@property
def is_on(self) -> bool:
"""Return whether the switch is on or not."""
return self.dev.is_on
@refresh_system
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the switch."""
await self.dev.turn_on()
@refresh_system
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the switch."""
await self.dev.turn_off()
| apache-2.0 |
lupien/pyHegel | pyHegel/instruments_base.py | 1 | 162990 | # -*- coding: utf-8 -*-
########################## Copyrights and license ############################
# #
# Copyright 2011-2015 Christian Lupien <[email protected]> #
# #
# This file is part of pyHegel. http://github.com/lupien/pyHegel #
# #
# pyHegel is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# pyHegel is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with pyHegel. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from __future__ import absolute_import
import numpy as np
import string
import functools
import ctypes
import hashlib
import os
import signal
import sys
import time
import inspect
import thread
import threading
import weakref
from collections import OrderedDict # this is a subclass of dict
from .qt_wrap import processEvents_managed, sleep
from .kbint_util import _sleep_signal_context_manager, _delayed_signal_context_manager
from . import visa_wrap
from . import instruments_registry
from .types import dict_improved
rsrc_mngr = None
def _load_resource_manager(path=None):
global rsrc_mngr
rsrc_mngr = None
rsrc_mngr = visa_wrap.get_resource_manager(path)
try:
_load_resource_manager()
except ImportError as exc:
print 'Error loading visa resource manager. You will have reduced functionality.'
try:
_globaldict # keep the previous values (when reloading this file)
except NameError:
_globaldict = {} # This is set in pyHegel _init_pyHegel_globals (from pyHegel.commands)
class _CHECKING():
def __init__(self):
self.state = False
def get(self):
return self.state
def set(self, state):
if not isinstance(state, bool):
raise ValueError('The state needs to be a boolean')
self.state = state
def __call__(self, state=None):
"""
Called with no arguments, returns current checking mode state
With a boolean, sets the check state
"""
if state is None:
return self.get()
else:
self.set(state)
CHECKING = _CHECKING()
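# Illustrative usage sketch (not part of the original source): CHECKING is the
# module-level singleton defined just above, so code can query or toggle the
# checking-only mode like this:
#   CHECKING()        # returns the current state (False by default)
#   CHECKING(True)    # enable checking-only mode
#   CHECKING(False)   # back to normal operation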
###################
### New exceptions
class InvalidArgument(ValueError):
pass
class InvalidAutoArgument(InvalidArgument):
pass
class KeyError_Choices (KeyError):
pass
class Runtime_Get_Para_Checked(Exception):
"""
This exception is to be used to mark the end of parameter checking in a get function
"""
pass
def get_para_checked(*val):
"""
This function should be called in a _getdev after the parameters have been
checked for validity. When in CHECKING only mode, this will skip the rest of
the function.
    You should call this with one parameter (passed to the exception) or no parameters.
When a parameter is given, it will be used as the get value (and cached)
"""
if CHECKING():
raise Runtime_Get_Para_Checked(*val)
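# Illustrative sketch (hypothetical device code, not from the original source):
# a typical _getdev validates its parameters first and then calls
# get_para_checked so that, in CHECKING-only mode, execution stops there:
#   def _getdev(self, ch=1):
#       if ch not in [1, 2]:
#           raise ValueError(self.perror('invalid channel'))
#       get_para_checked()          # CHECKING mode raises here and the cache is used
#       return self.instr.ask('READ? %i'%ch)
# The channel check and the ask call are made up for the example.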
###################
class ProxyMethod(object):
def __init__(self, bound_method):
#self.class_of_method = bound_method.im_class
self.instance = weakref.proxy(bound_method.__self__)
self.unbound_func = bound_method.__func__
def __call__(self, *arg, **kwarg):
return self.unbound_func(self.instance, *arg, **kwarg)
#######################################################
## Have a status line active
#######################################################
class time_check(object):
def __init__(self, delay=10):
self.delay = delay
self.restart()
def restart(self):
self.last_update = time.time()
def check(self):
now = time.time()
if now >= self.last_update + self.delay:
self.last_update = now
return True
return False
def __call__(self):
return self.check()
class UserStatusLine(object):
"""
The is the object created by MainStatusLine.new
You should not create it directly.
To use, just call the object with the new string.
If the new string is not empty, the status line is also output.
You can force an output using the method output.
    The timed parameter, when True or a time in seconds (True is equivalent to 10 s),
    limits how often the status line is redrawn to at most once per that interval.
"""
def __init__(self, main, handle, timed=False):
self.main = main
self.handle = handle
if timed is not None and timed is not False:
if timed is True:
self._time_check = time_check()
else:
self._time_check = time_check(timed)
else:
self._time_check = None
@property
def delay(self):
if self._time_check is not None:
return self._time_check.delay
return 0
@delay.setter
def delay(self, d):
if self._time_check is not None:
self._time_check.delay = d
def restart_time(self):
if self._time_check is not None:
self._time_check.restart()
def check_time(self):
if self._time_check is not None:
return self._time_check()
return True
def remove(self):
self.main.delete(self.handle)
def __del__(self):
self.remove()
def __call__(self, new_status=''):
self.main.change(self.handle, new_status)
do_update = self.check_time()
if new_status != '' and do_update:
self.main.output()
def output(self):
self.main.output()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.remove()
class UserStatusLine_dummy(object):
"""
This is a dummy UserStatusLine so code can be more general.
"""
def __init__(self, main, handle, timed=False):
self.delay = 0.
def restart_time(self):
pass
def check_time(self):
return True
def __call__(self, new_status=''):
pass
def remove(self):
pass
def output(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
pass
class MainStatusLine(object):
"""
    This class provides a tool for combining multiple strings in a status line.
    The status line is the next line on the console, which we keep rewriting (using
    a carriage return). To use it, create a new user object (it will properly clean
    itself on deletion) from the single instance of this class (i.e. use
    mainStatusLine.new()). You can select the priority you want for the status.
Larger priority will show before lower ones. You can also put a limit to the
update rate with timed (which is passed to UserStatusLine).
For information on using the user object see UserStatusLine
Can use attribute enable to turn off the status line display
Can also use the return object (from new) as a context manager to make sure it is properly cleaned.
"""
def __init__(self):
self.last_handle = 0
self.users = {}
self.enable = True
self._lock = threading.Lock()
self._dummy = UserStatusLine_dummy(self, 0)
def new(self, priority=1, timed=False, dummy=False):
if dummy:
return self._dummy
with self._lock:
handle = self.last_handle + 1
self.last_handle = handle
self.users[handle] = [priority, '']
return UserStatusLine(self, handle, timed)
# higher priority shows before lower ones
def delete(self, handle):
with self._lock:
if handle in self.users:
del self.users[handle]
def change(self, handle, new_status):
# This locking might not be necessary but lets do it to be sure.
with self._lock:
self.users[handle][1] = new_status
def output(self):
if not self.enable:
return
# This locking might not be necessary but lets do it to be sure.
with self._lock:
entries = self.users.values()
entries = sorted(entries, key=lambda x: x[0], reverse=True) # sort on decreasing priority only
outstr = ' '.join([e[1] for e in entries if e[1] != '']) # join the non-empty status
outstr = outstr if len(outstr)<=72 else outstr[:69]+'...'
sys.stdout.write('\r%-72s'%outstr)
sys.stdout.flush()
mainStatusLine = MainStatusLine()
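# Illustrative sketch (not part of the original pyHegel source): how caller code
# can use the shared mainStatusLine instance defined above; the loop, message
# and function name are made up for the example.
def _example_status_line_usage(n_steps=5):
    with mainStatusLine.new(priority=1, timed=True) as status:
        for i in range(n_steps):
            status('example step %i/%i'%(i+1, n_steps))
            sleep(0.1)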
def wait(sec, progress_base='Wait', progress_timed=True):
"""
Time to wait in seconds.
It can be stopped with CTRL-C, and it should update the GUI while waiting.
if progress_base is None, status line update will be disabled.
"""
if progress_base is None or sec < 1:
sleep(sec)
return
progress_base += ' {:.1f}/%.1f'%sec
to = time.time()
with mainStatusLine.new(priority=100, timed=progress_timed) as progress:
while True:
dif = time.time() - to
delay = min(sec - dif, .1)
if delay <= 0:
break
sleep(delay)
progress(progress_base.format(dif))
#######################################################
## find_all_instruments function (for VISA)
#######################################################
#can list instruments with : visa.get_instruments_list()
# or : visa.get_instruments_list(use_aliases=True)
# Based on visa.get_instruments_list
def find_all_instruments(use_aliases=True):
"""Get a list of all connected devices.
Parameters:
use_aliases -- if True, return an alias name for the device if it has one.
Otherwise, always return the standard resource name like "GPIB::10".
Return value:
A list of strings with the names of all connected devices, ready for being
used to open each of them.
"""
return rsrc_mngr.get_instrument_list(use_aliases)
def test_gpib_srq_state(bus=0):
""" Test the state of the gpib bus SRQ line.
It should normally be False unless an instrument is in the process of communicating.
If it is ever True and stays that way, it will prevent further use of the line by
any other device.
    It can be caused by an instrument on the bus that is not opened in any session but
that is activating the srq line. Either open that device and clear it or turn it off.
"""
return rsrc_mngr.get_gpib_intfc_srq_state()
def _repr_or_string(val):
if isinstance(val, basestring):
return val
else:
return repr(val)
def _writevec_flatten_list(vals_list):
ret = []
for val in vals_list:
if isinstance(val, np.ndarray):
ret.extend(list(val.flatten()))
elif isinstance(val, (list, tuple)):
ret.extend(val)
else:
ret.append(val)
return ret
def _writevec(file_obj, vals_list, pre_str=''):
""" write a line of data in the open file_obj.
vals_list is a list of values or strings, or of np.ndarray which
    are flattened. Any value that is not a basestring is converted
    to a string using repr.
The columns in the file are separated by tabs.
pre_str is prepended to every line. Can use '#' when adding comments.
"""
vals_list = _writevec_flatten_list(vals_list)
strs_list = map(_repr_or_string, vals_list)
file_obj.write(pre_str+'\t'.join(strs_list)+'\n')
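# Illustrative sketch (not from the original source): given an already open file
# object f, the call
#   _writevec(f, [1.5, 'abc', np.array([[1, 2], [3, 4]])], pre_str='#')
# flattens the array and writes one tab-separated, '#'-prefixed line:
#   #1.5<TAB>abc<TAB>1<TAB>2<TAB>3<TAB>4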
def _get_conf_header_util(header, obj, options):
if callable(header):
header = header(obj, options)
if header: # if either is not None or not ''
if isinstance(header, basestring):
header=[header]
return header
# header or header() can be None, '' or False for no output
# otherwise it can be a single string for a single line or
# a list of strings. Don't include the comment character or the newline.
def _get_conf_header(format):
header = format['header']
obj = format['obj']
options = format['options']
return _get_conf_header_util(header, obj, options)
def _replace_ext(filename, newext=None):
if newext is None:
return filename
root, ext = os.path.splitext(filename)
return root+newext
def _write_dev(val, filename, format=format, first=False):
append = format['append']
bin = format['bin']
dev = format['obj']
multi = format['multi']
extra_conf = format['extra_conf']
doheader = True
if bin:
doheader = False
if append and not first:
open_mode = 'a'
doheader = False
else:
open_mode = 'w'
if bin:
open_mode += 'b'
if bin != '.ext':
filename = _replace_ext(filename, bin)
f=open(filename, open_mode)
dev._last_filename = filename
header = _get_conf_header(format)
if doheader: # if either is not None or not ''
if header:
for h in header:
f.write('#'+h+'\n')
if extra_conf: # not None or ''
# extra_conf should be a complete string including # and new lines
f.write(extra_conf)
if isinstance(multi, tuple):
_writevec(f, multi, pre_str='#')
if append:
_writevec(f, val)
else:
# we assume val is array like, except for bin where it can also be a string
# remember that float64 has 53 bits (~16 digits) of precision
# for v of shape (2,100) this will output 2 columns and 100 lines
# because of .T
if bin == '.npy':
np.save(f, val)
elif bin =='.npz':
np.savez_compressed(f, val)
elif bin:
if isinstance(val, basestring):
f.write(val)
else:
val.tofile(f)
else:
# force array so single values and lists also work
val = np.atleast_1d(val)
np.savetxt(f, val.T, fmt='%.18g', delimiter='\t')
f.close()
def _retry_wait(func, timeout, delay=0.01, progress_base='Wait', progress_timed=True, keep_delay=False):
"""
this calls func() and stops when the return value is True
or timeout seconds have passed.
delay is the sleep duration between attempts.
progress_base is prefix when using status line (for timeout >= 1s)
when set to None, status line update is disabled.
progress_timed is mainStatusLine.new timed option.
    keep_delay: when False, the delay is increased to 20 ms (if smaller) after .5 s of waiting
    to make sure the graphics get updated.
"""
ret = False
dummy = (timeout < 1.) or (progress_base is None)
with mainStatusLine.new(priority=100, timed=progress_timed, dummy=dummy) as progress:
to = time.time()
endtime = to + timeout
if progress_base is not None:
progress_base = progress_base + ' %.1f/{:.1f}'.format(timeout)
while True:
ret = func()
if ret:
break
now = time.time()
duration = now - to
remaining = endtime - now
if remaining <= 0:
break
if progress_base is not None:
progress(progress_base%duration)
if duration>.5 and not keep_delay:
delay = max(delay, 0.02)
keep_delay = True
delay = min(delay, remaining)
sleep(delay)
return ret
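# Illustrative sketch (not part of the original source): polling a hypothetical
# condition every 50 ms for up to 5 s, with a status-line message.
#   done = _retry_wait(lambda: instrument_is_ready(), timeout=5., delay=0.05,
#                      progress_base='Waiting for instrument')
#   # done is True if the condition became True before the timeout.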
class Lock_Extra(object):
def acquire(self):
return False
__enter__ = acquire
def release(self):
pass
def __exit__(self, exc_type, exc_value, exc_traceback):
self.release()
def is_owned(self):
return False
def force_release(self):
pass
class Lock_Instruments(threading._RLock):
"""
This is similar to threading.RLock (reentrant lock)
except acquire always waits in a non-blocking state.
Therefore you can press CTRL-C to stop the wait.
    However, if the other thread does not release the lock for long
enough, we might never be able to acquire it.
"""
def acquire_timeout(self, timeout):
func = lambda : super(Lock_Instruments, self).acquire(blocking=0)
return _retry_wait(func, timeout, delay=0.001)
def acquire(self):
return wait_on_event(self.acquire_timeout)
__enter__ = acquire
def is_owned(self):
return self._is_owned()
def force_release(self):
n = 0
try:
while True:
self.release()
n += 1
except RuntimeError as exc:
if exc.message != "cannot release un-acquired lock":
raise
if n:
            print 'Released Instrument lock', n, 'time(s)'
else:
print 'Instrument lock was not held'
try:
self._RLock__block.release()
except thread.error as exc:
if exc.message != 'release unlocked lock':
raise
else:
print 'Inner lock was still locked, now released.'
# This functions was moved out of _locked_calling_helper
# because it was causing different errors in python < 2.7.9
# SyntaxError: unqualified exec is not allowed in function 'locked_calling' it contains a nested function with free variables
# see https://bugs.python.org/issue21591
# https://stackoverflow.com/questions/4484872/why-doesnt-exec-work-in-a-function-with-a-subfunction
# However fixing that (by using the "exec something in lcl" syntax) leads to another error:
# SyntaxError: function 'locked_calling' uses import * and bare exec, which are illegal because it contains a nested function with free variables
# which is because I kept the exec in an if else statement. (I need to keep the exec in function form for
# future upgrade to python 3)
def _locked_calling_helper(argspec, extra):
(args, varargs, varkw, defaults) = argspec
# find and replace class (like float), functions in defaults
# will use obj.__name__ but could also try to find the object name in the
# calling locals, globals, __builtin__
if defaults is not None:
defaults_repl = [(d, d.__name__) for d in defaults if getattr(d, '__name__', None)]
else:
defaults_repl = []
defaults_repl_obj = [d[0] for d in defaults_repl]
def def_repl_func(obj):
try:
ind = defaults_repl_obj.index(obj)
except ValueError:
return '='+repr(obj)
return '='+defaults_repl[ind][1]
def_arg = inspect.formatargspec(*argspec, formatvalue=def_repl_func) # this is: (self, arg1, arg2, kw1=1, kw2=5, *arg, *kwarg)
use_arg = inspect.formatargspec(*argspec, formatvalue=lambda name: '') # this is: (self, arg1, arg2, kw1, kw2, *arg, *kwarg)
selfname = args[0]+extra
return dict(def_arg=def_arg, use_arg=use_arg, self=selfname)
# Use this as a decorator
def locked_calling(func, extra=''):
""" This function is to be used as a decorator on a class method.
It will wrap func with
with self._lock_instrument, self._lock_extra:
Only use on method in classes derived from BaseInstrument
"""
argspec = inspect.getargspec(func)
frmt_para = _locked_calling_helper(argspec, extra)
def_str = """
@functools.wraps(func)
def locked_call_wrapper{def_arg}:
" locked_call_wrapper is a wrapper that executes func with the instrument locked."
with {self}._lock_instrument, {self}._lock_extra:
return func{use_arg}
""".format(**frmt_para)
lcl = {}
lcl.update(func=func)
lcl.update(functools=functools)
# lcl is uses as both globals and locals
exec(def_str, lcl)
locked_call_wrapper = lcl['locked_call_wrapper']
### only for ipython 0.12
### This makes newfunc?? show the correct function def (including decorator)
### note that for doc, ipython tests for getdoc method
locked_call_wrapper.__wrapped__ = func
return locked_call_wrapper
def locked_calling_dev(func):
""" Same as locked_calling, but for a BaseDevice subclass. """
return locked_calling(func, extra='.instr')
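# Illustrative sketch (hypothetical classes, not from the original source): the
# decorators above are meant to wrap methods of BaseInstrument/BaseDevice
# subclasses so that the instrument locks are held for the whole call:
#   class MyInstrument(BaseInstrument):
#       @locked_calling
#       def do_something(self, ch=1):
#           ...
# On a device class, @locked_calling_dev plays the same role but reaches the
# locks through self.instr instead of self.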
class release_lock_context(object):
def __init__(self, instr):
self.instr = instr
self.n = 0
def __enter__(self):
self.n = 0
try:
while True:
self.instr._lock_release()
self.n += 1
except RuntimeError as exc:
if exc.message != "cannot release un-acquired lock":
raise
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
for i in range(self.n):
self.instr._lock_acquire()
# Taken from python threading 2.7.2
class FastEvent(threading._Event):
def __init__(self, verbose=None):
threading._Verbose.__init__(self, verbose)
self._Event__cond = FastCondition(threading.Lock())
self._Event__flag = False
class FastCondition(threading._Condition):
    def wait(self, timeout=None, balancing=True): # Newer versions of threading have added balancing
        # the old code is the same as balancing=True, which is implemented here
if balancing is not True:
raise NotImplementedError("FastCondition does not handle balancing other than True")
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = threading._allocate_lock()
waiter.acquire()
self._Condition__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive.
func = lambda : waiter.acquire(0)
gotit = _retry_wait(func, timeout, delay=0.01)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self._Condition__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
#To implement async get:
# need multi level get
# 0: is initialization (Telling system what to read and to prepare it if necessary)
# dmm1 could do :init here if bus/ext trigger
# Also start one or multiple threads to capture and save data
# Should turn on a flag saying we are busy
# Be carefull with locking if more than one thread per instrument
# setup srq listening in init or here
# The end of the thread could decide to disable the srq
# 1: is to start the task
# is trigger step. For dmm1 do trigger, or :init: if trigger is immediate
# Also setup of producing signal to finish measurment (like *OPC or for dmm1 fetch?) and prevent
# other level 0: commands
# 2: Check if data has been read
# 3: get cache
# trigger/flags can be per instrument (visa) or device(acq card)
#Enable basic async for any device (like sr830) by allowing a delay before performing measurement
#Allow to chain one device on completion of another one.
class asyncThread(threading.Thread):
def __init__(self, operations, lock_instrument, lock_extra, init_ops, detect=None, delay=0., trig=None, cleanup=None):
super(asyncThread, self).__init__()
self.daemon = True
self._stop = False
self._async_delay = delay
self._async_trig = trig
self._async_detect = detect
self._async_cleanup = cleanup
self._operations = operations
self._lock_instrument = lock_instrument
self._lock_extra = lock_extra
self._init_ops = init_ops # a list of (func, args, kwargs)
self.results = []
self._replace_index = 0
def add_init_op(self, func, *args, **kwargs):
self._init_ops.append((func, args, kwargs))
def change_delay(self, new_delay):
self._async_delay = new_delay
def change_trig(self, new_trig):
self._async_trig = new_trig
def change_detect(self, new_detect):
self._async_detect = new_detect
def change_cleanup(self, new_cleanup):
self._async_cleanup = new_cleanup
def replace_result(self, val, index=None):
if index is None:
index = self._replace_index
self._replace_index += 1
self.results[index] = val
@locked_calling
def run(self):
#t0 = time.time()
for f, args, kwargs in self._init_ops:
f(*args, **kwargs)
delay = self._async_delay
if delay and not CHECKING():
func = lambda: self._stop
_retry_wait(func, timeout=delay, delay=0.1)
if self._stop:
return
try:
if self._async_trig and not CHECKING():
self._async_trig()
#print 'Thread ready to detect ', time.time()-t0
if self._async_detect is not None:
while not self._async_detect():
if self._stop:
break
if self._stop:
return
finally:
if self._async_cleanup and not CHECKING():
self._async_cleanup()
#print 'Thread ready to read ', time.time()-t0
for func, kwarg in self._operations:
self.results.append(func(**kwarg))
#print 'Thread finished in ', time.time()-t0
def cancel(self):
self._stop = True
def wait(self, timeout=None):
# we use a the context manager because join uses sleep.
with _sleep_signal_context_manager():
self.join(timeout)
return not self.is_alive()
# For proper KeyboardInterrupt handling, the docheck function should
# be internally protected with _sleep_signal_context_manager
# This is the case for FastEvent and any function using sleep instead of time.sleep
def wait_on_event(task_or_event_or_func, check_state = None, max_time=None, progress_base='Event wait', progress_timed=True):
# task_or_event_or_func either needs to have a wait attribute with a parameter of
# seconds. Or it should be a function accepting a parameter of time in s.
# check_state allows to break the loop if check_state._error_state
# becomes True
# It returns True/False unless it is stopped with check_state in which case it returns None
# Note that Event.wait (actually threading.Condition.wait)
# tries to wait for 1ms then for 2ms more then 4, 8, 16, 32 and then in blocks
# of 50 ms. If the wait would be longer than what is left, the wait is just
# what is left. However, on windows 7 (at least), the wait ends up being
# rounded to: 1, 2, 4 and 8->10ms, 16->20ms, 32-> 40ms
# therefore, using Event.wait can produce times of 10, 20, 30, 40, 60, 100, 150
# 200 ms ...
# Can use FastEvent.wait instead of Event.wait to be faster
# progress_base is prefix when using status line (for timeout >= 1s)
# when set to None, status line update is disabled.
# progress_timed is mainStatusLine.new timed option.
start_time = time.time()
try: # should work for task (threading.Thread) and event (threading.Event)
docheck = task_or_event_or_func.wait
except AttributeError: # just consider it a function
docheck = task_or_event_or_func
dummy = (max_time is not None and max_time < 1.) or (progress_base is None)
with mainStatusLine.new(priority=100, timed=progress_timed, dummy=dummy) as progress:
if progress_base is not None:
progress_base += ' %.1f'
if max_time is not None:
progress_base = progress_base + '/{:.1f}'.format(max_time)
while True:
if max_time is not None:
check_time = max_time - (time.time()-start_time)
check_time = max(0., check_time) # make sure it is positive
check_time = min(check_time, 0.2) # and smaller than 0.2 s
else:
check_time = 0.2
if docheck(check_time):
return True
duration = time.time()-start_time
if max_time is not None and duration > max_time:
return False
if progress_base is not None:
progress(progress_base%duration)
if check_state is not None and check_state._error_state:
break
# processEvents is for the current Thread.
# if a thread does not have and event loop, this does nothing (not an error)
processEvents_managed(max_time_ms = 20)
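# Illustrative sketch (not part of the original source): waiting at most 3 s for
# a FastEvent that another thread is expected to set.
#   ev = FastEvent()
#   ok = wait_on_event(ev, max_time=3., progress_base='Waiting for trigger')
#   # ok is True if the event was set in time, False on timeout.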
def _general_check(val, min=None, max=None, choices=None, lims=None, msg_src=None):
# self is use for perror
if lims is not None:
if isinstance(lims, tuple):
min, max = lims
else:
choices = lims
mintest = maxtest = choicetest = True
if min is not None:
mintest = val >= min
if max is not None:
maxtest = val <= max
if choices:
choicetest = val in choices
state = mintest and maxtest and choicetest
if state == False:
if not mintest:
err='{val!s} is below MIN=%r'%min
if not maxtest:
err='{val!s} is above MAX=%r'%max
if not choicetest:
err='invalid value({val!s}): use one of {choices!s}'
if msg_src is None:
err = 'Failed check: '+err
else:
err = 'Failed check for %s: '%msg_src + err
d = dict(val=val, choices=repr(choices))
raise ValueError(err.format(**d), d)
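# Illustrative sketch (not part of the original source) of how _general_check
# reports failures:
#   _general_check(5, min=0, max=10)      # passes silently
#   _general_check(15, min=0, max=10)     # raises ValueError('Failed check: 15 is above MAX=10', ...)
#   _general_check('a', choices=['x', 'y'])
#       # raises ValueError("Failed check: invalid value(a): use one of ['x', 'y']", ...)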
#######################################################
## Base device
#######################################################
class BaseDevice(object):
"""
---------------- General device documentation
All devices provide a get method.
Some device also implement set, check methods.
    Users should not call the get/set methods directly but instead
should use the pyHegel set/get functions.
Both get and set use a cache variable which is accessible
with getcache, setcache methods
The gets have no positional parameters.
The sets and check have one positional parameter, which is the value.
They can have multiple keyword parameters
"""
def __init__(self, autoinit=True, doc='', setget=False, allow_kw_as_dict=False,
allow_missing_dict=False, allow_val_as_first_dict=False, get_has_check=False,
min=None, max=None, choices=None, multi=False, graph=True,
trig=False, redir_async=None):
# instr and name updated by instrument's _create_devs
# doc is inserted before the above doc
# autoinit can be False, True or a number.
# The number affects the default implementation of force_get:
# Bigger numbers are initialized first. 0 is not initialized, True is 1
# setget makes us get the value after setting it
        # this is useful for instruments that could change the value
# under us.
# allow_kw_as_dict allows the conversion of kw to a dict. There needs to be
# a choices.field_names list of values (like with ChoiceMultiple)
# allow_missing_dict, will fill the missing elements of dict with values
# from a get
# allow_val_as_first_dict when True, takes val as first element of dictionary.
# probably only useful if allow_missing_dict is also True
# get_has_check, make it true if the _getdev produces the Runtime_Get_Para_Checked
# exception (calls _get_para_checked). This is needed for proper CHECKING mode
        # or if executing the get has no side effect.
self.instr = None
self.name = 'foo'
# Use thread local data to keep the last_filename and a version of cache
self._local_data = threading.local()
self._cache = None
self._set_delayed_cache = None
self._check_cache = {}
self._autoinit = autoinit
self._setdev_p = None
self._getdev_p = None
self._setget = setget
self._trig = trig
self._redir_async = redir_async
self._last_filename = None
self.min = min
self.max = max
self.choices = choices
self._allow_kw_as_dict = allow_kw_as_dict
self._allow_missing_dict = allow_missing_dict
self._allow_val_as_first_dict = allow_val_as_first_dict
self._get_has_check = get_has_check
self._doc = doc
# obj is used by _get_conf_header and _write_dev
self._format = dict(file=False, multi=multi, xaxis=None, graph=graph,
append=False, header=None, bin=False, extra_conf=None,
options={}, obj=self)
def _delayed_init(self):
""" This function is called by instrument's _create_devs once initialization is complete """
pass
@property
def _last_filename(self):
try:
return self._local_data.last_filename
except AttributeError:
return None
@_last_filename.setter
def _last_filename(self, filename):
self._local_data.last_filename = filename
def __getattribute__(self, name):
# we override __doc__ so for instances we return the result from _get_docstring
# But when asking for __doc__ on the class we get the original docstring
# Note that __doc__ is automatically set for every class (defaults to None)
# and it does not refer to its parent __doc__.
# Also __doc__ is not writable. To make it writable, it needs to be
# overwritten in a metaclass (cls.__doc__=cls.__doc__ is enough)
# Another option is to set __doc__ = property(_get_docstring) in all
# classes (or use a metaclass to do that automatically) but then
# asking for __doc__ on the class does not return a string but a property object.
if name == '__doc__':
return self._get_docstring()
return super(BaseDevice, self).__getattribute__(name)
def _get_docstring(self, added=''):
doc_base = BaseDevice.__doc__
if doc_base is None:
doc_base = ''
doc = self._doc
extra = ''
if self.choices:
extra = '\n-------------\n Possible value to set: %s\n'%repr(self.choices)
elif self.min is not None and self.max is not None:
extra = '\n-------------\n Value between %r and %r\n'%(self.min, self.max)
elif self.min is not None:
extra = '\n-------------\n Value at least %r\n'%(self.min)
elif self.max is not None:
extra = '\n-------------\n Value at most %r\n'%(self.max)
return doc + added + extra + doc_base
# for cache consistency
# get should return the same thing set uses
@locked_calling_dev
def set(self, *val, **kwarg):
if not CHECKING():
# So when checking, self.check will be seen as in a check instead
# of a set.
self._check_cache['in_set'] = True
self.check(*val, **kwarg)
if self._check_cache:
val = self._check_cache['val']
kwarg = self._check_cache['kwarg']
set_kwarg = self._check_cache['set_kwarg']
else:
val = val[0]
set_kwarg = kwarg
if not CHECKING():
self._set_delayed_cache = None # used in logical devices
self._setdev(val, **set_kwarg)
if self._setget:
val = self.get(**kwarg)
elif self._set_delayed_cache is not None:
val = self._set_delayed_cache
        # only change cache after successful _setdev
self.setcache(val)
def _get_para_checked(self, *val):
get_para_checked(*val)
@locked_calling_dev
def get(self, **kwarg):
if self._getdev_p is None:
raise NotImplementedError, self.perror('This device does not handle _getdev')
if not CHECKING() or self._get_has_check:
self._last_filename = None
format = self.getformat(**kwarg)
kwarg.pop('graph', None) #now remove graph from parameters (was needed by getformat)
kwarg.pop('bin', None) #same for bin
kwarg.pop('extra_conf', None)
to_finish = False
if kwarg.get('filename', False) and not format['file']:
#we did not ask for a filename but got one.
#since _getdev probably does not understand filename
#we handle it here
filename = kwarg.pop('filename')
to_finish = True
try:
ret = self._getdev(**kwarg)
except Runtime_Get_Para_Checked as e:
if len(e.args) == 1:
ret = e.args[0]
elif len(e.args) > 1:
ret = e.args
else:
ret = self.getcache()
if to_finish:
_write_dev(ret, filename, format=format)
if format['bin']:
ret = None
else:
ret = self.getcache()
self.setcache(ret)
return ret
#@locked_calling_dev
def getcache(self, local=False):
"""
With local=True, returns thread local _cache. If it does not exist yet,
returns None. Use this for the data from a last fetch if another
        thread is also doing fetches. (For example, after a get, to make sure
        getcache obtains the result from the current thread, unless they are protected with a lock.)
With local=False (default), returns the main _cache which is shared between threads
(but not process). When the value is None and autoinit is set, it will
return the result of get. Use this if another thread might be changing the cached value
and you want the last one. However if another thread is changing values,
        or the user changed the values on the instrument manually (using the front panel),
        then you had better do get instead of getcache to really get the up to date value.
"""
if local:
try:
return self._local_data.cache
except AttributeError:
return None
# local is False
with self.instr._lock_instrument: # only local data, so don't need _lock_extra
if self._cache is None and self._autoinit and not CHECKING():
# This can fail, but getcache should not care for
#InvalidAutoArgument exceptions
try:
return self.get()
except InvalidAutoArgument:
self._cache = None
return self._cache
def _do_redir_async(self):
obj = self
# go through all redirections
while obj._redir_async:
obj = obj._redir_async
return obj
def getasync(self, async, **kwarg):
obj = self._do_redir_async()
if async != 3 or self == obj:
ret = obj.instr._get_async(async, obj,
trig=obj._trig, **kwarg)
# now make sure obj._cache and self._cache are the same
else: # async == 3 and self != obj:
# async thread is finished, so lock should be available
with self.instr._lock_instrument: # only local data, so don't need _lock_extra
#_get_async blocks if it is not in the correct thread and is not
#complete. Here we just keep the lock until setcache is complete
# so setcache does not have to wait for a lock.
ret = obj.instr._get_async(async, obj, **kwarg)
self.setcache(ret)
self._last_filename = obj._last_filename
if async == 3:
# update the obj local thread cache data.
obj._local_data.cache = ret
return ret
#@locked_calling_dev
def setcache(self, val, nolock=False):
if nolock == True:
self._cache = val
else:
with self.instr._lock_instrument: # only local data, so don't need _lock_extra
self._cache = val
self._local_data.cache = val # thread local, requires no lock
def __call__(self, val=None):
raise SyntaxError, """Do NOT call a device directly, like instr.dev().
Instead use set/get on the device or
functions that use set/get like sweep or record."""
def __repr__(self):
gn, cn, p = self.instr._info()
return '<device "%s" of %s=(class "%s" at 0x%08x)>'%(self.name, gn, cn, p)
def __set__(self, instance, val):
#print instance
self.set(val)
def perror(self, error_str='', **dic):
dic.update(name=self.name, instr=self.instr, gname=self.instr.find_global_name())
return ('{gname}.{name}: '+error_str).format(**dic)
# Implement these in a derived class
def _setdev(self, val, **kwarg):
raise NotImplementedError, self.perror('This device does not handle _setdev')
def _getdev(self, **kwarg):
raise NotImplementedError, self.perror('This device does not handle _getdev')
def _general_check(self, val, min=None, max=None, choices=None, lims=None, msg_src=None, str_return=False):
# This wraps the _general_check function to wrap the error message with perror
        # with str_return, it either returns an error string or None instead of producing an exception
try:
_general_check(val, min, max, choices, lims, msg_src)
except (ValueError, KeyError) as e:
new_message = self.perror(e.args[0])
# new_message = self.perror(e.args[0],**e.args[1])
if str_return:
return new_message
raise e.__class__(new_message)
def _pre_check(self, *val, **kwarg):
# This cleans up *val and **kwarg to handle _allow_kw_as_dict
# It returns a single val and a cleaned up kwarg.
# This will also always create a new _check_cache with at least the keys
# fnct_set, val, kwarg, fnct_str, set_kwarg
# in_set should be removed (so check after a set should work)
# kwarg should contain all the keyword (except for the _allow_kw_as_dict)
# that are needed for get
# set_kwarg are the kwarg passed to setdev
# Note that the returned kwarg is a copy so you can pop values out of it
# without modifying _check_cache['kwarg']
in_set = self._check_cache.get('in_set', False)
fnct_str = 'set' if in_set else 'check'
self._check_cache = {'fnct_set': in_set, 'fnct_str': fnct_str}
if self._setdev_p is None:
raise NotImplementedError, self.perror('This device does not handle %s'%fnct_str)
nval = len(val)
if nval == 1:
val = val[0]
elif nval == 0:
val = None
else:
raise RuntimeError(self.perror('%s can only have one positional parameter'%fnct_str))
allow_var_kw = False
if nval and self._allow_val_as_first_dict and not isinstance(val, dict):
val = {self.choices.field_names[0]:val}
allow_var_kw = True
if self._allow_kw_as_dict:
if val is None or allow_var_kw:
if val is None:
val = dict()
for k in kwarg.keys():
if k in self.choices.field_names:
val[k] = kwarg.pop(k)
elif nval == 0: # this permits to set a value to None
raise RuntimeError(self.perror('%s requires a value.'%fnct_str))
self._check_cache['val'] = val
self._check_cache['kwarg'] = kwarg
self._check_cache['set_kwarg'] = kwarg.copy()
return val, kwarg.copy()
def _set_missing_dict_helper(self, val, _allow=None, **kwarg):
"""
This will replace missing values if necessary.
_allow can be None (which uses self._allow_missing_dict)
or it can be False, True (which uses get) or 'cache'
which uses the cache
Actually using False is an error
        It returns the possibly updated val.
"""
if _allow is None:
_allow = self._allow_missing_dict
if _allow == 'cache':
old_val = self.getcache()
elif _allow is True:
old_val = self.get(**kwarg)
else:
raise ValueError(self.perror('Called _set_missing_dict_helper with _allow=False'))
old_val.update(val)
return old_val
def _checkdev(self, val):
# This default _checkdev handles a general check with _allow_missing_dict
# but no extra kwarg. The caller should have tested and removed them
try:
self._general_check(val, self.min, self.max, self.choices)
except KeyError_Choices:
# need to catch the exception instead of always filling all the variables
# some device might accept partial entries
# they could override _set_missing_dict_helper to only add some entries.
if not self._allow_missing_dict:
raise
kwarg = self._check_cache['kwarg']
val = self._set_missing_dict_helper(val, **kwarg)
self._check_cache['val'] = val
self._general_check(val, self.min, self.max, self.choices)
@locked_calling_dev
def check(self, *val, **kwarg):
# This raises an exception if set does not work (_setdev_p is None)
val, kwarg = self._pre_check(*val, **kwarg)
self._checkdev(val, **kwarg)
def getformat(self, filename=None, **kwarg): # we need to absorb any filename argument
# This function should not communicate with the instrument.
# first handle options we don't want saved in 'options'
graph = kwarg.pop('graph', None)
extra_conf = kwarg.pop('extra_conf', None)
self._format['options'] = kwarg
        #now handle the other overrides
bin = kwarg.pop('bin', None)
xaxis = kwarg.pop('xaxis', None)
        # we need to return a copy so changes to the dict here and above do not
        # affect the devices dict permanently
format = self._format.copy()
if graph is not None:
format['graph'] = graph
if bin is not None:
format['file'] = False
format['bin'] = bin
if xaxis is not None and format['xaxis'] is not None:
format['xaxis'] = xaxis
format['extra_conf'] = extra_conf
return format
def getfullname(self):
return self.instr.header.getcache()+'.'+self.name
def force_get(self):
"""
Force a reread of the instrument attached to this device.
This should be called before saving headers.
"""
self.instr.force_get()
class wrapDevice(BaseDevice):
def __init__(self, setdev=None, getdev=None, checkdev=None, getformat=None, **extrak):
# auto insert documentation if setdev or getdev has one.
if not extrak.has_key('doc'):
if setdev is not None and setdev.__doc__:
extrak['doc'] = setdev.__doc__
elif getdev is not None and getdev.__doc__:
extrak['doc'] = getdev.__doc__
BaseDevice.__init__(self, **extrak)
# the methods are unbound methods.
self._setdev_p = setdev
self._getdev_p = getdev
self._checkdev_p = checkdev
self._getformat = getformat
def _setdev(self, val, **kwarg):
self._setdev_p(val, **kwarg)
def _getdev(self, **kwarg):
return self._getdev_p(**kwarg)
def _checkdev(self, val, **kwarg):
if self._checkdev_p is not None:
self._checkdev_p(val, **kwarg)
else:
super(wrapDevice, self)._checkdev(val, **kwarg)
def getformat(self, **kwarg):
if self._getformat is not None:
return self._getformat(**kwarg)
else:
return super(wrapDevice, self).getformat(**kwarg)
class cls_wrapDevice(BaseDevice):
def __init__(self, setdev=None, getdev=None, checkdev=None, getformat=None, **extrak):
# auto insert documentation if setdev or getdev has one.
if not extrak.has_key('doc'):
if setdev is not None and setdev.__doc__:
extrak['doc'] = setdev.__doc__
elif getdev is not None and getdev.__doc__:
extrak['doc'] = getdev.__doc__
BaseDevice.__init__(self, **extrak)
# the methods are unbound methods.
self._setdev_p = setdev
self._getdev_p = getdev
self._checkdev_p = checkdev
self._getformat = getformat
def _setdev(self, val, **kwarg):
self._setdev_p(self.instr, val, **kwarg)
def _getdev(self, **kwarg):
return self._getdev_p(self.instr, **kwarg)
def _checkdev(self, val, **kwarg):
if self._checkdev_p is not None:
self._checkdev_p(self.instr, val, **kwarg)
else:
super(cls_wrapDevice, self)._checkdev(val, **kwarg)
def getformat(self, **kwarg):
if self._getformat is not None:
return self._getformat(self.instr, **kwarg)
else:
return super(cls_wrapDevice, self).getformat(**kwarg)
def _find_global_name(obj):
dic = _globaldict
try:
return [k for k,v in dic.iteritems() if v is obj and k[0]!='_'][0]
except IndexError:
return "name_not_found"
# Using this metaclass, the class method
# _add_class_devs will be executed at class creation.
# Hence added devices will be part of the class and will
# allow the inst.dev=2 syntax
# (Since for the device __set__ to work requires the
# object to be part of the class, not the instance)
class MetaClassInit(type):
def __init__(cls, name, bases, dct):
cls._add_class_devs()
type.__init__(cls, name, bases, dct)
#TODO: maybe override classmethod, automatically call _add_class_devs for all devices...
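# A minimal sketch (an assumption for illustration, not part of the original code)
# of how the metaclass is meant to be used: a hypothetical subclass adds devices in
# _add_class_devs so they become class attributes, which is what lets the device
# descriptor's __set__ support the inst.dev = value syntax.
#
#   class MyInstr(BaseInstrument):
#       def _level_setdev(self, val):
#           self.write('LEVEL %r'%val)
#       def _level_getdev(self):
#           return float(self.ask('LEVEL?'))
#       @classmethod
#       def _add_class_devs(cls):
#           cls._cls_devwrap('level')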
#######################################################
## Base Instrument
#######################################################
# Async behavior changed 2015-06-03
# Before, the device would select either trig or delay
# trig would use triggering, delay would use async_delay
# If multiple device used both, they would both be turned on
# and run_and_wait would only ever use trig, never async_delay
# That was never really used and did not provide flexibility
# like for devices that can sometimes need one or the other
# or making run_and_wait behave like async for delay
# Now, to improve the situation, I removed the option of
# delay for devices. Devices can only say whether they need triggering
# or not. They also use it when they need a delay.
# async_delay is always respected for any and all devices,
# and for both async and run_and_wait. It is used before the trig
# For the wait option in a trig, we use async_wait device.
# Finally the selection of whether to use a trigger or
# a delay is left to _async_trig and _async_detect.
# They both use information from _async_mode which should be
# set by _async_select which is called in the async thread (init_list)
# and by ReadvalDev
class BaseInstrument(object):
__metaclass__ = MetaClassInit
alias = None
# add _quiet_delete here in case we call __del__ before __init__ because of problem in subclass
_quiet_delete = False
def __init__(self, quiet_delete=False):
self._quiet_delete = quiet_delete
self.header_val = None
self._lock_instrument = Lock_Instruments()
if not hasattr(self, '_lock_extra'):
# don't overwrite what is assigned in subclasses
self._lock_extra = Lock_Extra()
self._async_mode = 'wait'
self._create_devs()
self._async_local_data = threading.local()
self._async_wait_check = True
# The _async_statusLine can be used in _async_detect to update the user
# on the progress.
self._async_statusLine = mainStatusLine.new(timed=True)
self._last_force = time.time()
self._conf_helper_cache = None # this is filled by conf_helper (should be under a locked state to prevent troubles)
self.init(full=True)
def __del__(self):
if not self._quiet_delete:
print 'Destroying '+repr(self)
def _async_select(self, devs):
""" It receives a list of devices to help decide how to wait.
The list entries can be in the form (dev, option_dict) or just dev
"""
pass
def _async_detect(self, max_time=.5): # subclasses should only call this if they need async_wait
data = self._get_async_local_data()
cur = time.time()
left = data.async_wait - (cur - data.async_wait_start)
if left <= 0.:
return True
if left <= max_time:
sleep(left)
return True
sleep(max_time)
return False
@locked_calling
def _async_trig(self): # subclasses can always call this
self._async_statusLine.restart_time()
data = self._get_async_local_data()
if self._async_mode.startswith('wait'):
self._async_wait_check_helper()
data = self._get_async_local_data()
data.async_wait_start = time.time()
data.async_wait = self.async_wait.getcache()
def _async_cleanup_after(self): # subclass overrides should call this. Called unconditionally after async/run_and_wait
self._async_statusLine('')
def _async_wait_check_helper(self):
if self._async_wait_check and self.async_wait.getcache() == 0.:
print self.perror('***** WARNING You should give a value for async_wait *****')
self._async_wait_check = False
@locked_calling
def wait_after_trig(self):
"""
waits until the triggered event is finished
"""
try:
ret = wait_on_event(self._async_detect)
finally:
self._async_cleanup_after()
return ret
# Always make sure that asyncThread run behaves in the same way
@locked_calling
def run_and_wait(self):
"""
This initiates a trigger and waits for it to finish.
"""
sleep(self.async_delay.getcache())
try:
self._async_trig()
self.wait_after_trig()
finally: # in case we were stopped because of KeyboardInterrupt or something else.
self._async_cleanup_after()
def _get_async_local_data(self):
d = self._async_local_data
try:
d.async_level
except AttributeError:
d.async_list = []
d.async_select_list = []
d.async_list_init = []
d.async_level = -1
d.async_counter = 0
d.async_task = None
d.async_wait_start = 0.
d.async_wait = 0.
return d
def _under_async_setup(self, task):
self._async_running_task = task
def _under_async(self):
try:
return self._async_running_task.is_alive()
except AttributeError:
return False
def _get_async(self, async, obj, trig=False, **kwarg):
# get_async should not change anything about the instrument until
# we run the asyncThread. Should only change local thread data.
# we are not protected by a lock until that.
data = self._get_async_local_data()
if async == -1: # we reset task
if data.async_level > 1:
data.async_task.cancel()
data.async_level = -1
if async != 3 and not (async == 2 and data.async_level == -1) and (
async < data.async_level or async > data.async_level + 1):
if data.async_level > 1:
data.async_task.cancel()
data.async_level = -1
raise ValueError, 'Async in the wrong order. Resetting order. Try again..'
if async == 0: # setup async task
if data.async_level == -1: # first time through
data.async_list = []
data.async_select_list = []
data.async_list_init = [(self._async_select, (data.async_select_list, ), {})]
delay = self.async_delay.getcache()
data.async_task = asyncThread(data.async_list, self._lock_instrument, self._lock_extra, data.async_list_init, delay=delay)
data.async_list_init.append((self._under_async_setup, (data.async_task,), {}))
data.async_level = 0
if trig:
data.async_task.change_detect(self._async_detect)
data.async_task.change_trig(self._async_trig)
data.async_task.change_cleanup(self._async_cleanup_after)
data.async_list.append((obj.get, kwarg))
data.async_list.append((lambda: obj._last_filename, {}))
data.async_select_list.append((obj, kwarg))
elif async == 1: # Start async task (only once)
#print 'async', async, 'self', self, 'time', time.time()
if data.async_level == 0: # First time through
data.async_task.start()
data.async_level = 1
elif async == 2: # Wait for task to finish
#print 'async', async, 'self', self, 'time', time.time()
if data.async_level == 1: # First time through (no need to wait for subsequent calls)
wait_on_event(data.async_task)
data.async_level = -1
data.async_counter = 0
elif async == 3: # get values
#print 'async', async, 'self', self, 'time', time.time()
#return obj.getcache()
ret = data.async_task.results[data.async_counter]
# Need to copy the _last_filename item because it is thread local
self._last_filename = data.async_task.results[data.async_counter+1]
data.async_counter += 2
if data.async_counter == len(data.async_task.results):
# delete task so that instrument can be deleted
del data.async_task
del data.async_list
del data.async_select_list
del data.async_list_init
del self._async_running_task
return ret
def find_global_name(self):
return _find_global_name(self)
@classmethod
def _cls_devwrap(cls, name):
# Only use this if the class will be using only one instance
# Otherwise multiple instances will collide (reuse same wrapper)
setdev = getdev = checkdev = getformat = None
for s in dir(cls):
if s == '_'+name+'_setdev':
setdev = getattr(cls, s)
if s == '_'+name+'_getdev':
getdev = getattr(cls, s)
if s == '_'+name+'_checkdev':
checkdev = getattr(cls, s)
if s == '_'+name+'_getformat':
getformat = getattr(cls, s)
wd = cls_wrapDevice(setdev, getdev, checkdev, getformat)
setattr(cls, name, wd)
def _getdev_para_checked(self, *val):
"""
This function should be called in a _getdev (devwrap with get_has_check option enabled)
after the parameters have been checked for validity.
When in CHECKING-only mode, this will skip the rest of the function.
You should call this with one parameter (passed to the exception) or no parameters.
When a parameter is given, it will be used as the get value (and cached).
"""
get_para_checked(*val)
def _devwrap(self, name, **extrak):
setdev = getdev = checkdev = getformat = None
cls = type(self)
for s in dir(self):
if s == '_'+name+'_setdev':
setdev = getattr(cls, s)
if s == '_'+name+'_getdev':
getdev = getattr(cls, s)
if s == '_'+name+'_checkdev':
checkdev = getattr(cls, s)
if s == '_'+name+'_getformat':
getformat = getattr(cls, s)
wd = cls_wrapDevice(setdev, getdev, checkdev, getformat, **extrak)
setattr(self, name, wd)
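# A commented sketch (hypothetical names, not from the original code) of the
# naming convention _devwrap relies on: for a device called 'freq', the instrument
# defines _freq_setdev/_freq_getdev (and optionally _freq_checkdev, _freq_getformat)
# and wraps them from its _create_devs override.
#
#   def _freq_setdev(self, val):
#       self.write('FREQ %r'%val)
#   def _freq_getdev(self):
#       return float(self.ask('FREQ?'))
#   def _create_devs(self):
#       self._devwrap('freq')
#       super(MyInstr, self)._create_devs()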
def devs_iter(self):
for devname in dir(self):
obj = getattr(self, devname)
if devname != 'alias' and isinstance(obj, BaseDevice):
yield devname, obj
def _create_devs_helper(self, once=False):
"""
Users can call this function after creating new devices for an instrument
that already exists. It will properly initialize the new devices.
The user might call it with once=True.
"""
# if the instrument has a _current_config function and the device does
# not specify anything for header in its format string then
# we assign it.
#
# need the ProxyMethod to prevent binding which blocks __del__
if hasattr(self, '_current_config'):
conf = ProxyMethod(self._current_config)
else:
conf = None
for devname, obj in self.devs_iter():
if once and obj.instr is not None:
continue
obj.instr = weakref.proxy(self)
obj.name = devname
if conf and not obj._format['header']:
obj._format['header'] = conf
for devname, obj in self.devs_iter():
# some device depend on others. So finish all initialization before delayed_init
obj._delayed_init()
def _create_devs(self):
# devices need to be created here (not at class level)
# because we want each instrument instance to use its own
# device instance (otherwise they would share the instance data)
self.async_delay = MemoryDevice(0., doc=
"In seconds. This is the delay before the trigger in async and run_and_wait.")
self.async_wait = MemoryDevice(0., doc=
"In seconds. This is the wait time after a trig for devices that don't use a real trig/detect sequence.")
self._async_base_dev = MemoryDevice(0, doc="internal dummy device used for triggering", trig=True, autoinit=False, choices=[0])
self.run_and_wait_dev = ReadvalDev(self._async_base_dev, doc="This is a dummy device to be used when requiring a trigger (run_and_wait) from the instrument.")
self._devwrap('header')
self._create_devs_helper()
# def _current_config(self, dev_obj, get_options):
# pass
def _conf_helper(self, *devnames, **kwarg):
"""
The positional arguments are either device name strings or a dictionnary.
When given a dictionnary, it will be shown as options.
no_default: when True, skips adding some default entries (like idn)
It can only be a kwarg.
if not given, it behaves as True unless one of the options
is a dictionnary, the it behaves as False.
So for the default use of _conf_helper were only one the
calls includes the options dictionnary (and there is always
one), then there is no need to specify this values. The
default behavior is correct.
"""
ret = []
no_default = kwarg.pop('no_default', None)
if len(kwarg):
raise InvalidArgument('Invalid keyword arguments %s'%kwarg)
if no_default is None:
no_default = True
for devname in devnames[::-1]: # start from the end
if isinstance(devname, dict):
no_default = False
# by default we will append
add_to = lambda base, x: base.append(x)
if isinstance(devnames[-1], dict):
# unless last item is a dict then we insert before it
add_to = lambda base, x: base.insert(-1, x)
if not no_default:
async_delay = self.async_delay.getcache()
if async_delay != 0:
devnames = list(devnames) # need to convert from tuple to a mutable list
add_to(devnames, 'async_delay')
for devname in devnames:
if isinstance(devname, dict):
val = repr(devname)
devname = 'options'
else:
try:
val = _repr_or_string(getattr(self, devname).getcache())
except AttributeError:
val = _repr_or_string(getattr(self, devname)())
ret.append('%s=%s'%(devname, val))
if not no_default:
add_to(ret, 'class_name="%s"'%self.__class__.__name__)
add_to(ret, 'idn="%s"'%self.idn())
self._conf_helper_cache = no_default, add_to
return ret
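# A commented usage sketch for _conf_helper (hypothetical device names, not from
# the original code): a subclass _current_config typically returns a list of
# 'name=value' strings, passing the get/set options dictionary as the last argument.
#
#   def _current_config(self, dev_obj=None, get_options={}):
#       return self._conf_helper('freq', 'amplitude', get_options)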
def read(self):
raise NotImplementedError, self.perror('This instrument class does not implement read')
def write(self, val):
raise NotImplementedError, self.perror('This instrument class does not implement write')
def ask(self, question):
raise NotImplementedError, self.perror('This instrument class does not implement ask')
def ask_write(self, command):
"""
Automatically selects between ask or write depending on the presence of a ?
"""
if '?' in command:
return self.ask(command)
else:
self.write(command)
def init(self, full=False):
""" Do instrument initialization (full=True)/reset (full=False) here """
# Your function should try and not interfere with another thread/process
# already using the instrument (if it is allowed). So it should only set things
# to values that should not change afterwards, or reset things that are protected
# with locks
pass
# This allows instr.get() ... to be redirected to instr.alias.get()
def __getattr__(self, name):
if name in ['get', 'set', 'check', 'getcache', 'setcache', 'instr',
'name', 'getformat', 'getasync', 'getfullname']:
if self.alias is None:
raise AttributeError, self.perror('This instrument does not have an alias for {nm}', nm=name)
return getattr(self.alias, name)
else:
raise AttributeError, self.perror('{nm} is not an attribute of this instrument', nm=name)
def __call__(self):
if self.alias is None:
raise TypeError, self.perror('This instrument does not have an alias for call')
return self.alias()
@locked_calling
def force_get(self):
"""
Rereads all devices that have autoinit=True
This should be called when a user might have manually changed some
settings on an instrument.
It is limited to once per 2 seconds.
"""
if time.time()-self._last_force < 2:
# less than 2s since last force, skip it
return
l = []
for s, obj in self.devs_iter():
if obj._autoinit:
l.append( (float(obj._autoinit), obj) )
l.sort(reverse=True)
for flag,obj in l:
try:
obj.get()
except InvalidAutoArgument:
pass
self._last_force = time.time()
@locked_calling
def iprint(self, force=False):
poptions = np.get_printoptions()
if force:
self.force_get()
ret = ''
np.set_printoptions(threshold=50)
for s, obj in self.devs_iter():
if obj is self._async_base_dev:
continue
if self.alias == obj:
ret += 'alias = '
val = obj.getcache()
ret += s+" = "+repr(val)+"\n"
np.set_printoptions(**poptions)
return ret
def idn(self):
"""
This method should return a string that uniquely identify the instrument.
For scpi it is often: <company name>,<model number>,<serial number>,<firmware revision>
"""
return "Undefined identification,X,0,0"
def idn_split(self):
idn = self.idn()
parts = idn.split(',', 4) # There could be , in serial firmware revision
# I also use lstrip because some devices put a space after the comma.
return dict(vendor=parts[0], model=parts[1].lstrip(), serial=parts[2].lstrip(), firmware=parts[3].lstrip())
def _info(self):
return self.find_global_name(), self.__class__.__name__, id(self)
def __repr__(self):
gn, cn, p = self._info()
return '%s = <"%s" instrument at 0x%08x>'%(gn, cn, p)
def perror(self, error_str='', **dic):
dic.update(instr=self, gname=self.find_global_name())
return ('{gname}: '+error_str).format(**dic)
def _header_getdev(self):
if self.header_val is None:
return self.find_global_name()
else:
return self.header_val
def _header_setdev(self, val):
self.header_val = val
@classmethod
def _add_class_devs(cls):
pass
def trigger(self):
pass
def lock_force_release(self):
self._lock_instrument.force_release()
self._lock_extra.force_release()
def lock_is_owned(self):
return self._lock_instrument.is_owned() or self._lock_extra.is_owned()
def _lock_acquire(self):
self._lock_instrument.acquire()
self._lock_extra.acquire()
def _lock_release(self):
self._lock_instrument.release()
self._lock_extra.release()
#######################################################
## Memory device
#######################################################
class MemoryDevice(BaseDevice):
def __init__(self, initval=None, **kwarg):
"""
Provides _tostr and _fromstr using the choices functions if
choices are given. Otherwise it uses the type of initval.
autoinit and setget are disabled internally (they are useless for a Memory device.)
"""
kwarg['autoinit'] = False
kwarg['setget'] = False
kwarg['get_has_check'] = True
BaseDevice.__init__(self, **kwarg)
self.setcache(initval, nolock=True)
self._setdev_p = True # needed to enable BaseDevice set in checking mode and also the check function
self._getdev_p = True # needed to enable BaseDevice get in Checking mode
if self.choices is not None and isinstance(self.choices, ChoiceBase):
self.type = self.choices
else:
self.type = type(initval)
def _getdev(self):
self._get_para_checked() # This is not necessary, since in CHECKING we will read the cache anyway
# but place it here as an example and to test the code.
return self.getcache()
def _setdev(self, val):
self.setcache(val)
def _tostr(self, val):
# This function converts from val to a str for the command
t = self.type
return _tostr_helper(val, t)
def _fromstr(self, valstr):
# This function converts from the query result to a value
t = self.type
return _fromstr_helper(valstr, t)
def _tostr_helper(val, t):
# This function converts from val to a str for the command
if t == bool: # True= 1 or ON, False= 0 or OFF
return str(int(bool(val)))
if t == float or t == int:
# use repr instead of str to keep full precision
return repr(val)
if t == complex:
return '%r,%r'%(val.real, val.imag)
if t is None or (type(t) == type and issubclass(t, basestring)):
return val
return t.tostr(val)
def _fromstr_helper(valstr, t):
# This function converts from the query result to a value
if t == bool: # it is '0' or '1'
return bool(int(valstr))
#if t == bool: # it is '0' or '1' or ON or OFF
#try:
# return bool(int(valstr))
#except ValueError:
# if valstr.upper() == 'ON':
# return True
# elif valstr.upper() == 'OFF':
# return False
# else:
# raise
if t == float or t == int:
return t(valstr)
if t == complex:
vals = valstr.split(',')
vals = map(float, vals)
return complex(*vals)
if t is None or (type(t) == type and issubclass(t, basestring)):
return valstr
return t(valstr)
def _get_dev_min_max(instr, ask_str, str_type=float, ask='both'):
""" ask_str is the question string.
ask can be both, min or max. It always returns a tuple (min, max).
If the value was not obtained it will be None
See also dev._get_dev_min_max, instr._get_dev_min_max
"""
if ask not in ['both', 'min', 'max']:
raise ValueError('Invalid ask in _get_dev_min_max')
min = max = None
if ask in ['both', 'min']:
min = _fromstr_helper(instr.ask(ask_str+' min'), str_type)
if ask in ['both', 'max']:
max = _fromstr_helper(instr.ask(ask_str+' max'), str_type)
return min, max
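# A hedged usage sketch (assumed SCPI command, not from the original code): many
# SCPI instruments answer '<question> min' / '<question> max', so
#   _get_dev_min_max(instr, 'SOURce:VOLTage?')
# would return a (min, max) tuple of floats, with None for any value not requested.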
#######################################################
## SCPI device
#######################################################
class scpiDevice(BaseDevice):
_autoset_val_str = ' {val}'
def __init__(self,setstr=None, getstr=None, raw=False, chunk_size=None, autoinit=True, autoget=True, get_cached_init=None,
str_type=None, choices=None, doc='',
auto_min_max=False,
options={}, options_lim={}, options_apply=[], options_conv={},
extra_check_func=None, extra_set_func=None, extra_set_after_func=None,
ask_write_opt={}, **kwarg):
"""
str_type can be float, int, None
If choices is an instance of ChoiceBase, then str_type will be
set to that object if unset.
If only getstr is not given and autoget is true
a getstr is created by appending '?' to setstr.
If autoget is false and there is no getstr, autoinit is set to False.
When autoget is false, if get_cached_init is not None, then
the cache is used instead of get and is initialized to the value of
get_cached_init. You probably should initialize it during the instrument
init.
auto_min_max can be False, True, 'min' or 'max'. True will do both 'min' and
'max'. It will call the getstr with min, max to obtain the limits.
raw when True will use read_raw instead of the default raw (in get)
chunk_size is the option for ask.
options is a list of optional parameters for get and set.
It is a dictionary, where the keys are the option names
and the values are the default value for each option.
If the value is a device, then by default the cache of the
device is used.
An option like 'ch' can be used in the setstr/getstr parameter
as {ch} (see string.format)
For the setstr string you can use {val} to specify the position of the
value, otherwise ' {val}' is automatically appended. Note that if you specify
{val} in the setstr, autoget is disabled.
options_lim is dict of the range of values: It can be
-None (the default) which means no limit
-a tuple of (min, max)
either one can be None to be unset
-a list of choices (the object needs to handle __contains__)
options_conv is a dict of functions to convert the value to a useful format.
Each function receives 2 parameters (val, _tostr(val)).
options_apply is a list of options that need to be set, in that order, when defined.
By default, autoinit=True is transformed to 10 (higher priority)
unless options contains another device, in which case it is set to 1.
extra_check_func, extra_set_func, extra_set_after_func are all functions called
before or after (when 'after' is in the name) the internal
implementation proceeds. They allow modification of the default behavior
(useful for more complicated range checks). Their signatures are:
extra_check_func(val, dev_obj)
can return "__SKIP__NEXT__" to skip the internal implementation
extra_set_func(val, dev_obj, **kwargs)
can return "__SKIP__NEXT__" to skip the internal implementation
extra_set_after_func(val, dev_obj, **kwargs)
ask_write_opt are options passed to the ask and write methods.
(a commented usage sketch follows this __init__ method)
"""
if setstr is None and getstr is None:
raise ValueError, 'At least one of setstr or getstr needs to be specified'
if auto_min_max not in [False, True, 'min', 'max']:
raise ValueError('Invalid auto_min_max values')
self._auto_min_max = auto_min_max
if setstr is not None and getstr is None and autoget == False:
# we don't have get, so we remove autoinit to prevent problems with cache and force_get (iprint)
autoinit = False
if isinstance(choices, ChoiceBase) and str_type is None:
str_type = choices
if autoinit == True:
autoinit = 10
test = [ True for k,v in options.iteritems() if isinstance(v, BaseDevice)]
if len(test):
autoinit = 1
BaseDevice.__init__(self, doc=doc, autoinit=autoinit, choices=choices, get_has_check=True, **kwarg)
self._setdev_p = setstr
if setstr is not None:
fmtr = string.Formatter()
val_present = False
for txt, name, spec, conv in fmtr.parse(setstr):
if name == 'val':
val_present = True
autoget = False
if not val_present:
self._setdev_p = setstr + self._autoset_val_str
self._getdev_cache = False
if getstr is None:
if autoget:
getstr = setstr+'?'
elif get_cached_init is not None:
self.setcache(get_cached_init, nolock=True)
self._getdev_cache = True
getstr = True
self._getdev_p = getstr
self._options = options
self._options_lim = options_lim
self._options_apply = options_apply
self._options_conv = options_conv
self._ask_write_opt = ask_write_opt
self._extra_check_func = extra_check_func
self._extra_set_func = extra_set_func
self._extra_set_after_func = extra_set_after_func
self.type = str_type
self._raw = raw
self._chunk_size = chunk_size
self._option_cache = {}
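# A commented usage sketch for scpiDevice (hypothetical SCPI commands and channel
# option, not from the original code). The {ch} option is substituted in both the
# set and get strings, and options_lim restricts its allowed values:
#
#   self.freq = scpiDevice('SOURce{ch}:FREQuency', str_type=float,
#                          options=dict(ch=1), options_lim=dict(ch=(1, 2)))
#
# With autoget left True, the get string becomes 'SOURce{ch}:FREQuency?'.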
def _delayed_init(self):
""" This is called after self.instr is set """
auto_min_max = self._auto_min_max
if auto_min_max in ['min', 'max']:
self._auto_set_min_max(auto_min_max)
elif auto_min_max:
self._auto_set_min_max()
super(scpiDevice, self)._delayed_init()
def _auto_set_min_max(self, ask='both'):
mnmx = self._get_dev_min_max(ask)
self._set_dev_min_max(*mnmx)
@locked_calling_dev
def _get_dev_min_max(self, ask='both'):
""" ask can be both, min or max. It always returns a tuple (min, max).
If the value was not obtained it will be None.
See also instr._get_dev_min_max
"""
options = self._combine_options()
command = self._getdev_p
command = command.format(**options)
return _get_dev_min_max(self.instr, command, self.type, ask)
def _set_dev_min_max(self, min=None, max=None):
if min is not None:
self.min = min
if max is not None:
self.max = max
def _get_docstring(self, added=''):
# we don't include options starting with _
if len(self._options) > 0:
added += '---------- Optional Parameters\n'
for optname, optval in self._options.iteritems():
basedev = False
if isinstance(optval, BaseDevice):
basedev = True
if optname[0] != '_':
added += '{optname}: has default value {optval!r}\n'.format(optname=optname, optval=optval)
lim = self._options_lim.get(optname, None)
if lim is not None:
if basedev:
added += ' current choices (above device): '
else:
added += ' current choices: '
if isinstance(lim, tuple):
if lim[0] is None and lim[1] is None:
added += 'any value allowed'
else:
if lim[0] is not None:
added += '%r <= '%lim[0]
added += '%s'%optname
if lim[1] is not None:
added += ' <= %r'%lim[1]
else:
added += repr(lim)
added += '\n'
return super(scpiDevice, self)._get_docstring(added=added)
def _tostr(self, val):
# This function converts from val to a str for the command
t = self.type
return _tostr_helper(val, t)
def _fromstr(self, valstr):
# This function converts from the query result to a value
t = self.type
return _fromstr_helper(valstr, t)
def _get_option_values(self, extradict={}):
opt = self._options.copy()
d = {k:v.getcache() for k, v in opt.iteritems() if isinstance(v, BaseDevice)}
opt.update(d)
opt.update(extradict)
return opt
@locked_calling_dev
def getcache(self, local=False):
if local:
return super(scpiDevice, self).getcache(local=True)
#we need to check if we still are using the same options
curr_cache = self._get_option_values()
if self._option_cache != curr_cache:
self.setcache(None)
return super(scpiDevice, self).getcache()
def _check_option(self, option, val):
"""
Checks the option with value val
If it is not an option, raise a KeyError
If it is not within min/max or choices for this option, returns an error string
If everything is fine, return None
"""
if option not in self._options.keys():
raise KeyError, self.perror('This device does not handle option "%s".'%option)
lim = self._options_lim.get(option)
# if no limits were given but this is a device, use the limits from the device.
# TODO use dev.check (trap error)
if lim is None and isinstance(self._options[option], BaseDevice):
dev = self._options[option]
lim = (dev.min, dev.max)
if dev.choices is not None:
lim = dev.choices
return self._general_check(val, lims=lim, msg_src='Option "%s"'%option, str_return=True)
def _combine_options(self, **kwarg):
# get values from devices when needed.
# The list of correct values could be a subset so push them to kwarg
# for testing.
# clean up kwarg by removing all None values
kwarg = { k:v for k, v in kwarg.iteritems() if v is not None}
for k, v in kwarg.iteritems():
ck = self._check_option(k, v)
if ck is not None:
# in case of error, raise it
raise InvalidArgument(ck)
# Some devices need to keep track of the current value so we set them
# if changed
for k in self._options_apply:
if k in kwarg.keys():
v = kwarg[k]
opt_dev = self._options[k]
if opt_dev.getcache() != v:
opt_dev.set(v)
# Now get default values and check them if necessary
options = self._get_option_values(kwarg)
for k,v in options.iteritems():
if k not in kwarg:
ck = self._check_option(k, v)
if ck is not None:
# There was an error, returned value not currently valid
# so return it instead of dictionnary
raise InvalidAutoArgument(ck)
# everything checks out so use those kwarg
options.update(kwarg)
self._option_cache = options.copy()
for k in options.iterkeys():
val = options[k]
option_dev = self._options[k]
option_lim = self._options_lim.get(k, None)
if isinstance(option_dev, BaseDevice):
try:
tostr_val = option_dev._tostr(val)
except AttributeError:
# Some devices like BaseDevice, cls_wrapDevice don't have _tostr
tostr_val = repr(val)
#elif isinstance(option_lim, ChoiceBase):
elif option_lim is not None:
try:
tostr_val = option_lim.tostr(val)
except AttributeError:
tostr_val = repr(val)
else:
tostr_val = repr(val)
try:
conv = self._options_conv[k]
except KeyError:
options[k] = tostr_val
else:
options[k] = conv(val, tostr_val)
return options
def _setdev(self, val):
# We only reach here if self._setdev_p is not None
if self._extra_set_func:
if self._extra_set_func(val, self) == "__SKIP__NEXT__":
return
val = self._tostr(val)
options = self._check_cache['options']
command = self._setdev_p
command = command.format(val=val, **options)
self.instr.write(command, **self._ask_write_opt)
if self._extra_set_after_func:
self._extra_set_after_func(val, self)
def _getdev(self, **kwarg):
if self._getdev_cache:
if kwarg == {}:
return self.getcache()
else:
raise SyntaxError, self.perror('This device does not handle _getdev with optional arguments')
try:
options = self._combine_options(**kwarg)
except InvalidAutoArgument:
self.setcache(None)
raise
command = self._getdev_p
command = command.format(**options)
self._get_para_checked()
ret = self.instr.ask(command, raw=self._raw, chunk_size=self._chunk_size, **self._ask_write_opt)
return self._fromstr(ret)
def _checkdev(self, val, **kwarg):
options = self._combine_options(**kwarg)
# all kwarg have been tested
self._check_cache['set_kwarg'] = {}
self._check_cache['options'] = options
if self._extra_check_func:
if self._extra_check_func(val, self) == "__SKIP__NEXT__":
return
super(scpiDevice, self)._checkdev(val)
#######################################################
## Readval device
#######################################################
class ReadvalDev(BaseDevice):
def _get_docstring(self, added=''):
if not self._do_local_doc:
return super(ReadvalDev, self)._get_docstring(added=added)
else:
basename = self._slave_dev.name
subdoc = self._slave_dev.__doc__
doc = """
This device behaves like doing a run_and_wait followed by a
{0}. When in async mode, it will trigger the device and then do
the {0}. It has the same parameters as the {0} device, so look for
its documentation.
It is appended here for convenience:
--- {0} doc
{1}
""".format(basename, subdoc)
return doc
def __init__(self, dev, autoinit=None, doc=None, **kwarg):
if doc is None:
self._do_local_doc = True
else:
self._do_local_doc = False
self._slave_dev = dev
if autoinit is None:
autoinit = dev._autoinit
super(ReadvalDev,self).__init__(redir_async=dev, autoinit=autoinit, doc=doc, get_has_check=True, **kwarg)
self._getdev_p = True
def _getdev(self, **kwarg):
self.instr._async_select([(self._slave_dev, kwarg)])
self.instr.run_and_wait()
ret = self._slave_dev.get(**kwarg)
self._last_filename = self._slave_dev._last_filename
return ret
def getformat(self, **kwarg):
d = self._slave_dev.getformat(**kwarg)
d['obj'] = self
return d
def _decode_block_header(s):
"""
Takes a string with the scpi block header
#niiiiivvvvvvvvvv
where n gives the number of i digits and iiiii gives the number of bytes v
It returns slice, nbytes, nheaders
i.e. a slice on the str to return the data
a value for the number of bytes
and a value for the length of the header
If the string does not start with a block format
returns a full slice (:), nbytes=-1, 0
"""
if len(s)==0 or s[0] != '#':
return slice(None), -1, 0
nh = int(s[1])
if nh: # a value of 0 is possible
nbytes = int(s[2:2+nh])
else:
nbytes = -1 # nh=0 is used for unknown length or lengths that require more than 9 digits.
return slice(2+nh, None), nbytes, 2+nh
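# A small worked example of the header format handled above (values assumed for
# illustration):
#   _decode_block_header('#3016' + 16*'x')   # -> (slice(5, None), 16, 5)
# i.e. nh=3 digits follow, they read 016 bytes of data, and the header is 5 chars long.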
def _decode_block_base(s, skip=None):
sl, nb, nh = _decode_block_header(s)
block = s[sl]
lb = len(block)
if nb != -1:
if lb < nb :
raise IndexError, 'Missing data for decoding. Got %i, expected %i'%(lb, nb)
elif lb > nb :
if lb-nb == 1 and (s[-1] in '\r\n'):
return block[:-1]
elif lb-nb == 2 and s[-2:] == '\r\n':
return block[:-2]
raise IndexError, 'Extra data for decoding. Got %i ("%s ..."), expected %i'%(lb, block[nb:nb+10], nb)
elif skip:
if isinstance(skip, basestring):
if block.endswith(skip):
return block[:-len(skip)]
else:
raise RuntimeError('Data is not terminated by requested skip string.')
else:
return block[:-skip]
return block
def _encode_block_base(s):
"""
This inserts the scpi block header before the string start.
see _decode_block_header for the description of the header
"""
N = len(s)
N_as_string = str(N)
N_as_string_len = len(N_as_string)
if N_as_string_len >= 10: # starting at 1G
header = '#0'
else:
header = '#%i'%N_as_string_len + N_as_string
return header+s
def _decode_block(s, t='<f8', sep=None, skip=None):
"""
sep can be None for binary encoding or ',' for ascii csv encoding
type can be np.float64 float32 int8 int16 int32 uint8 uint16 ...
or it can be entered as a string like 'float64'
skip can be used when the data length is unknown (#0....)
then you can enter the termination string you want removed from
the end, or an integer for the number of characters to remove from the end.
"""
block = _decode_block_base(s, skip=skip)
if sep is None or len(block) == 0:
return np.fromstring(block, t)
return np.fromstring(block, t, sep=sep)
def _encode_block(v, sep=None):
"""
Encodes the iterable v (array, list ..., or just a string)
into either a scpi binary block (including header) when sep=None (default)
or into a sep separated string. Often sep is ',' for scpi
"""
if sep is not None:
return ','.join(map(repr, v))
if isinstance(v, basestring):
s = v
else:
s = np.asarray(v).tostring()
return _encode_block_base(s)
def _decode_block_auto(s, t='<f8', skip=None):
if len(s) and s[0] == '#':
sep = None
else:
sep = ','
return _decode_block(s, t, sep=sep, skip=skip)
class Block_Codec(object):
def __init__(self, dtype='<f8', sep=None, skip=None, single_not_array=False, empty=None):
self._dtype = dtype
self._sep = sep
self._skip = skip
self._single_not_array = single_not_array
self._empty = empty
def __call__(self, input_str):
ret = _decode_block(input_str, self._dtype, self._sep, self._skip)
empty = self._empty
if empty is not None and len(ret) == 0:
ret = np.append(ret, empty)
if self._single_not_array and len(ret) == 1:
ret = ret[0]
return ret
def tostr(self, array):
dtype = self._dtype
if isinstance(array, (int, long, float)):
array = np.array([array], dtype=dtype)
elif isinstance(array, (list, tuple)):
array = np.array(array, dtype=dtype)
if array.dtype != self._dtype:
array = array.astype(self._dtype)
return _encode_block(array, self._sep)
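# A hedged usage sketch for Block_Codec (values assumed, not from the original
# code): it can serve as the str_type of a device that transfers binary blocks.
#
#   codec = Block_Codec(dtype='<f8')
#   s = codec.tostr(np.array([1.0, 2.0]))   # '#216' followed by 16 raw little-endian bytes
#   codec(s)                                # -> array([ 1.,  2.])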
class Block_Codec_Raw(object):
def __init__(self, dtype='<f8', sep=None):
self._dtype = dtype
def __call__(self, input_str):
return np.fromstring(input_str, self._dtype)
def tostr(self, array):
return array.tostring()
decode_float64 = functools.partial(_decode_block_auto, t='<f8') # np.float64, little-endian
decode_float32 = functools.partial(_decode_block_auto, t='<f4') # np.float32, little-endian
decode_uint32 = functools.partial(_decode_block_auto, t='<u4') # np.uint32, little-endian
decode_uint8_bin = functools.partial(_decode_block, t=np.uint8)
decode_uint16_bin = functools.partial(_decode_block, t='<u2') # np.uint16, little-endian
decode_int32 = functools.partial(_decode_block_auto, t='<i4') # np.int32, little-endian
decode_int8 = functools.partial(_decode_block_auto, t=np.int8)
decode_int16 = functools.partial(_decode_block_auto, t='<i2') # np.int16, little-endian
decode_complex128 = functools.partial(_decode_block_auto, t='<c16') # np.complex128, little-endian
def decode_float64_2col(s):
v = _decode_block_auto(s, t='<f8')
v.shape = (-1,2)
return v.T
def decode_float64_avg(s):
return _decode_block_auto(s, t='<f8').mean()
def decode_float64_std(s):
return _decode_block_auto(s, t='<f8').std(ddof=1)
def decode_float64_meanstd(s):
data = _decode_block_auto(s, t='<f8')
return data.std(ddof=1)/np.sqrt(len(data))
class scaled_float(object):
def __init__(self, scale):
""" scale is used as:
python_val = instrument_val * scale
instrument_val = python_val / scale
"""
self.scale = scale
def __call__(self, input_str):
return _fromstr_helper(input_str, float)*self.scale
def tostr(self, val):
return _tostr_helper(val/self.scale, float)
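# A hedged usage sketch for scaled_float (scale value assumed): with scale=1e-3
# an instrument replying in mV is exposed in volts.
#
#   conv = scaled_float(1e-3)
#   conv('1500')      # -> 1.5      (instrument_val * scale)
#   conv.tostr(1.5)   # -> '1500.0' (python_val / scale)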
class quoted_string(object):
def __init__(self, quote_char='"', quiet=False, tostr=True, fromstr=True):
"""
tostr, fromstr: are True to enable the quote adding/removal.
They can also be a string to use a different quote_char
"""
self._quote_char = quote_char
self._quiet = quiet
self._fromstr = fromstr
self._tostr = tostr
def __call__(self, quoted_str):
if not self._fromstr:
return quoted_str
if isinstance(self._fromstr, basestring):
quote_char = self._fromstr
else:
quote_char = self._quote_char
if len(quoted_str) and quote_char == quoted_str[0] and quote_char == quoted_str[-1]:
return quoted_str[1:-1]
else:
if not self._quiet:
print 'Warning, string <%s> does not start and end with <%s>'%(quoted_str, quote_char)
return quoted_str
def tostr(self, unquoted_str):
if not self._tostr:
return unquoted_str
if isinstance(self._tostr, basestring):
quote_char = self._tostr
else:
quote_char = self._quote_char
if quote_char in unquoted_str:
raise ValueError, 'The given string already contains a quote :%s:'%quote_char
return quote_char+unquoted_str+quote_char
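# A hedged usage sketch for quoted_string (values assumed):
#
#   q = quoted_string()
#   q('"hello"')      # -> 'hello'
#   q.tostr('hello')  # -> '"hello"'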
class quoted_list(quoted_string):
def __init__(self, sep=',', element_type=None, protect_sep=None, **kwarg):
super(quoted_list,self).__init__(**kwarg)
self._sep = sep
# element_type can be a list of types. If it is not long enough for the input
# it is repeated.
self._element_type = element_type
self._protect_sep = protect_sep
def calc_element_type(self, input_list_len):
elem_type = self._element_type
if elem_type is not None:
N = input_list_len
if isinstance(elem_type, list):
Nt = len(elem_type)
if N%Nt != 0:
raise RuntimeError('Unexpected number of elements')
elem_type = elem_type*(N//Nt)
else:
elem_type = [elem_type]*N
return elem_type
def __call__(self, quoted_l, skip_type=False):
unquoted = super(quoted_list,self).__call__(quoted_l)
if self._protect_sep is not None:
start_sym, end_sym = self._protect_sep
lst = []
s = 0
i = 0
while i<len(unquoted):
# skip a start_sym to end_sym region
c = unquoted[i]
if c in start_sym:
ind = start_sym.find(c)
i = unquoted.find(end_sym[ind],i+1)
if i == -1:
i = len(unquoted)
break
elif c in self._sep:
lst.append(unquoted[s:i])
i += 1
s = i
else:
i += 1
lst.append(unquoted[s:])
else:
lst = unquoted.split(self._sep)
if self._element_type is not None and not skip_type:
elem_type = self.calc_element_type(len(lst))
lst = [_fromstr_helper(elem, et) for elem, et in zip(lst, elem_type)]
return lst
def tostr(self, unquoted_l, skip_type=False):
if self._element_type is not None and not skip_type:
elem_type = self.calc_element_type(len(unquoted_l))
unquoted_l = [_tostr_helper(elem, et) for elem,et in zip(unquoted_l, elem_type)]
unquoted = self._sep.join(unquoted_l)
return super(quoted_list,self).tostr(unquoted)
class quoted_dict(quoted_list):
def __init__(self, empty='NO CATALOG', **kwarg):
super(quoted_dict,self).__init__(**kwarg)
self._empty = empty
def __call__(self, quoted_l):
l = super(quoted_dict,self).__call__(quoted_l, skip_type=True)
if l == [self._empty]:
return OrderedDict()
l = super(quoted_dict,self).__call__(quoted_l)
return OrderedDict(zip(l[0::2], l[1::2]))
def tostr(self, d):
skip_type = False
if d == {}:
l = [self._empty]
skip_type = True
else:
l = []
for k,v in d.iteritems():
l.extend([k ,v])
return super(quoted_dict,self).tostr(l, skip_type=skip_type)
# NOTE: a choice function tostr and __call__ (fromstr)
# is used when not specifying the str_type to scpi_device
# and when it is used as an option device for scpi_device (to obtain
# the string replacement for the command/question)
# Therefore even if you override the functions (by defining str_type)
# they could still be used if they are within options.
# Therefore it is recommended to make them work all the time
# (this might require passing in a type during __init__)
# See ChoiceDevDep for example
class ChoiceBase(object):
def __call__(self, input_str):
raise NotImplementedError, 'ChoiceBase subclass should overwrite __call__'
def tostr(self, val):
raise NotImplementedError, 'ChoiceBase subclass should overwrite tostr'
def __repr__(self):
raise NotImplementedError, 'ChoiceBase subclass should overwrite __repr__'
def __contains__(self, val):
raise NotImplementedError, 'ChoiceBase subclass should overwrite __contains__'
class ChoiceLimits(ChoiceBase):
"""
This ChoiceBase implements a min/max check.
"""
def __init__(self, min=None, max=None, str_type=None):
self.min = min
self.max = max
self.str_type = str_type
def __call__(self, input_str):
return _fromstr_helper(input_str, self.str_type)
def tostr(self, val):
return _tostr_helper(val, self.str_type)
def __contains__(self, val):
try:
_general_check(val, min=self.min, max=self.max)
except ValueError:
return False
else:
return True
def __repr__(self):
if self.min is None and self.max is None:
return 'Limits: any val'
elif self.min is None:
return 'Limits: val <= %s'%self.max
elif self.max is None:
return 'Limits: %s <= val'%self.min
else:
return 'Limits: %s <= val <= %s'%(self.min, self.max)
class ChoiceStrings(ChoiceBase):
"""
Initialize the class with a list of strings
s=ChoiceStrings('Aa', 'Bb', ..)
then 'A' in s or 'aa' in s will return True
irrespective of capitalization.
if no_short=True option is given, then only the long names are allowed
The elements need to have the following format:
ABCdef
where: ABC is known as the short name and
abcdef is known as the long name.
When using in or searching with index method
both long and short names are looked for
normalizelong and normalizeshort return the above
(short is upper, long is lower)
Long and short name can be the same.
redirects option is a dictionary of input strings to some other element.
It can be useful for devices that list ON or 1 as possible values.
Use it like {'1': 'ON'}
(a commented example follows this class)
"""
def __init__(self, *values, **kwarg):
# use **kwarg because we can't have keyword arguments after *arg
self.quotes = kwarg.pop('quotes', False)
no_short = kwarg.pop('no_short', False)
self.redirects = kwarg.pop('redirects', {})
if kwarg != {}:
raise TypeError, 'ChoiceStrings only accepts quotes, no_short and redirects as keyword arguments'
self.values = values
self.long = [v.lower() for v in values]
if no_short:
self.short = self.long
else:
self.short = [v.translate(None, string.ascii_lowercase).lower() for v in values]
# for short having '', use the long name instead
# this happens for a string all in lower case.
self.short = [s if s!='' else l for s,l in zip(self.short, self.long)]
def __contains__(self, x): # performs x in y; with y=Choice()
xl = x.lower()
inshort = xl in self.short
inlong = xl in self.long
return inshort or inlong
def index(self, value):
xl = value.lower()
try:
return self.short.index(xl)
except ValueError:
pass
return self.long.index(xl)
def normalizelong(self, x):
return self.long[self.index(x)]
def normalizeshort(self, x):
return self.short[self.index(x)].upper()
def __call__(self, input_str):
# this is called by dev._fromstr to convert a string to the needed format
input_str = self.redirects.get(input_str, input_str)
if self.quotes:
if input_str[0] != '"' or input_str[-1] != '"':
raise ValueError, 'The value --%s-- is not quoted properly'%input_str
return self.normalizelong(input_str[1:-1])
return self.normalizelong(input_str)
def tostr(self, input_choice):
# this is called by dev._tostr to convert a choice to the format needed by instrument
if self.quotes:
return '"%s"'%input_choice
return input_choice # no need to change. Already a proper string.
def __repr__(self):
return repr(self.values)
def __getitem__(self, index):
# index can be a single value: return it
# or it can be a slice or a list, return a new object with only the selected elements
# the list can be numbers or strings (which finds number with index)
if not isinstance(index, (slice, list)):
return self.values[index]
if isinstance(index, slice):
return ChoiceStrings(*self.values[index], quotes=self.quotes)
# we have a list
values = []
for i in index:
if isinstance(i, basestring):
i = self.index(i)
values.append(self.values[i])
return ChoiceStrings(*values, quotes=self.quotes)
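# A commented example for ChoiceStrings (choice values assumed, not from the
# original code):
#
#   s = ChoiceStrings('IMMediate', 'EXTernal')
#   'imm' in s                    # -> True (short name, case-insensitive)
#   s.normalizelong('EXT')        # -> 'external'
#   s.normalizeshort('external')  # -> 'EXT'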
class ChoiceSimpleMap(ChoiceBase):
"""
Given a dictionary where keys are what is used on the instrument, and
the values are what are used on the python side.
filter, when given, is a function applied to the input from the instrument.
It can be used to normalize the input entries
"""
def __init__(self, input_dict, filter=None):
self.dict = input_dict
self.keys = input_dict.keys()
self.values = input_dict.values()
self.filter = filter
if filter is not None:
for x in self.keys:
if filter(x) != x:
raise ValueError, "The input dict has at least one key where filter(key)!=key."
def __contains__(self, x):
return x in self.values
def __call__(self, input_key):
if self.filter is not None:
input_key = self.filter(input_key)
return self.dict[input_key]
def tostr(self, input_choice):
return self.keys[self.values.index(input_choice)]
def __repr__(self):
return repr(self.values)
Choice_bool_OnOff = ChoiceSimpleMap(dict(ON=True, OFF=False), filter=string.upper)
Choice_bool_YesNo = ChoiceSimpleMap(dict(YES=True, NO=False), filter=string.upper)
class ChoiceIndex(ChoiceBase):
"""
Initialize the class with a list of values or a dictionary.
The instrument uses the index of a list or the key of the dictionary,
which need to be integers. If you want a dictionary with keys that
are strings see ChoiceSimpleMap.
The normalize option, when true, rounds the float values for better
comparison. Use it with a list created from a calculation.
(a commented example follows this class)
"""
def __init__(self, list_or_dict, offset=0, normalize=False):
self._normalize = normalize
self._list_or_dict = list_or_dict
if isinstance(list_or_dict, np.ndarray):
list_or_dict = list(list_or_dict)
if isinstance(list_or_dict, list):
if self._normalize:
list_or_dict = [self.normalize_N(v) for v in list_or_dict]
self.keys = range(offset,offset+len(list_or_dict)) # instrument values
self.values = list_or_dict # pyHegel values
self.dict = dict(zip(self.keys, self.values))
else: # list_or_dict is dict
if self._normalize:
list_or_dict = {k:self.normalize_N(v) for k,v in list_or_dict.iteritems()}
self.dict = list_or_dict
self.keys = list_or_dict.keys()
self.values = list_or_dict.values()
@staticmethod
def normalize_N(v):
"""
This transforms 9.9999999999999991e-06 into 1e-05
so we can compare the result of a calculation with the theoretical one.
v can only be a single value.
Anything within +-1e-25 becomes 0.
"""
if abs(v) < 1e-25:
return 0.
return float('%.13e'%v)
def index(self, val):
if self._normalize:
val = self.normalize_N(val)
return self.values.index(val)
def __getitem__(self, key):
# negative indices will not work
return self.dict[key]
def __call__(self, input_str):
# this is called by dev._fromstr to convert a string to the needed format
val = int(input_str)
return self[val]
def tostr(self, input_choice):
# this is called by dev._tostr to convert a choice to the format needed by instrument
i = self.index(input_choice)
return str(self.keys[i])
def __contains__(self, x):
if self._normalize:
x = self.normalize_N(x)
return x in self.values
def __repr__(self):
return repr(self.values)
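# A commented example for ChoiceIndex (list values assumed):
#
#   c = ChoiceIndex(['AC', 'DC'], offset=1)
#   c('2')         # -> 'DC' (instrument index to pyHegel value)
#   c.tostr('AC')  # -> '1'  (pyHegel value to instrument index)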
class ChoiceDevDep(ChoiceBase):
""" This class selects options from a dictionnary of lists
or instances of ChoiceBase, based on the value of dev (match to the
dictionnary keys).
The keys can be values or an object that handles 'in' testing.
A default choice can be given with a key of None
sub_type is used to provide the proper from/to str converters.
Works the same as str_type from scpi_device.
if sub_type is None, it calls the to/from str of the selected value of
the dictionnary (which should be an instance of ChoiceBase).
"""
def __init__(self, dev, choices, sub_type=None):
self.choices = choices
self.dev = dev
self.sub_type = sub_type
def _get_choice(self):
val = self.dev.getcache()
for k, v in self.choices.iteritems():
if isinstance(k, (tuple, ChoiceBase)) and val in k:
return v
elif val == k:
return v
return self.choices.get(None, [])
def __call__(self, input_str):
if self.sub_type:
return _fromstr_helper(input_str, self.sub_type)
else:
return self._get_choice()(input_str)
def tostr(self, input_choice):
if self.sub_type:
return _tostr_helper(input_choice, self.sub_type)
else:
return self._get_choice().tostr(input_choice)
def __contains__(self, x):
return x in self._get_choice()
def __repr__(self):
return repr(self._get_choice())
class ChoiceDev(ChoiceBase):
"""
Get the choices from a device
Whether the device returns a dict or a list, it should work the same.
For a dict you can use keys or values (when keys fail)
Indexing with one of the allowed values returns the value for list
or the key/value pair for dict.
For a list also using an integer is allowed, and it picks the nth value.
sub_type is used to provide the proper from/to str converters.
Works the same as str_type from scpi_device.
sub_type=None (default) is the same as sub_type=str (i.e. no conversion).
The tostr converter uses the key of the dict.
"""
def __init__(self, dev, sub_type=None):
self.dev = dev
self.sub_type = sub_type
def _get_choices(self):
return self.dev.getcache()
def __call__(self, input_str):
return _fromstr_helper(input_str, self.sub_type)
def tostr(self, input_choice):
choices = self._get_choices()
ch = self[input_choice]
if isinstance(choices, dict):
ch = ch[0]
return _tostr_helper(ch, self.sub_type)
def __contains__(self, x):
choices = self._get_choices()
if isinstance(choices, dict):
if x in choices.keys():
return True
choices = choices.values()
return x in choices
def __getitem__(self, key):
choices = self._get_choices()
if key not in self and isinstance(choices, list): # key might be an integer
return choices[key]
if key in self:
if isinstance(choices, dict):
if key not in choices.keys() and key in choices.values():
key = [k for k,v in choices.iteritems() if v == key][0]
return key, choices[key]
else:
return key
raise IndexError, 'Invalid index. choose among: %r'%choices
def __repr__(self):
return repr(self._get_choices())
class ChoiceDevSwitch(ChoiceDev):
"""
Same as ChoiceDev but the value for set/check can also
be something else (a type different than in_base_type),
in which case the other_conv function should convert it to the in_base_type.
"""
def __init__(self, dev, other_conv, sub_type=None, in_base_type=basestring):
self.other_conv = other_conv
self.in_base_type = in_base_type
super(ChoiceDevSwitch, self).__init__(dev, sub_type=sub_type)
def cleanup_entry(self, x):
if not isinstance(x, self.in_base_type):
x = self.other_conv(x)
return x
def __getitem__(self, input_choice):
input_choice = self.cleanup_entry(input_choice)
return super(ChoiceDevSwitch, self).__getitem__(input_choice)
def __contains__(self, input_choice):
input_choice = self.cleanup_entry(input_choice)
return super(ChoiceDevSwitch, self).__contains__(input_choice)
def make_choice_list(list_values, start_exponent, end_exponent):
"""
given list_values=[1,3]
start_exponent = -6
end_exponent = -3
produces [1e-6, 3e-6, 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3]
"""
powers = np.logspace(start_exponent, end_exponent, end_exponent-start_exponent+1)
return (powers[:,None] * np.array(list_values)).flatten()
class ChoiceMultiple(ChoiceBase):
def __init__(self, field_names, fmts=int, sep=',', ending_sep=False, ending_sep_get=None, allow_missing_keys=False, reading_sep=None):
"""
This handles scpi commands that return a list of options like
1,2,On,1.34
We convert it into a dictionary to name and access the individual
parameters.
fmts can be a single converter or a list of converters
the same length as field_names
A converter is either a type or a (type, lims) tuple
where lims can be a tuple (min, max) with either one being None
or a list/object of choices.
Note that if you use a ChoiceBase object, you only need to specify
it as the type. It is automatically used as a choice also.
If one element of a list can affect the choices for a subsequent one,
see ChoiceMultipleDep
ending_sep, when True it adds (on writing) or removes (on reading) an extra sep
at the end of the string
ending_sep_get, when not None it overrides ending_sep for reading.
allow_missing_keys when True, will not produce an error for missing field_names when checking
reading_sep when not None is used instead of sep when obtaining data from the instrument.
(a commented example follows this class)
"""
self.field_names = field_names
if not isinstance(fmts, (list, np.ndarray)):
fmts = [fmts]*len(field_names)
fmts_type = []
fmts_lims = []
for f in fmts:
if not isinstance(f, tuple):
if isinstance(f, ChoiceBase):
f = (f,f)
else:
f = (f, None)
fmts_type.append(f[0])
fmts_lims.append(f[1])
self.fmts_type = fmts_type
self.fmts_lims = fmts_lims
self.sep = sep
self.ending_sep_set = ending_sep
self.ending_sep_get = ending_sep
if ending_sep_get is not None:
self.ending_sep_get = ending_sep_get
self.reading_sep = sep if reading_sep is None else reading_sep
self.allow_missing_keys = allow_missing_keys
def __call__(self, fromstr):
sep = self.reading_sep
if self.ending_sep_get:
if fromstr.endswith(sep):
fromstr = fromstr[:-len(sep)]
else:
raise ValueError('Expected ending sep in class %s'%self.__class__.__name__)
v_base = fromstr.split(sep)
if len(v_base) != len(self.field_names):
raise ValueError('Invalid number of parameters in class %s'%self.__class__.__name__)
v_conv = []
names = []
for k, val, fmt in zip(self.field_names, v_base, self.fmts_type):
if isinstance(fmt, ChoiceMultipleDep):
fmt.set_current_vals(dict(zip(names, v_conv)))
v_conv.append(_fromstr_helper(val, fmt))
names.append(k)
return dict_improved(zip(self.field_names, v_conv), _freeze=True)
def tostr(self, fromdict=None, **kwarg):
# we assume check (__contains__) was called so we don't need to
# do fmt.set_current_vals again or check validity of dictionary keys
if fromdict is None:
fromdict = kwarg
ret = []
for k, fmt in zip(self.field_names, self.fmts_type):
v = fromdict[k]
ret.append(_tostr_helper(v, fmt))
ret = self.sep.join(ret)
if self.ending_sep_set:
ret += self.sep
return ret
def __contains__(self, x): # performs x in y; with y=Choice(). Used for check
# Returns True if everything is fine.
# Otherwise raise a ValueError, a KeyError or a KeyError_Choices (for missing values)
xorig = x
x = x.copy() # make sure we don't change incoming dict
for k, fmt, lims in zip(self.field_names, self.fmts_type, self.fmts_lims):
if isinstance(fmt, ChoiceMultipleDep):
fmt.set_current_vals(xorig)
try:
val = x.pop(k) # generates KeyError if k not in x
except KeyError:
if self.allow_missing_keys:
continue
raise KeyError_Choices('key %s is missing'%k)
_general_check(val, lims=lims, msg_src='key %s'%k)
if x != {}:
raise KeyError('The following keys in the dictionnary are incorrect: %r'%x.keys())
return True
def __repr__(self):
r = ''
first = True
for k, lims in zip(self.field_names, self.fmts_lims):
if not first:
r += '\n'
first = False
r += 'key %s has limits %r'%(k, lims)
return r
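# A commented example for ChoiceMultiple (field names and formats assumed, not
# from the original code):
#
#   ch = ChoiceMultiple(['mode', 'freq'], [ChoiceStrings('AUTO', 'MANual'), float])
#   ch('AUTO,1.5e3')                         # -> dict_improved(mode='auto', freq=1500.0)
#   ch.tostr(dict(mode='auto', freq=1500.))  # -> 'auto,1500.0'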
class ChoiceMultipleDep(ChoiceBase):
""" This class selects options from a dictionnary of lists
or instances of ChoiceBase, based on the value of key (match to the
dictionnary keys). It is similar to ChoiceDevDep but selects on
a ChoiceMultiple element instead of a device.
It can only be used as a type for a ChoiceMultiple element.
The dictionary keys can be values or an object that handles 'in' testing.
A default choice can be given with a key of None
sub_type is used to provide the proper from/to str converters.
Works the same as str_type from scpi_device.
if sub_type is None, it calls the to/from str of the selected value of
the dictionary (which should be an instance of ChoiceBase).
Note that the dependent option currently requires the key to come before.
i.e. if the base is {'a':1, 'B':2} then 'B' can depend on 'a' but not
the reverse (the problem is with ChoiceMultiple __contains__, __call__
and tostr).
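Illustrative sketch (hypothetical keys and choices): used as the type of a
'unit' field whose allowed values depend on a preceding 'mode' field,
    ChoiceMultipleDep('mode', {'volt':['V', 'mV'], 'curr':['A', 'mA']}, sub_type=str)
makes checking of 'unit' test membership in the list selected by the current
value of 'mode'.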
"""
def __init__(self, key, choices, sub_type=None):
self.choices = choices
self.key = key
self.all_vals = {key:None}
self.sub_type = sub_type
def set_current_vals(self, all_vals):
self.all_vals = all_vals
def _get_choice(self):
val = self.all_vals[self.key]
for k, v in self.choices.iteritems():
if isinstance(k, (tuple, ChoiceBase)) and val in k:
return v
elif val == k:
return v
return self.choices.get(None, [])
def __call__(self, input_str):
if self.sub_type:
return _fromstr_helper(input_str, self.sub_type)
else:
return self._get_choice()(input_str)
def tostr(self, input_choice):
if self.sub_type:
return _tostr_helper(input_choice, self.sub_type)
else:
return self._get_choice().tostr(input_choice)
def __contains__(self, x):
return x in self._get_choice()
def __repr__(self):
return repr(self.choices)
class Dict_SubDevice(BaseDevice):
"""
Use this to gain access to a single/multiple element of a device returning a dictionary
from ChoiceMultiple.
"""
def __init__(self, subdevice, key, force_default=False, **kwarg):
"""
This device and the subdevice need to be part of the same instrument
(otherwise async will not work properly)
Here we will only modify the value of key in dictionary.
key can be a single value, or a list of values (in which case set/get will work
on a list)
force_default, set the default value of force used in check/set.
It can be True, False or 'slave' which means to let the subdevice handle the
insertion of the missing parameters
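Illustrative sketch (hypothetical device and key names): given a subdevice
whose values form a dict with keys 'freq' and 'ampl',
    Dict_SubDevice(some_subdevice, 'freq')
exposes only the 'freq' entry, while key=['freq', 'ampl'] would set/get both
of them as a list.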
"""
self._subdevice = subdevice
self._sub_key = key
self._force_default = force_default
subtype = self._subdevice.type
self._single_key = False
if not isinstance(key, list):
key = [key]
self._single_key = True
multi = False
else:
multi = key
self._sub_key = key
lims = []
for k in key:
if k not in subtype.field_names:
raise IndexError, "The key '%s' is not present in the subdevice"%k
lims.append( subtype.fmts_lims[subtype.field_names.index(k)] )
self._sub_lims = lims
setget = subdevice._setget
autoinit = subdevice._autoinit
trig = subdevice._trig
get_has_check = True
super(Dict_SubDevice, self).__init__(
setget=setget, autoinit=autoinit, trig=trig, multi=multi, get_has_check=get_has_check, **kwarg)
self._setdev_p = subdevice._setdev_p # needed to enable BaseDevice set in checking mode and also the check function
self._getdev_p = True # needed to enable BaseDevice get in Checking mode
def _get_docstring(self, added=''):
# we don't include options starting with _
if self._single_key:
added = """
This device sets/gets the '%s' dictionary element of a subdevice.
It uses the same options as that subdevice (%s)
"""%(self._sub_key[0], self._subdevice)
else:
added = """
This device sets/gets the '%s' dictionary elements of a subdevice.
It uses the same options as that subdevice (%s)
"""%(self._sub_key, self._subdevice)
return super(Dict_SubDevice, self)._get_docstring(added=added)
def setcache(self, val, nolock=False):
if nolock:
# not handled because getcache can lock
raise ValueError('Dict_SubDevice setcache does not handle nolock=True')
vals = self._subdevice.getcache()
if vals is not None:
vals = vals.copy()
if self._single_key:
val = [val]
if len(self._sub_key) != len(val):
raise ValueError('This Dict_SubDevice requires %i elements'%len(self._sub_key))
for k, v in zip(self._sub_key, val):
vals[k] = v
self._subdevice.setcache(vals)
def getcache(self, local=False):
if local:
vals = self._subdevice.getcache(local=True)
else:
vals = self._subdevice.getcache()
if vals is None:
ret = None
else:
ret = [vals[k] for k in self._sub_key]
if self._single_key:
ret = ret[0]
# Let's set the _cache variable anyway but it should never
# be used. _cache should always be accessed with getcache and this will
# bypass the value we set here.
super(Dict_SubDevice, self).setcache(ret)
return ret
def _force_helper(self, force):
if force is None:
force = self._force_default
return force
def _checkdev(self, val, force=None, **kwarg):
if self._single_key:
val = [val]
self._check_cache['cooked_val'] = val
if len(self._sub_key) != len(val):
raise ValueError(self.perror('This Dict_SubDevice requires %i elements'%len(self._sub_key)))
# Let's check the parameters individually, in order to help the user with
# a more descriptive message.
for i, limv in enumerate(zip(self._sub_lims, val)):
lim, v = limv
msg_src = None
if not self._single_key:
msg_src = 'element %i'%i
self._general_check(v, lims=lim, msg_src=msg_src)
force = self._force_helper(force)
allow = {True:True, False:'cache', 'slave':False}[force]
self._check_cache['allow'] = allow
op = self._check_cache['fnct_str']
# otherwise, the check will be done by set in _setdev below
if op == 'check':
# we need to complete the test as much as possible
vals = {k:v for k, v in zip(self._sub_key, val)}
if allow:
vals = self._subdevice._set_missing_dict_helper(vals, _allow=allow, **kwarg)
self._subdevice.check(vals, **kwarg)
def _getdev(self, **kwarg):
vals = self._subdevice.get(**kwarg)
if vals is None: # When checking and value not initialized
ret = [0] * len(self._sub_key)
else:
ret = [vals[k] for k in self._sub_key]
if self._single_key:
ret = ret[0]
return ret
def _setdev(self, val, force=None, **kwarg):
"""
force when True, it makes sure to obtain the
subdevice value with get.
when False, it uses getcache.
The default is in self._force_default
"""
val = self._check_cache['cooked_val']
allow = self._check_cache['allow']
vals = {k:v for k, v in zip(self._sub_key, val)}
if allow:
vals = self._subdevice._set_missing_dict_helper(vals, _allow=allow, **kwarg)
self._subdevice.set(vals, **kwarg)
class Lock_Visa(object):
"""
This handles the locking of the visa session.
Once locked, this prevents any other visa session (same process or not) to
the same instrument from communicating with it.
It is a reentrant lock (release the same number of times as acquire
to fully unlock).
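Typical use is as a context manager, e.g. (sketch)
    with instrument._lock_extra:
        ... talk to the instrument ...
which acquires the exclusive visa lock on entry and releases it on exit.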
"""
def __init__(self, vi):
self._vi = vi
self._count = 0
def _visa_lock(self, timeout=0.001):
"""
It returns True if the lock was acquired before timeout, otherwise it
returns False
"""
timeout = max(int(timeout/1e-3),1) # convert from seconds to milliseconds
try:
if not CHECKING():
self._vi.lock_excl(timeout)
except visa_wrap.VisaIOError as exc:
if exc.error_code == visa_wrap.constants.VI_ERROR_TMO:
# This is for Agilent IO visa library
return False
elif exc.error_code == visa_wrap.constants.VI_ERROR_RSRC_LOCKED:
# This is for National Instruments visa library
return False
else:
raise
else:
# we have lock
self._count += 1
return True
def release(self):
if not CHECKING():
self._vi.unlock() # could produce VI_ERROR_SESN_NLOCKED
else:
if self._count < 1:
raise visa_wrap.VisaIOError(visa_wrap.constants.VI_ERROR_SESN_NLOCKED)
self._count -= 1
def acquire(self):
return wait_on_event(self._visa_lock)
__enter__ = acquire
def __exit__(self, exc_type, exc_value, exc_traceback):
self.release()
def is_owned(self):
return self._count != 0
def force_release(self):
n = 0
expect = self._count
try:
while True:
self.release()
n += 1
except visa_wrap.VisaIOError as exc:
if exc.error_code != visa_wrap.constants.VI_ERROR_SESN_NLOCKED:
raise
if n:
print 'Released Visa lock', n, 'time(s) (expected %i releases)'%expect
else:
print 'Visa lock was not held (expected %i releases)'%expect
self._count = 0
#######################################################
## VISA Instrument
#######################################################
_SharedStructure_debug = False
class _SharedStructure(object):
"""
This shares a single ctype object across multiple processes.
Access it with the data attribute.
If the data attribute has members, accessing it directly on this object will be forwarded
to the data object.
Should only be used if the memory accesses are protected with locks (between processes).
Visa can do that (otherwise have a look at multiprocessing.synchronize._multiprocessing.SemLock)
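Illustrative sketch (the tag name is a placeholder): _SharedStructure(_LastTime,
'some-unique-tag') maps a _LastTime ctypes structure into shared memory; its
write_time/read_time fields can then be read and written directly on the
returned object from any process using the same tag.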
"""
def __init__(self, somectype, tagname):
import mmap
self._tagname = tagname
counter_type = ctypes.c_int32
counter_size = ctypes.sizeof(ctypes.c_int32)
size = counter_size + ctypes.sizeof(somectype)
if os.name != 'nt':
# we assume posix like. on linux need python-posix_ipc package (fedora)
import posix_ipc
self._shared_obj = posix_ipc.SharedMemory(tagname, posix_ipc.O_CREAT, size=size)
self.buffer = mmap.mmap(self._shared_obj.fd, size)
self._shared_obj.close_fd()
else: # for windows
self.buffer = mmap.mmap(-1, size, tagname=tagname)
self._counter = counter_type.from_buffer(self.buffer, 0)
self.data = somectype.from_buffer(self.buffer, counter_size)
self._add_count()
if _SharedStructure_debug:
print 'There are now %i users of %r'%(self._get_count(), tagname)
def __getattr__(self, name):
return getattr(self.data, name)
def __setattr__(self, name, value):
try:
data = object.__getattribute__(self, 'data')
if hasattr(data, name):
setattr(self.data, name, value)
return
except AttributeError:
pass
object.__setattr__(self, name, value)
def _get_count(self):
return self._counter.value
def _add_count(self):
self._counter.value += 1
def _dec_count(self):
self._counter.value -= 1
def __del__(self):
self._dec_count()
count = self._get_count()
if _SharedStructure_debug:
print 'Cleaned up mmap, counter now %i'%self._get_count()
self.buffer.close()
if count == 0 and os.name != 'nt':
self._shared_obj.unlink()
class _LastTime(ctypes.Structure):
_fields_ = [('write_time', ctypes.c_double),
('read_time', ctypes.c_double)]
def resource_info(visa_addr):
if isinstance(visa_addr, int):
visa_addr = _normalize_gpib(visa_addr)
return rsrc_mngr.resource_info(visa_addr)
class Keep_Alive(threading.Thread):
def __init__(self, interval, keep_alive_func):
# the function keep_alive_func should call update_time somewhere.
super(Keep_Alive, self).__init__()
self.keep_alive_func = ProxyMethod(keep_alive_func)
self.interval = interval
self.lck = threading.RLock()
self.update_time()
self.stop = False
self.daemon = True # This will allow python to exit
def run(self):
while True:
with self.lck:
if self.stop:
break
delta = time.time() - self.last
if delta >= self.interval:
self.keep_alive_func()
continue # skip wait (we just changed self.last)
wait = min(self.interval - delta, 5) # wait at most 5s
time.sleep(wait)
def cancel(self):
with self.lck:
self.stop = True
def update_time(self, no_lock=False):
with self.lck:
self.last = time.time()
#def __del__(self):
# print 'cleaning up keep_alive thread.'
class visaInstrument(BaseInstrument):
"""
Open visa instrument with a visa address.
If the address is an integer, it is taken as the
gpib address of the instrument on the first gpib bus.
Otherwise use a visa string like:
'GPIB0::12::INSTR'
'GPIB::12'
'USB0::0x0957::0x0118::MY49012345::0::INSTR'
'USB::0x0957::0x0118::MY49012345'
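For example (illustrative address), visaInstrument(12) and
visaInstrument('GPIB0::12::INSTR') refer to the same instrument on the
first gpib bus.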
"""
def __init__(self, visa_addr, skip_id_test=False, quiet_delete=False, keep_alive=False, keep_alive_time=15*60, **kwarg):
"""
skip_id_test when True will skip doing the idn test.
quiet_delete when True will prevent the print following an instrument delete
keep_alive can be True/False/'auto'. If auto, it is activated only when on a tcpip connection (hislip, socket, instr)
keep_alive_time is the time in seconds between keep alive requests.
"""
# need to initialize visa before calling BaseInstrument init
# which might require access to device
if isinstance(visa_addr, int):
visa_addr = _normalize_gpib(visa_addr)
self.visa_addr = visa_addr
self._keep_alive_thread = None
if not CHECKING():
self.visa = rsrc_mngr.open_resource(visa_addr, **kwarg)
self._lock_extra = Lock_Visa(self.visa)
#self.visa.timeout = 3 # in seconds
# use 2.9 because I was getting 3.0 rounded to 10s timeouts on some visa lib configuration
# 2.9 seemed to be rounded up to 3s instead
self.set_timeout = 2.9 # in seconds
to = time.time()
self._last_rw_time = _LastTime(to, to) # When wait time are not 0, it will be replaced
self._write_write_wait = 0.
self._read_write_wait = 0.
if (keep_alive == 'auto' and self.visa.is_tcpip) or keep_alive is True:
# TODO handle keep_alive (get inspired by bluefors)
# Could use the keep_alive setting for visa (at least socket/hislip)
# However it is 2 hours by default on windows. Which is often too long.
# self.visa.set_visa_attribute(visa_wrap.constants.VI_ATTR_TCPIP_KEEPALIVE, True)
# Also note that writing an empty string (not even the newline) will not produce any tcpip
# communication. So keepalive should send at least '\n' if that is valid.
self._keep_alive_thread = Keep_Alive(keep_alive_time, self._keep_alive_func)
BaseInstrument.__init__(self, quiet_delete=quiet_delete)
if not CHECKING():
if not skip_id_test:
idns = self.idn_split()
if not instruments_registry.check_instr_id(self.__class__, idns['vendor'], idns['model'], idns['firmware']):
print 'WARNING: this particular instrument idn is not attached to this class: operations might misbehave.'
#print self.__class__, idns
if self._keep_alive_thread:
self._keep_alive_thread.start()
def __del__(self):
#print 'Destroying '+repr(self)
# no need to call self.visa.close()
# because self.visa does that when it is deleted
if self._keep_alive_thread:
self._keep_alive_thread.cancel()
super(visaInstrument, self).__del__()
# Do NOT enable locked_calling for read_status_byte, otherwise we get a hang
# when instrument is on gpib using agilent visa. But do use lock visa
# otherwise read_stb could fail because of lock held in another thread/process
# The locked_calling problem is that the handler runs in a separate thread,
# appart from the main locked thread (when using getasync)
#@locked_calling
def read_status_byte(self):
# since on serial visa does the *stb? request for us
# might as well be explicit and therefore handle the rw_wait properly
# and do the locking.
if CHECKING():
return 0
if self.visa.is_serial():
return int(self.ask('*stb?'))
else:
with self._lock_extra:
ret = self.visa.read_stb()
self._keep_alive_update()
return ret
@locked_calling
def control_remotelocal(self, remote=False, local_lockout=False, all=False):
"""
For all=True:
remote=True: REN line is asserted -> when instruments are addressed
they will go remote
remote=False: REN line is deasserted -> All instruments go local and
will NOT go remote when addressed
This also clears lockout state
For local_lockout=True:
remote=True: All instruments on the bus go to local lockout state
Also current instrument goes remote.
remote=False: Same as all=True, remote=False followed by
all=True, remote=True
local lockout state means the local button is disabled on the instrument.
The instrument can be switched from local to remote by the gpib interface but
cannot be switched from remote to local using the instrument local button.
Not all instruments implement this lockout.
Otherwise:
remote=True: only this instrument goes into remote state.
remote=False: only this instrument goes into local state.
The instrument keeps its lockout state unchanged.
"""
# False for both all and local_lockout (first part) should proceed in the same way
# Here I use a different instruction but I think they both do the same
# i.e. VI_GPIB_REN_DEASSERT == VI_GPIB_REN_DEASSERT_GTL
# possibly they might behave differently on some other bus (gpib, tcp?)
# or for instruments that don't conform to proper 488.2 rules
# For those reason I keep the 2 different so it can be tested later.
# Unused state:
# VI_GPIB_REN_ASSERT_LLO : lockout only (no addressing)
if CHECKING():
return
cnsts = visa_wrap.constants
if all:
if remote:
val = cnsts.VI_GPIB_REN_ASSERT
else:
val = cnsts.VI_GPIB_REN_DEASSERT
elif local_lockout:
if remote:
val = cnsts.VI_GPIB_REN_ASSERT_ADDRESS_LLO
else:
val = cnsts.VI_GPIB_REN_DEASSERT_GTL
self.visa.control_ren(val)
val = cnsts.VI_GPIB_REN_ASSERT
else:
if remote:
val = cnsts.VI_GPIB_REN_ASSERT_ADDRESS
else:
val = cnsts.VI_GPIB_REN_ADDRESS_GTL
self.visa.control_ren(val)
self._keep_alive_update()
def _keep_alive_func(self):
self.write('') # should send just a newline.
def _keep_alive_update(self):
if self._keep_alive_thread:
self._keep_alive_thread.update_time()
def _do_wr_wait(self):
if self._last_rw_time.read_time > self._last_rw_time.write_time:
# last operation was a read
last_time = self._last_rw_time.read_time
wait_time = self._read_write_wait
else: # last operation was a write
last_time = self._last_rw_time.write_time
wait_time = self._write_write_wait
if wait_time == 0.:
return
if not isinstance(self._last_rw_time, _SharedStructure):
# The timeout needs to work across processes, so we now share the last time values
tagname = 'pyHegel-' + self.__class__.__name__ + '-' + hashlib.sha1(self.visa_addr).hexdigest()
old = self._last_rw_time
self._last_rw_time = _SharedStructure(_LastTime, tagname)
self._last_rw_time.read_time = old.read_time
self._last_rw_time.write_time = old.write_time
cur_time = time.time()
delta = (last_time+wait_time) - cur_time
if delta >0:
sleep(delta)
@locked_calling
def read(self, raw=False, count=None, chunk_size=None):
""" reads data.
The default is to read until an end is received in chunk_size blocks
(if chunk_size is not given, uses the default chunk_size)
It then strips the termination characters unless raw is True.
When a count is given, it does not wait for an end. It
only reads exactly count characters. It never strips the
termination characters.
"""
if CHECKING():
return ''
if count:
ret = self.visa.read_raw_n_all(count, chunk_size=chunk_size)
elif raw:
ret = self.visa.read_raw(size=chunk_size)
else:
ret = self.visa.read(chunk_size=chunk_size)
self._last_rw_time.read_time = time.time()
self._keep_alive_update()
return ret
@locked_calling
def write(self, val, termination='default'):
self._do_wr_wait()
if not CHECKING():
self.visa.write(val, termination=termination)
else:
if not isinstance(val, basestring):
raise ValueError(self.perror('The write val is not a string.'))
self._last_rw_time.write_time = time.time()
self._keep_alive_update()
@locked_calling
def ask(self, question, raw=False, chunk_size=None):
"""
Does write then read.
With raw=True, replaces read with a read_raw.
This is needed when dealing with binary data. The
base read strips newlines from the end always.
"""
# we prevent CTRL-C from breaking between write and read using context manager
with _delayed_signal_context_manager():
self.write(question)
ret = self.read(raw=raw, chunk_size=chunk_size)
return ret
def idn(self):
return self.ask('*idn?')
def idn_usb(self):
""" Returns the usb names attached to the vendor/product ids and the serial number
The return is a tuple (vendor, product, serial)
"""
if CHECKING():
return ('vendor', 'product', 'serial')
vendor = self.visa.get_visa_attribute(visa_wrap.constants.VI_ATTR_MANF_NAME)
product = self.visa.get_visa_attribute(visa_wrap.constants.VI_ATTR_MODEL_NAME)
serial = self.visa.get_visa_attribute(visa_wrap.constants.VI_ATTR_USB_SERIAL_NUM)
return (vendor, product, serial)
@locked_calling
def factory_reset(self):
"""
This returns the instrument to a known state.
Use CAREFULLY!
"""
self.write('*RST')
self.force_get()
@locked_calling
def clear(self):
"""
This sends the *cls 488.2 command that should clear the status/event/
errors (but not change the enable registers.)
It also cleans up any buffered status byte.
"""
self.write('*cls')
# some devices buffer the status byte so clear them
while self.read_status_byte()&0x40:
pass
@locked_calling
def _dev_clear(self):
""" This is the device clear instruction. For some devices it will
clear the output buffers.
(it should reset the interface state, but not change the state of
status/event registers, errors states. See clear for that.)
"""
if CHECKING():
return
self.visa.clear()
self._keep_alive_update()
@property
def set_timeout(self):
if CHECKING():
return None
timeout_ms = self.visa.timeout
if timeout_ms is None:
return None
else:
return timeout_ms/1000. # return in seconds
@set_timeout.setter
def set_timeout(self, seconds):
if seconds is None:
val = None
else:
val = int(seconds*1000.)
if CHECKING():
return
self.visa.timeout = val
def get_error(self):
return self.ask('SYSTem:ERRor?')
def _info(self):
gn, cn, p = BaseInstrument._info(self)
return gn, cn+'(%s)'%self.visa_addr, p
@locked_calling
def trigger(self):
# This should produce the hardware GET on gpib
# Another option would be to use the *TRG 488.2 command
if CHECKING():
return
self.visa.trigger()
self._keep_alive_update()
@locked_calling
def _get_dev_min_max(self, ask_str, str_type=float, ask='both'):
""" ask_str is the question string.
ask can be both, min or max. It always returns a tuple (min, max).
If the value was not obtained it will be None
See also dev._get_dev_min_max
"""
return _get_dev_min_max(self, ask_str, str_type, ask)
# To properly use self._conf_helper_cache, the caller (probably _current_config) should be locked.
def _conf_helper(self, *devnames, **kwarg):
ret = super(visaInstrument, self)._conf_helper(*devnames, **kwarg)
no_default, add_to = self._conf_helper_cache
if not no_default:
add_to(ret, 'visa_addr="%s"'%self.visa_addr)
return ret
#######################################################
## VISA Async Instrument
#######################################################
# Note about async:
# only one thread/process will have access to the device at a time
# others are waiting for a lock
# I only enable events (Queue or handlers) when I am about to use them
# and disable them when I am done waiting.
# wait_after_trig, run_and_wait and run in async should properly cleanup.
# In case where the cleanup is not done properly, it would leave
# some events/status in buffers and should be cleaned up on the
# next run.
# For agilent gpib, all device on bus will receive a handler/queue event.
# I use the handler (only one should be enabled. If not, then only one will have
# the lock, the others will be waiting on read_status_byte: so only the important one
# will actually reset the srq.)
# For NI gpib, only the device that has SRQ on will receive the handler/queue event.
# handlers are called within the gpib notify callback. All handlers
# across all process are called. If one of the callback is slow, it only affects that process
# thread. While in the callback, it does not add other events.
# However queued events are only produced when waiting for the events,
# they are not generated otherwise (for queued events, the driver does not setup
# a notify callback). It is possible to lose events if the read_status
# occurs between ibwait (which is every 1ms). However, again, the status read is protected
# by the lock, and only one thread should be running anyway.
# Note also that the auto serial poll is not jammed if the device holding the line SRQ is
# not open. The driver will just keep autoprobing (during ibwait requests) and update the
# device status so it can still find out if the device is requesting service.
class visaInstrumentAsync(visaInstrument):
def __init__(self, visa_addr, poll=False):
# poll can be True (to always poll), 'not_gpib' to poll for lan and usb but
# use the regular technique for gpib,
# or 'force_handler' to always use the handler
# the _async_sre_flag should match an entry somewhere (like in init)
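# Illustrative example of the poll option (hypothetical address):
#   visaInstrumentAsync('TCPIP::192.168.0.10::5025::SOCKET', poll='not_gpib')
# would use polling on that lan connection but SRQ events on a gpib one.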
self._async_sre_flag = 0x20 #=32 which is standard event status byte (contains OPC)
self._async_last_status = 0
self._async_last_status_time = 0
self._async_last_esr = 0
self._async_do_cleanup = False
super(visaInstrumentAsync, self).__init__(visa_addr)
self._async_mode = 'srq'
if CHECKING():
is_gpib = False
is_agilent = False
self._async_polling = True
self._RQS_status = -1
return
is_gpib = self.visa.is_gpib()
is_agilent = rsrc_mngr.is_agilent()
self._async_polling = False
if poll == True or (poll == 'not_gpib' and not is_gpib):
self._async_polling = True
self._RQS_status = -1
elif (is_gpib and is_agilent) or poll == 'force_handler':
# Note that the agilent visa using a NI usb gpib adapter (at least)
# disables the autopoll settings of NI
# Hence a SRQ on the bus produces events for all devices on the bus.
# If those events are not read, the buffer eventually fills up.
# This is a problem when using more than one visaInstrumentAsync
# To avoid that problem, I use a handler in that case.
self._RQS_status = 0 #-1: no handler, 0 not ready, other is status byte
self._RQS_done = FastEvent() #starts in clear state
self._proxy_handler = ProxyMethod(self._RQS_handler)
# _handler_userval is the ctype object representing the user value (0 here)
# It is needed for uninstall
if not CHECKING():
self._handler_userval = self.visa.install_visa_handler(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
self._proxy_handler, 0)
else:
self._RQS_status = -1
if self.visa.is_usb() and not self.visa.resource_manager.is_agilent():
# For some weird reason, for National Instruments visalib on usb
# the service requests are queued by default until I enable/disable the service
# just disabling does not work (says it is already disabled)
# this with NI visa 14.0.0f0
self.visa.enable_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
visa_wrap.constants.VI_QUEUE)
self.visa.disable_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
visa_wrap.constants.VI_QUEUE)
def __del__(self):
if self._RQS_status != -1:
# Not absolutely necessary, but lets be nice
self.visa.disable_event(visa_wrap.constants.VI_ALL_ENABLED_EVENTS,
visa_wrap.constants.VI_ALL_MECH)
# only necessary to keep handlers list in sync
# the actual handler is removed when the visa is deleted (vi closed)
self.visa.uninstall_visa_handler(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
self._proxy_handler, self._handler_userval)
super(visaInstrumentAsync, self).__del__()
def init(self, full=False):
# This clears the error state, and status/event flags?
self.clear()
if full:
self.write('*ese 1;*sre 32') # OPC flag
def _RQS_handler(self, vi, event_type, context, userHandle):
# For Agilent visalib (auto serial poll is off):
# Reading the status will clear the service request of this instrument
# if the SRQ line is still active, another call to the handler will occur
# after a short delay (30 ms I think) every time a read_status_byte is done
# on the bus (and SRQ is still active).
# For agilent visa, the SRQ status is queried every 30ms. So
# we might have to wait that time after the hardware signal is active
# before this handler is called.
# Because of locking, this only succeeds if we are owning the lock
# (so we are the ones waiting for data or nobody is.)
# Remember that we are called when any instrument on the gpib bus
# requests service (not only for this instrument)
status = self.read_status_byte()
#if status&0x40 and status & self._async_sre_flag:
#if status & self._async_sre_flag:
if status&0x40:
self._RQS_status = status
self._async_last_status = status
self._async_last_status_time = time.time()
#sleep(0.01) # give some time for other handlers to run
self._RQS_done.set()
#print 'Got it', vi
return visa_wrap.constants.VI_SUCCESS
def _get_esr(self):
if CHECKING():
return 0
return int(self.ask('*esr?'))
def _async_detect_poll_func(self):
if CHECKING():
status = 0x40
else:
status = self.read_status_byte()
if status & 0x40:
self._async_last_status = status
self._async_last_status_time = time.time()
self._async_last_esr = self._get_esr()
return True
return False
def _async_detect(self, max_time=.5): # 0.5 s max by default
"""
handles _async_mode of 'wait' (only wait delay), 'srq' (only detects srq)
'wait+srq' (wait followed by srq, so minimum of wait)
all the options starting with wait will warn once if async_wait is 0.
If you don't want the warning, replace 'wait' with '_wait' in the above strings.
"""
if self._async_mode not in ['wait', '_wait', 'wait+srq', '_wait+srq', 'srq']:
raise RuntimeError('Invalid async_mode selected')
if self._async_mode in ['wait', '_wait']:
return super(visaInstrumentAsync, self)._async_detect(max_time)
ret = False
if self._async_mode in ['wait+srq', '_wait+srq']:
if not super(visaInstrumentAsync, self)._async_detect(max_time):
return False
if self._async_polling:
if _retry_wait(self._async_detect_poll_func, max_time, delay=0.05):
ret = True
elif self._RQS_status == -1:
# On National Instrument (NI) visa
# the timeout actually used seems to be 16*ceil(max_time*1000/16) in ms.
wait_resp = self.visa.wait_on_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
int(max_time*1000), capture_timeout=True)
# context in wait_resp will be closed automatically
#if wait_resp.context is not None:
if not wait_resp.timed_out:
# only reset event flag. We know the bit that is set already (OPC)
self._async_last_esr = self._get_esr()
# only reset SRQ flag. We know the bit that is set already
self._async_last_status = self.read_status_byte()
self._async_last_status_time = time.time()
ret = True
else:
if self._RQS_done.wait(max_time):
#we assume status only had bit 0x20(event) and 0x40(RQS) set
#and event only has OPC set
# status has already been reset. Now reset event flag.
self._async_last_esr = self._get_esr()
self._RQS_done.clear() # so that we can detect the next SRQ if needed without doing _async_trig (_async_trig_cleanup)
ret = True
return ret
def _async_cleanup_after(self):
super(visaInstrumentAsync, self)._async_cleanup_after()
if self._async_do_cleanup:
self.visa.disable_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ, visa_wrap.constants.VI_ALL_MECH)
self._async_do_cleanup = False
def _async_trigger_helper(self):
self.write('INITiate;*OPC') # this assumes trig_src is immediate for agilent multi
def _async_trig_cleanup(self):
if not self._async_polling:
self._async_do_cleanup = True
if self._RQS_status != -1:
self.visa.enable_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
visa_wrap.constants.VI_HNDLR)
else:
self.visa.enable_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
visa_wrap.constants.VI_QUEUE)
# We detect the end of acquisition using *OPC and status byte.
if self._get_esr() & 0x01:
print 'Unread event byte!'
# A while loop is needed when National Instrument (NI) gpib autopoll is active
# This is the default when using the NI Visa.
n = 0
while self.read_status_byte() & 0x40: # This is SRQ bit
if self.visa.is_usb() and not self.visa.resource_manager.is_agilent():
# National instruments visa buffers a usb status byte (the SRQ bit only)
# Therefore a request will be seen in multiple threads/processes,
# so it is normal to have leftovers
pass
else:
n += 1
if n > 0:
print 'Unread(%i) status byte!'%n
if self._async_polling:
pass
elif self._RQS_status != -1:
self._RQS_status = 0
self._RQS_done.clear()
else:
# could use self.visa.discard_events(visa_wrap.constants.VI_EVENT_SERVICE_REQ,
# visa_wrap.constans.VI_QUEUE)
# but then we would not know how many events were discarded.
n = 0
try:
while True:
self.visa.wait_on_event(visa_wrap.constants.VI_EVENT_SERVICE_REQ, 0)
n += 1
except visa_wrap.VisaIOError as exc:
if exc.error_code == visa_wrap.constants.VI_ERROR_TMO:
pass
else:
raise
if n>0:
print 'Unread(%i) event queue!'%n
self._async_last_status = 0
self._async_last_esr = 0
@locked_calling
def _async_trig(self):
super(visaInstrumentAsync, self)._async_trig()
if 'srq' in self._async_mode:
self._async_trig_cleanup()
self._async_trigger_helper()
def _normalize_usb(usb_resrc):
usb_resrc = usb_resrc.upper() # make sure it is all uppercase
split = usb_resrc.split('::')
if split[-1] == 'INSTR':
del split[-1]
if len(split) != 5:
split.append('0')
usbn, manuf, model, serial, interfaceN = split
manuf = int(manuf, base=0)
model = int(model, base=0)
interfaceN = int(interfaceN, base=0)
return 'USB0::0x%04X::0x%04X::%s::%i'%(manuf, model, serial, interfaceN), manuf, model
def _normalize_gpib(gpib_resrc):
if isinstance(gpib_resrc, basestring):
gpib_resrc = gpib_resrc.upper()
split = gpib_resrc.split('::')
bus = 0
# split[0] is 'GPIB'
if len(split[0]) > 4:
bus = int(split[0][4:])
if split[-1] == 'INSTR':
del split[-1]
prim = int(split[1])
ret = 'GPIB%i::%i'%(bus, prim)
if len(split) > 2:
sec = int(split[2])
ret += '::%i'%sec
return ret+'::INSTR'
elif isinstance(gpib_resrc, int):
return 'GPIB0::%i::INSTR'%gpib_resrc
else:
raise TypeError('the address is not in an acceptable type.')
def _get_visa_idns(visa_addr, *args, **kwargs):
vi = visaInstrument(visa_addr, *args, skip_id_test=True, quiet_delete=True, **kwargs)
idns = vi.idn_split()
del vi
return idns
class visaAutoLoader(visaInstrument):
"""
You can use this class to automatically select the proper class to load
according to the idn returned by the instrument and the info in the registry.
It returns another class (it is a factory class).
Provide it at least a visa address.
For usb devices it will try the usb registry first. Otherwise, like for all
other devices, it will open it with visaInstrument first to read the idn and then
properly load it with the correct class.
if skip_usb is set to True, then the usb search is skipped
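For example (illustrative address),
    instr = visaAutoLoader('USB0::0x0957::0x0118::MY49012345')
first looks for a matching class in the usb registry and otherwise reads
the idn before instantiating the class found for it.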
"""
def __new__(cls, visa_addr, skip_usb=False, *args, **kwargs):
if not skip_usb and isinstance(visa_addr, basestring) and visa_addr.upper().startswith('USB'):
usb, manuf, model = _normalize_usb(visa_addr)
try:
cls = instruments_registry.find_usb(manuf, model)
except KeyError:
pass
else:
print 'Autoloading(USB) using instruments class "%s"'%cls.__name__
return cls(visa_addr, *args, **kwargs)
idns = _get_visa_idns(visa_addr, *args, **kwargs)
try:
cls = instruments_registry.find_instr(idns['vendor'], idns['model'], idns['firmware'])
except KeyError:
idn = '{vendor},{model},{firmware}'.format(**idns)
raise RuntimeError('Could not find an instrument for: %s (%s)'%(visa_addr, idn))
else:
print 'Autoloading using instruments class "%s"'%cls.__name__
return cls(visa_addr, *args, **kwargs)
| lgpl-3.0 |
nwjs/chromium.src | content/test/gpu/gpu_tests/gpu_integration_test_unittest.py | 2 | 16019 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
import mock
import sys
import run_gpu_integration_test
import gpu_project_config
from gpu_tests import context_lost_integration_test
from gpu_tests import gpu_helper
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import webgl_conformance_integration_test
from telemetry.testing import browser_test_runner
from telemetry.testing import fakes
from telemetry.internal.platform import system_info
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
VENDOR_NVIDIA = 0x10DE
VENDOR_AMD = 0x1002
VENDOR_INTEL = 0x8086
VENDOR_STRING_IMAGINATION = 'Imagination Technologies'
DEVICE_STRING_SGX = 'PowerVR SGX 554'
def _GetSystemInfo(
gpu='', device='', vendor_string='',
device_string='', passthrough=False, gl_renderer=''):
sys_info = {
'model_name': '',
'gpu': {
'devices': [
{'vendor_id': gpu, 'device_id': device,
'vendor_string': vendor_string, 'device_string': device_string},
],
'aux_attributes': {'passthrough_cmd_decoder': passthrough}
}
}
if gl_renderer:
sys_info['gpu']['aux_attributes']['gl_renderer'] = gl_renderer
return system_info.SystemInfo.FromDict(sys_info)
def _GetTagsToTest(browser, test_class=None, args=None):
test_class = test_class or gpu_integration_test.GpuIntegrationTest
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
possible_browser = fakes.FakePossibleBrowser()
possible_browser._returned_browser = browser
args = args or gpu_helper.GetMockArgs()
tags = set(test_class.GenerateTags(args, possible_browser))
return tags
def _GenerateNvidiaExampleTagsForTestClassAndArgs(test_class, args):
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
_ = [_ for _ in test_class.GenerateGpuTests(args)]
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
tags = _GetTagsToTest(browser, test_class)
return tags
class GpuIntegrationTestUnittest(unittest.TestCase):
def setUp(self):
self._test_state = {}
self._test_result = {}
def _RunGpuIntegrationTests(self, test_name, extra_args=None):
extra_args = extra_args or []
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
test_argv = [
run_gpu_integration_test.__file__, test_name,
'--write-full-results-to=%s' % temp_file.name] + extra_args
unittest_config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
with mock.patch.object(sys, 'argv', test_argv):
with mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
try:
run_gpu_integration_test.main()
with open(temp_file.name) as f:
self._test_result = json.load(f)
finally:
temp_file.close()
def testOverrideDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests(
'run_tests_with_expectations_files', ['--retry-limit=1'])
self.assertEqual(
self._test_result['tests']['a']['b']
['unexpected-fail.html']['actual'],
'FAIL FAIL')
def testDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('run_tests_with_expectations_files')
self.assertEqual(
self._test_result['tests']['a']['b']['expected-flaky.html']['actual'],
'FAIL FAIL FAIL')
def testTestNamePrefixGenerationInRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('simple_integration_unittest')
self.assertIn('expected_failure', self._test_result['tests'])
def testWithoutExpectationsFilesGenerateTagsReturnsEmptyList(self):
# we need to make sure that GenerateTags() returns an empty list if
# there are no expectations files returned from ExpectationsFiles() or
# else Typ will raise an exception
args = gpu_helper.GetMockArgs()
possible_browser = mock.MagicMock()
self.assertFalse(gpu_integration_test.GpuIntegrationTest.GenerateTags(
args, possible_browser))
def _TestTagGenerationForMockPlatform(self, test_class, args):
tag_set = _GenerateNvidiaExampleTagsForTestClassAndArgs(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(
set(['win', 'win10', 'd3d9', 'release',
'nvidia', 'nvidia-0x1cb3', 'no-passthrough']).issubset(tag_set))
return tag_set
def testGenerateContextLostExampleTagsForAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True)
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest,
args)
self.assertIn('asan', tag_set)
self.assertNotIn('no-asan', tag_set)
def testGenerateContextLostExampleTagsForNoAsan(self):
args = gpu_helper.GetMockArgs()
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest,
args)
self.assertIn('no-asan', tag_set)
self.assertNotIn('asan', tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion1andAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True, webgl_version='1.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['asan', 'webgl-version-1']).issubset(tag_set))
self.assertFalse(set(['no-asan', 'webgl-version-2']) & tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion2andNoAsan(self):
args = gpu_helper.GetMockArgs(is_asan=False, webgl_version='2.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['no-asan', 'webgl-version-2']) .issubset(tag_set))
self.assertFalse(set(['asan', 'webgl-version-1']) & tag_set)
def testGenerateNvidiaExampleTags(self):
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
self.assertEqual(
_GetTagsToTest(browser),
set(['win', 'win10', 'release', 'nvidia', 'nvidia-0x1cb3',
'd3d9', 'no-passthrough']))
def testGenerateVendorTagUsingVendorString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX,
passthrough=True, gl_renderer='ANGLE OpenGL ES')
self.assertEqual(
_GetTagsToTest(browser),
set(['mac', 'mojave', 'release', 'imagination',
'imagination-PowerVR-SGX-554',
'opengles', 'passthrough']))
def testGenerateVendorTagUsingDeviceString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string='illegal vendor string',
device_string='ANGLE (Imagination, Triangle Monster 3000, 1.0)')
self.assertEqual(
_GetTagsToTest(browser),
set(['mac', 'mojave', 'release', 'imagination',
'imagination-Triangle-Monster-3000',
'no-angle', 'no-passthrough']))
def testSimpleIntegrationTest(self):
self._RunIntegrationTest(
'simple_integration_unittest',
['unexpected_error',
'unexpected_failure'],
['expected_flaky',
'expected_failure'],
['expected_skip'],
['--retry-only-retry-on-failure', '--retry-limit=3',
'--test-name-prefix=unittest_data.integration_tests.SimpleTest.'])
# The number of browser starts includes the one call to StartBrowser at the
# beginning of the run of the test suite plus one for each RestartBrowser call,
# which happens after every failure
self.assertEquals(self._test_state['num_browser_starts'], 6)
def testIntegrationTesttWithBrowserFailure(self):
self._RunIntegrationTest(
'browser_start_failure_integration_unittest', [],
['unittest_data.integration_tests.BrowserStartFailureTest.restart'],
[], [])
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testIntegrationTestWithBrowserCrashUponStart(self):
self._RunIntegrationTest(
'browser_crash_after_start_integration_unittest', [],
[('unittest_data.integration_tests.BrowserCrashAfterStartTest.restart')],
[], [])
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testRetryLimit(self):
self._RunIntegrationTest(
'test_retry_limit',
['unittest_data.integration_tests.TestRetryLimit.unexpected_failure'],
[],
[],
['--retry-limit=2'])
# The number of attempted runs is 1 + the retry limit.
self.assertEquals(self._test_state['num_test_runs'], 3)
def _RunTestsWithExpectationsFiles(self):
self._RunIntegrationTest(
'run_tests_with_expectations_files',
['a/b/unexpected-fail.html'],
['a/b/expected-fail.html', 'a/b/expected-flaky.html'],
['should_skip'],
['--retry-limit=3', '--retry-only-retry-on-failure-tests',
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.')])
def testTestFilterCommandLineArg(self):
self._RunIntegrationTest(
'run_tests_with_expectations_files',
['a/b/unexpected-fail.html'],
['a/b/expected-fail.html'],
['should_skip'],
['--retry-limit=3', '--retry-only-retry-on-failure-tests',
('--test-filter=a/b/unexpected-fail.html::a/b/expected-fail.html::'
'should_skip'),
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.')])
def testUseTestExpectationsFileToHandleExpectedSkip(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['should_skip']
self.assertEqual(results['expected'], 'SKIP')
self.assertEqual(results['actual'], 'SKIP')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleUnexpectedTestFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['unexpected-fail.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL')
self.assertIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-fail.html']
self.assertEqual(results['expected'], 'FAIL')
self.assertEqual(results['actual'], 'FAIL')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFlakyTest(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-flaky.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL FAIL FAIL PASS')
self.assertNotIn('is_regression', results)
def testRepeat(self):
self._RunIntegrationTest(
'test_repeat',
[],
['unittest_data.integration_tests.TestRepeat.success'],
[],
['--repeat=3'])
self.assertEquals(self._test_state['num_test_runs'], 3)
def testAlsoRunDisabledTests(self):
self._RunIntegrationTest(
'test_also_run_disabled_tests',
['skip', 'flaky'],
# Tests that are expected to fail and do fail are treated as test passes
['expected_failure'],
[],
['--all', '--test-name-prefix',
'unittest_data.integration_tests.TestAlsoRunDisabledTests.',
'--retry-limit=3', '--retry-only-retry-on-failure'])
self.assertEquals(self._test_state['num_flaky_test_runs'], 4)
self.assertEquals(self._test_state['num_test_runs'], 6)
def testStartBrowser_Retries(self):
class TestException(Exception):
pass
def SetBrowserAndRaiseTestException():
gpu_integration_test.GpuIntegrationTest.browser = (
mock.MagicMock())
raise TestException
gpu_integration_test.GpuIntegrationTest.browser = None
gpu_integration_test.GpuIntegrationTest.platform = None
with mock.patch.object(
gpu_integration_test.serially_executed_browser_test_case.\
SeriallyExecutedBrowserTestCase,
'StartBrowser',
side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
with mock.patch.object(
gpu_integration_test.GpuIntegrationTest,
'StopBrowser') as mock_stop_browser:
with self.assertRaises(TestException):
gpu_integration_test.GpuIntegrationTest.StartBrowser()
self.assertEqual(mock_start_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
self.assertEqual(mock_stop_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
def _RunIntegrationTest(self, test_name, failures, successes, skips,
additional_args):
config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
temp_dir = tempfile.mkdtemp()
test_results_path = os.path.join(temp_dir, 'test_results.json')
test_state_path = os.path.join(temp_dir, 'test_state.json')
try:
browser_test_runner.Run(
config,
[test_name,
'--write-full-results-to=%s' % test_results_path,
'--test-state-json-path=%s' % test_state_path] + additional_args)
with open(test_results_path) as f:
self._test_result = json.load(f)
with open(test_state_path) as f:
self._test_state = json.load(f)
actual_successes, actual_failures, actual_skips = (
self._ExtractTestResults(self._test_result))
self.assertEquals(set(actual_failures), set(failures))
self.assertEquals(set(actual_successes), set(successes))
self.assertEquals(set(actual_skips), set(skips))
finally:
shutil.rmtree(temp_dir)
def _ExtractTestResults(self, test_result):
delimiter = test_result['path_delimiter']
failures = []
successes = []
skips = []
def _IsLeafNode(node):
test_dict = node[1]
return ('expected' in test_dict and
isinstance(test_dict['expected'], basestring))
node_queues = []
for t in test_result['tests']:
node_queues.append((t, test_result['tests'][t]))
while node_queues:
node = node_queues.pop()
full_test_name, test_dict = node
if _IsLeafNode(node):
if all(res not in test_dict['expected'].split() for res in
test_dict['actual'].split()):
failures.append(full_test_name)
elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
skips.append(full_test_name)
else:
successes.append(full_test_name)
else:
for k in test_dict:
node_queues.append(
('%s%s%s' % (full_test_name, delimiter, k),
test_dict[k]))
return successes, failures, skips
| bsd-3-clause |
gino3a/tm-boilerplate | tailbone/turn/__init__.py | 34 | 3644 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tailbone import BaseHandler
from tailbone import as_json
from tailbone import AppError
from tailbone import DEBUG
from tailbone import PREFIX
from tailbone.compute_engine import LoadBalancer
from tailbone.compute_engine import TailboneCEInstance
from tailbone.compute_engine import STARTUP_SCRIPT_BASE
import binascii
from hashlib import sha1
import hmac
import md5
import time
import webapp2
from google.appengine.api import lib_config
from google.appengine.ext import ndb
class _ConfigDefaults(object):
SECRET = "notasecret"
REALM = "localhost"
RESTRICTED_DOMAINS = ["localhost"]
SOURCE_SNAPSHOT = None
PARAMS = {}
_config = lib_config.register('tailboneTurn', _ConfigDefaults.__dict__)
# Prefixing internal models with Tailbone to avoid clobbering when using RESTful API
class TailboneTurnInstance(TailboneCEInstance):
SOURCE_SNAPSHOT = _config.SOURCE_SNAPSHOT
PARAMS = dict(dict(TailboneCEInstance.PARAMS, **{
"name": "turn-id",
"metadata": {
"items": [
{
"key": "startup-script",
"value": STARTUP_SCRIPT_BASE + """
# load turnserver
curl -O http://rfc5766-turn-server.googlecode.com/files/turnserver-1.8.7.0-binary-linux-wheezy-ubuntu-mint-x86-64bits.tar.gz
tar xvfz turnserver-1.8.7.0-binary-linux-wheezy-ubuntu-mint-x86-64bits.tar.gz
dpkg -i rfc5766-turn-server_1.8.7.0-1_amd64.deb
apt-get -fy install
IP=$(gcutil getinstance $(hostname) 2>&1 | grep external-ip | grep -oEi "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}")
turnserver --use-auth-secret -v -a -X $IP -f --static-auth-secret %s -r %s
""" % (_config.SECRET, _config.REALM)
},
],
}
}), **_config.PARAMS)
secret = ndb.StringProperty(default=_config.SECRET)
def credentials(username, secret=None):
timestamp = str(time.mktime(time.gmtime())).split('.')[0]
username = "{}:{}".format(username, timestamp)
if not secret:
secret = _config.SECRET
# force string
secret = str(secret)
password = hmac.new(secret, username, sha1)
password = binascii.b2a_base64(password.digest())[:-1]
return username, password
class TurnHandler(BaseHandler):
@as_json
def get(self):
if _config.RESTRICTED_DOMAINS:
if self.request.host_url not in _config.RESTRICTED_DOMAINS:
raise AppError("Invalid host.")
username = self.request.get("username")
if not username:
raise AppError("Must provide username.")
instance = LoadBalancer.find(TailboneTurnInstance)
if not instance:
raise AppError('Instance not found, try again later.')
username, password = credentials(username, instance.secret)
return {
"username": username,
"password": password,
"uris": [
"turn:{}:3478?transport=udp".format(instance.address),
"turn:{}:3478?transport=tcp".format(instance.address),
"turn:{}:3479?transport=udp".format(instance.address),
"turn:{}:3479?transport=tcp".format(instance.address),
],
}
app = webapp2.WSGIApplication([
(r"{}turn/?.*".format(PREFIX), TurnHandler),
], debug=DEBUG)
| apache-2.0 |
markoshorro/gem5 | src/arch/x86/isa/insts/simd64/integer/exit_media_state.py | 72 | 2182 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop EMMS {
emms
};
# FEMMS
'''
| bsd-3-clause |
srvg/ansible | lib/ansible/utils/unicode.py | 158 | 1166 | # (c) 2012-2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
__all__ = ('unicode_wrap',)
def unicode_wrap(func, *args, **kwargs):
"""If a function returns a string, force it to be a text string.
Use with partial to ensure that filter plugins will return text values.
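A sketch of the intended pattern (some_filter is a hypothetical filter function):
    from functools import partial
    wrapped = partial(unicode_wrap, some_filter)
    wrapped(arg)  # returns text even if some_filter returned bytes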
"""
return to_text(func(*args, **kwargs), nonstring='passthru')
| gpl-3.0 |
rickerc/ceilometer_audit | tests/storage/test_get_engine.py | 3 | 1507 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/
"""
import mox
import testtools
from ceilometer import storage
from ceilometer.storage import impl_log
class EngineTest(testtools.TestCase):
def test_get_engine(self):
conf = mox.Mox().CreateMockAnything()
conf.database = mox.Mox().CreateMockAnything()
conf.database.connection = 'log://localhost'
engine = storage.get_engine(conf)
self.assertIsInstance(engine, impl_log.LogStorage)
def test_get_engine_no_such_engine(self):
conf = mox.Mox().CreateMockAnything()
conf.database = mox.Mox().CreateMockAnything()
conf.database.connection = 'no-such-engine://localhost'
try:
storage.get_engine(conf)
except RuntimeError as err:
self.assertIn('no-such-engine', unicode(err))
| apache-2.0 |
tehguy/dndtools | dndtools/dnd/spells/urls.py | 3 | 2039 | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns(
'dnd.spells.views',
# spells
url(
r'^$',
'spell_index',
name='spell_index',
),
# spells > rulebook
url(
r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/$',
'spells_in_rulebook',
name='spells_in_rulebook',
),
# spells > rulebook > spell
url(
r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<spell_slug>[^/]+)--(?P<spell_id>\d+)/$',
'spell_detail',
name='spell_detail',
),
# spells > descriptors
url(
r'^descriptors/$',
'spell_descriptor_list',
name='spell_descriptor_list',
),
# spells > descriptors > descriptor
url(
r'^descriptors/(?P<spell_descriptor_slug>[^/]+)/$',
'spell_descriptor_detail',
name='spell_descriptor_detail',
),
# spells > schools
url(
r'^schools/$',
'spell_school_list',
name='spell_school_list',
),
# spells > schools > detail
url(
r'^schools/(?P<spell_school_slug>[^/]+)/$',
'spell_school_detail',
name='spell_school_detail',
),
# spells > sub_schools > detail
url(
r'^sub-schools/(?P<spell_sub_school_slug>[^/]+)/$',
'spell_sub_school_detail',
name='spell_sub_school_detail',
),
# spells > domains
url(
r'^domains/$',
'spell_domain_list',
name='spell_domain_list',
),
# spells > domains > detail
url(
r'^domains/(?P<spell_domain_slug>[^/]+)/$',
'spell_domain_detail',
name='spell_domain_detail',
),
# spells > domains > detail (variant)
url(
r'^domains/(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<spell_domain_slug>[^/]+)/$',
'spell_domain_detail',
name='spell_variant_domain_detail',
),
url(
r'^verify/spell/(?P<spell_id>\d+)/$',
'spell_verify',
name='spell_verify',
),
)
| mit |
thecaffiend/jupyternb_to_c_over_websockets | server_side/driverclient/client.py | 1 | 3402 | """
Adapted from: https://docs.python.org/3.4/howto/sockets.html
TODO: Base this on the tornado TCPClient class instead of this half-baked
implementation.
TODO: Do co-routines *or* callbacks. This goes for the whole thing, not just
this class.
"""
import socket
from tornado import (
gen,
)
class DriverClient:
"""
Client for talking to the c driver socket server.
TODO: Send/receive both need MSGLEN, and need to format the msgs right
(bytes-like objects).
TODO: Clean up use of coroutines vs callbacks (everywhere)
"""
def __init__(self, sock=None):
"""
Create the driver socket
sock: An already created socket to use
"""
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self.sock = sock
def connect(self, host, port):
"""
Connect to the driver's socket.
host: Host IP address
port: Port to connect to
"""
self.sock.connect((host, port))
# TODO: use select to determine when to read, otherwise this will throw
# an occasional exception on read...
self.sock.setblocking(0)
@gen.coroutine
def drvsend(self, msg):
"""
Send to the driver.
msg: String message to convert to a bytes-like object and send to the
server.
"""
totalsent = 0
# TODO: for now this is a string, so just encode it and send. make more
# robust
# while totalsent < MSGLEN:
# sent = self.sock.send(msg[totalsent:])
# if sent == 0:
# raise RuntimeError("socket connection broken")
# totalsent = totalsent + sent
sent = self.sock.send(msg.encode())
return sent
@gen.coroutine
def drvreceive(self):
"""
Receive from the driver.
"""
chunks = []
bytes_recd = 0
# TODO: hack so MSGLEN is defined. fix
MSGLEN = 2048
# TODO: get chunked read working
# while bytes_recd < MSGLEN:
# chunk = self.sock.recv(min(MSGLEN - bytes_recd, 2048))
# if chunk == b'':
# raise RuntimeError("socket connection broken")
# chunks.append(chunk)
# bytes_recd = bytes_recd + len(chunk)
# return b''.join(chunks)
ret = self.sock.recv(2048)
print('Received %s from the API server' % (ret))
return ret
def close(self):
"""
Close our socket.
"""
self.sock.close()
@gen.coroutine
def handle_ws_command(self, cmd, cmd_val):
"""
Handle a command from the wsserver.
"""
print('DriverClient is handling (%s, %s)' % (cmd, cmd_val))
sent = self.drvsend("{%s, %s}" % (cmd, cmd_val))
return sent
@gen.coroutine
def handle_ws_msg(self, msg):
"""
Handle a message (dict) from the wsserver.
"""
print('DriverClient is handling %s' % (msg))
sent = self.drvsend("%s" % (msg))
return sent
# TODO: just for testing, remove
def test_echo(self):
self.connect("127.0.0.1", 60002)
self.drvsend("test")
self.drvreceive()
self.sock.close()
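# Illustrative-only sketch (not part of the original client): one way to act
# on the TODO above about using select before reading from the non-blocking
# socket. `wait_readable` is a hypothetical helper shown for clarity.
def wait_readable(sock, timeout=1.0):
    """Return True if `sock` becomes readable within `timeout` seconds."""
    import select
    readable, _, _ = select.select([sock], [], [], timeout)
    return bool(readable)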
| mit |
Endi1/Penguin | penguin/tests/tests.py | 1 | 1539 | import unittest
import os
import shutil
from penguin.main import newSite, buildSite, publishPosts
from penguin.penguin import Penguin
class TestContentCreation(unittest.TestCase):
def test_build_project(self):
newSite('test_site')
os.chdir('test_site')
site = Penguin()
buildSite(site)
self.assertTrue(os.path.exists('site'))
self.assertTrue(os.path.exists('site/index.html'))
self.assertTrue(os.path.exists('site/about/index.html'))
os.chdir('..')
shutil.rmtree('test_site')
def test_new_page(self):
newSite('test_site')
os.chdir('test_site')
site = Penguin()
buildSite(site)
with open('new.html', 'w') as f:
f.write('This is a new page')
f.close()
buildSite(site)
self.assertTrue(os.path.exists('site/new/index.html'))
with open('site/new/index.html', 'r') as f:
self.assertEqual(f.read(), 'This is a new page')
f.close()
os.chdir('..')
shutil.rmtree('test_site')
def test_publish(self):
newSite('test_site')
os.chdir('test_site')
site = Penguin()
buildSite(site)
publishPosts()
self.assertTrue(os.path.exists('site/posts'))
self.assertTrue(os.path.exists('site/posts/first.html'))
os.chdir('..')
shutil.rmtree('test_site')
suite = unittest.TestLoader().loadTestsFromTestCase(TestContentCreation)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
mrbean-bremen/pyfakefs | pyfakefs/extra_packages.py | 1 | 1253 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports external packages that replace or emulate internal packages.
If the external module is not present, the built-in module is imported.
"""
try:
import pathlib2
pathlib = pathlib2
except ImportError:
pathlib2 = None
try:
import pathlib
except ImportError:
pathlib = None
try:
import scandir
use_scandir_package = True
use_builtin_scandir = False
except ImportError:
try:
from os import scandir # noqa: F401
use_builtin_scandir = True
use_scandir_package = False
except ImportError:
use_builtin_scandir = False
use_scandir_package = False
use_scandir = use_scandir_package or use_builtin_scandir
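# Illustrative-only sketch (not part of the original module): how a consumer
# could branch on the flags defined above. `list_entries` is a hypothetical
# helper used purely for demonstration.
def list_entries(path):
    """Return directory entry names with whichever scandir is available."""
    if use_scandir_package:
        return [entry.name for entry in scandir.scandir(path)]
    if use_builtin_scandir:
        return [entry.name for entry in scandir(path)]
    import os
    return os.listdir(path)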
| apache-2.0 |
girving/tensorflow | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | 1 | 55544 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import optimizer as optimizer_v1
from tensorflow.python.training import slot_creator
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g, *args):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g, *args):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v, *args) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v, *args)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0], *args)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices, *args)
update_op = optimizer._resource_apply_dense(g, self._v, *args)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
def _var_key_v2(var):
"""Key for representing a primary variable, for looking up slots."""
# pylint: disable=protected-access
if hasattr(var, "_distributed_container"):
distributed_container = var._distributed_container()
assert distributed_container is not None
if context.executing_eagerly():
return distributed_container._unique_id
return distributed_container._shared_name
if context.executing_eagerly():
return var._unique_id
return var.op.name
def _resolve(value, name):
if callable(value):
value = value()
return ops.convert_to_tensor(value, name=name)
def _is_dynamic(value):
"""Returns true if __init__ arg `value` should be re-evaluated each step."""
if callable(value): return True
# Don't need to do anything special in graph mode, since dynamic values
# will propagate correctly automatically.
# TODO(josh11b): Add per-device caching across steps using variables for
# truly static values once we add distributed support.
if context.executing_eagerly() and isinstance(
value, resource_variable_ops.ResourceVariable):
return True
return False
class _OptimizerV2State(object):
"""Holds per-graph and per-step optimizer state.
Use _init_with_static_hyper() to create the state for a graph, and then
_copy_with_dynamic_hyper() to convert that to state for a particular step.
The difference between the two is that the former only has hyper
parameter values that are static and the latter also has values that
can change every step (according to _is_dynamic()).
"""
def __init__(self, op_name):
self._op_name = op_name
def _init_with_static_hyper(self, hyper):
"""Initialize a fresh state object from hyper dict."""
# self._hyper contains a dict from name to a dict with the Tensor values.
# This dict starts with a single item with key "None" with the hyper
# parameter value converted to a Tensor. Other items have dtype keys
# with that Tensor cast to that dtype.
with ops.init_scope():
self._hyper = {name: {None: ops.convert_to_tensor(value, name=name)}
for name, (dynamic, value) in sorted(hyper.items())
if not dynamic}
self._slots = {}
self._non_slot_dict = {}
# Extra state to help Optimizers implement Checkpointable. Holds information
# about variables which will be restored as soon as they're created.
self._deferred_dependencies = {} # Non-slot variables
self._deferred_slot_restorations = {} # Slot variables
def _copy_with_dynamic_hyper(self, hyper, distribution, non_slot_devices):
"""Create a new state object for a particular step."""
ret = _OptimizerV2State(self._op_name)
# pylint: disable=protected-access
ret._slots = self._slots
ret._non_slot_dict = self._non_slot_dict
ret._deferred_dependencies = self._deferred_dependencies
ret._deferred_slot_restorations = self._deferred_slot_restorations
ret._hyper = {name: {None: _resolve(value, name)}
for name, (dynamic, value) in sorted(hyper.items())
if dynamic}
ret._hyper.update(self._hyper)
ret._non_slot_devices = non_slot_devices
ret._distribution = distribution
return ret
def _variables(self):
"""Returns a list of all variables held by self."""
optimizer_variables = list(self._non_slot_dict.values())
for variable_dict in self._slots.values():
for slot_for_variable in variable_dict.values():
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def create_slot(self, var, val, slot_name, optional_op_name=None):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot(
var, val, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def create_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, optional_op_name=None):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def zeros_slot(self, var, slot_name, optional_op_name=None):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(
var, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable,
optional_op_name=None):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
"""
slot_variable = self.get_slot(var=variable, name=slot_name)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = checkpointable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.create_slot(
var=variable,
val=initializer,
slot_name=slot_name,
optional_op_name=optional_op_name)
# Optimizers do not have unconditional dependencies on their slot
# variables (nor do any other objects). They are only saved if the
# variables they were created for are also saved.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
variable_key = _var_key_v2(variable)
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
named_slots = self._slots.get(name, None)
if not named_slots:
return None
return named_slots.get(_var_key_v2(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def create_non_slot(self, initial_value, name, colocate_with=None):
"""Add an extra variable, not associated with a slot."""
v = self._non_slot_dict.get(name, None)
if v is None:
if colocate_with is None: colocate_with = self._non_slot_devices
with self._distribution.colocate_vars_with(colocate_with):
# TODO(josh11b): Use get_variable() except for the legacy Adam use case.
v = variable_scope.variable(initial_value, name=name, trainable=False)
self._non_slot_dict[name] = v
deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
for checkpoint_position in sorted(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid,
reverse=True):
checkpoint_position.restore(v)
return v
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key_v2(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def get_non_slot(self, name):
"""Returns the non-slot variable identified by `name`."""
return self._non_slot_dict.get(name, None)
def get_hyper(self, name, dtype=None):
"""Returns the `name` hyper parameter, optionally cast to `dtype`."""
dtype_dict = self._hyper[name]
# Do we have the value cast to dtype already cached? This should always
# succeed when dtype is None.
if dtype in dtype_dict:
return dtype_dict[dtype]
# Not cached, cast to dtype and save the result in the cache.
result = math_ops.cast(dtype_dict[None], dtype)
dtype_dict[dtype] = result
return result
class OptimizerV2(optimizer_v1.Optimizer):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
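For example, to make sure every gradient has been computed before any
variable is updated, a call might look like this (a minimal sketch; `cost`
and `var_list` are assumed to already exist):
```python
opt = GradientDescentOptimizer(learning_rate=0.1)
opt_op = opt.minimize(cost, var_list=var_list,
                      gate_gradients=GradientDescentOptimizer.GATE_GRAPH)
```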
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
This can be useful if you want to log or debug a training algorithm, report stats
about the slots, etc.
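For example, the slots created for a variable can be inspected after the
update ops have been built (a minimal sketch; `opt` and `var` are assumed
to already exist):
```python
for slot_name in opt.get_slot_names():
  slot_var = opt.get_slot(var, slot_name)
  if slot_var is not None:
    print(slot_name, slot_var)
```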
### Non-slot variables
Some optimizer subclasses, such as `AdamOptimizer` have variables that
are not associated with the variables to train, just the step itself.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
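For example, a hyper parameter can be passed as a callable that is
re-evaluated on every `apply_gradients()` call (a minimal sketch;
`current_lr` is a hypothetical container mutated by the training loop):
```python
current_lr = {'value': 0.1}
opt = GradientDescentOptimizer(learning_rate=lambda: current_lr['value'])
```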
### State
Internal methods are passed a `state` argument with the correct
values to use for the slot and non-slot variables, and the hyper
parameters.
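For example, a subclass's update method typically reads its hyper
parameters from `state` (a minimal sketch of a hypothetical subclass; it is
not one of the optimizers shipped with TensorFlow):
```python
class MySGD(OptimizerV2):
  def __init__(self, learning_rate, name="MySGD"):
    super(MySGD, self).__init__(name)
    self._set_hyper("learning_rate", learning_rate)

  def _resource_apply_dense(self, grad, var, state):
    lr = state.get_hyper("learning_rate", var.dtype.base_dtype)
    return var.assign_sub(lr * grad)
```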
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
# Note: We intentionally don't call parent __init__.
# Optimizer._create_slots was replaced by _create_vars in OptimizerV2.
if (self.__class__._create_slots.__code__ is not # pylint: disable=protected-access
OptimizerV2._create_slots.__code__):
raise RuntimeError("Override _create_vars instead of _create_slots when "
"descending from OptimizerV2 (class %s)" %
self.__class__.__name__)
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = False
self._name = name
# Map from graph_key to state for that graph. We use the graph_key
# since it works in both eager and graph mode, and gives the outer
# graph inside functions.
tower_context = distribution_strategy_context.get_tower_context()
if tower_context is None:
# In a cross-tower context for a DistributionStrategy, which means
# only one Optimizer will be created, not one per tower.
self._per_graph_state = {}
else:
# We use get_tower_context().merge_call() to get a single dict
# shared across all model replicas when running with a
# DistributionStrategy.
self._per_graph_state = tower_context.merge_call(lambda _: {})
# Hyper parameters, and whether they should be re-evaluated every step.
self._hyper = {}
def _set_hyper(self, name, value):
self._hyper[name] = (_is_dynamic(value), value)
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None, stop_gradients=None,
scale_loss_by_num_towers=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls to `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
stop_gradients: Optional. A Tensor or list of tensors not to differentiate
through.
scale_loss_by_num_towers: Optional boolean. If true, scale the loss
down by the number of towers. By default, auto-detects whether this
is needed.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss, stop_gradients=stop_gradients,
scale_loss_by_num_towers=scale_loss_by_num_towers)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None, stop_gradients=None,
scale_loss_by_num_towers=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable taking
no arguments which returns the value to minimize. When eager execution
is enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
stop_gradients: Optional. A Tensor or list of tensors not to differentiate
through.
scale_loss_by_num_towers: Optional boolean. If true, scale the loss
down by the number of towers. By default, auto-detects whether this
is needed.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and `loss` is
not callable.
@compatibility(eager)
When eager execution is enabled, `gate_gradients`, `aggregation_method`,
and `colocate_gradients_with_ops` are ignored.
@end_compatibility
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
# Scale loss for number of towers (callable-loss case). In this case,
# we have to be careful to call distribute_lib.get_loss_reduction()
# *after* loss() is evaluated, so we know what loss reduction it uses.
if scale_loss_by_num_towers is None:
scale_loss_by_num_towers = (
distribute_lib.get_loss_reduction() ==
variable_scope.VariableAggregation.MEAN)
if scale_loss_by_num_towers:
num_towers = distribution_strategy_context.get_distribution_strategy(
).num_towers
if num_towers > 1:
loss_value *= 1. / num_towers
if var_list is None:
var_list = tape.watched_variables()
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
if context.executing_eagerly():
raise RuntimeError(
"`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
# Scale loss for number of towers (non-callable-loss case).
if scale_loss_by_num_towers is None:
scale_loss_by_num_towers = (
distribute_lib.get_loss_reduction() ==
variable_scope.VariableAggregation.MEAN)
if scale_loss_by_num_towers:
num_towers = distribution_strategy_context.get_distribution_strategy(
).num_towers
if num_towers > 1:
loss *= 1. / num_towers
if gate_gradients not in [optimizer_v1.Optimizer.GATE_NONE,
optimizer_v1.Optimizer.GATE_OP,
optimizer_v1.Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss, var_refs, grad_ys=grad_loss,
gate_gradients=(gate_gradients == optimizer_v1.Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
stop_gradients=stop_gradients)
if gate_gradients == optimizer_v1.Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
# This is a default implementation of apply_gradients() that can be shared
# by most optimizers. It relies on the subclass implementing the following
# methods: _create_vars(), _prepare(), _apply_dense(), and _apply_sparse().
# Filter out variables with gradients of `None`.
grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
if not grads_and_vars:
raise ValueError("No variables provided.")
filtered = tuple((g, v) for (g, v) in grads_and_vars if g is not None)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([str(v) for _, v in grads_and_vars],))
return distribution_strategy_context.get_tower_context().merge_call(
self._distributed_apply, filtered, global_step=global_step, name=name)
def _get_or_create_state(self, var_list=None):
"""Either looks up or creates `_OptimizerV2State`.
If any variables are available, they should be passed via the `var_list`
argument, and these will be used to determine the graph to create/retrieve
state for. Otherwise the returned state is for the current default graph.
Args:
var_list: A list of variables to extract a graph from.
Returns:
An `_OptimizerV2State` object.
"""
# Determine the graph_key from the current graph.
eager_execution = context.executing_eagerly()
if eager_execution or var_list is None:
graph = ops.get_default_graph()
else:
graph = ops._get_graph_from_inputs(var_list) # pylint: disable=protected-access
assert graph is not None
graph_key = graph._graph_key # pylint: disable=protected-access
# Get the per graph state by looking up the graph_key.
if graph_key in self._per_graph_state:
per_graph_state = self._per_graph_state[graph_key]
else:
per_graph_state = _OptimizerV2State(self._name)
per_graph_state._init_with_static_hyper(self._hyper) # pylint: disable=protected-access
self._per_graph_state[graph_key] = per_graph_state
return per_graph_state
def _distributed_apply(self, distribution, grads_and_vars, global_step, name):
"""`apply_gradients` for use with a `DistributionStrategy`."""
reduced_grads = distribution.batch_reduce(
variable_scope.VariableAggregation.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
unwrapped_var_list = [x for v in var_list for x in distribution.unwrap(v)]
eager_execution = context.executing_eagerly()
if eager_execution:
# Give a clear error in this case instead of "name not supported
# for Eager Tensors" when we compute non_slot_devices.
for v in unwrapped_var_list:
if isinstance(v, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", v)
with ops.name_scope(name, self._name) as name:
per_graph_state = self._get_or_create_state(var_list=unwrapped_var_list)
# Include the current value of any dynamic hyper parameters in `state`.
non_slot_devices = distribution.non_slot_devices(var_list)
state = per_graph_state._copy_with_dynamic_hyper( # pylint: disable=protected-access
self._hyper, distribution, non_slot_devices)
# Create any slot and non-slot variables we need in `state`.
with ops.init_scope():
self._create_vars(var_list, state)
with ops.name_scope(name): # Re-enter name_scope created above
# Give the child class a chance to do something before we start
# applying gradients.
self._prepare(state)
def update(v, g):
"""Update variable `v` using gradient `g`."""
assert v is not None
# Convert the grad to Tensor or IndexedSlices if necessary, and
# look up a processor for each variable's type.
try:
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError(
"Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
processor = _get_processor(v)
# We colocate all ops created in _apply_dense or _apply_sparse
# on the same device as the variable.
# TODO(apassos): figure out how to get the variable name here.
scope_name = "" if eager_execution else v.op.name
# device_policy is set because non-mirrored tensors will be read in
# `update_op`.
# TODO(josh11b): Make different state objects for each device to
# avoid needing to set the device_policy.
with ops.name_scope("update_" + scope_name), \
context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
return processor.update_op(self, g, state)
# Use the processors to update the variables.
update_ops = []
for grad, var in grads_and_vars:
update_ops.extend(distribution.update(var, update, grad, grouped=False))
# Give the child class a chance to do something after applying
# gradients
def finish():
# TODO(josh11b): Make different state objects for each device to
# avoid needing to set the device_policy.
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
return self._finish(state)
update_ops = control_flow_ops.group(update_ops)
with ops.control_dependencies([update_ops]):
finish_updates = distribution.update_non_slot(
non_slot_devices, finish, grouped=False)
# We said grouped=False, which means finish_updates is always a list.
# It will be [None] when finish() returns None.
if finish_updates == [None]:
finish_updates = [update_ops]
# Update `global_step` (if any).
if global_step is None:
apply_updates = distribution.group(finish_updates, name=name)
else:
with ops.control_dependencies(finish_updates):
def update_global_step(global_step, name):
return global_step.assign_add(1, read_value=False, name=name)
apply_updates = distribution.update(global_step, update_global_step,
name)
# Add the training op to the TRAIN_OP graph collection in graph mode.
if not eager_execution:
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
state = self._get_state_for_var(var)
return state.get_slot(var, name) if state is not None else None
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
state = self._get_per_graph_state()
return state.get_slot_names() if state is not None else []
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
state = self._get_per_graph_state()
return state._variables() if state is not None else [] # pylint: disable=protected-access
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _create_vars(self, var_list, state):
"""Create all slots needed by the variables and any non-slot variables.
Args:
var_list: A list of `Variable` objects.
state: An object with these methods:
`create_slot(var, val, slot_name, optional_op_name)`,
`create_slot_with_initializer(`
`var, initializer, shape, dtype, slot_name, optional_op_name)`,
`zeros_slot(var, slot_name, optional_op_name)`,
`create_non_slot_variable(initial_value, name, colocate_with)`,
`get_hyper(name)`
"""
# No slots needed by default
pass
def _prepare(self, state):
"""Code to execute before applying gradients.
Note that most uses of _prepare() in Optimizer have been subsumed
by explicit support for hyper parameters in OptimizerV2
Args:
state: An object with a `get_hyper(name)` method.
Returns:
Return value will be ignored.
"""
pass
def _apply_dense(self, grad, var, state):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle, state):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(
self, grad, handle, indices, state):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
# pylint: disable=protected-access
summed_grad, unique_indices = optimizer_v1._deduplicate_indexed_slices(
values=grad, indices=indices)
# pylint: enable=protected-access
return self._resource_apply_sparse(
summed_grad, handle, unique_indices, state)
def _resource_apply_sparse(self, grad, handle, indices, state):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var, state):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
# pylint: disable=protected-access
summed_values, unique_indices = optimizer_v1._deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
# pylint: enable=protected-access
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var, state)
def _apply_sparse(self, grad, var, state):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, state):
"""Do what is needed to finish the update.
This is called inside a scope colocated with any non-slot variables.
Args:
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
The operation to apply updates, or None if no updates.
"""
return None
# --------------
# Utility methods for subclasses.
# --------------
def _get_per_graph_state(self):
# pylint: disable=protected-access
return self._per_graph_state.get(ops.get_default_graph()._graph_key, None)
def _get_state_for_var(self, var):
# pylint: disable=protected-access
return self._per_graph_state.get(var._graph_key, None)
# --------------
# Overridden methods from Checkpointable.
# --------------
def _track_checkpointable(self, *args, **kwargs):
"""Optimizers may not track dependencies. Raises an error."""
raise NotImplementedError(
"Optimizers may not have dependencies. File a feature request if this "
"limitation bothers you.")
@property
def _checkpoint_dependencies(self):
"""From Checkpointable. Gather graph-specific non-slot variables to save."""
current_graph_non_slot_variables = []
state = self._get_per_graph_state()
if state is not None:
for name, variable_object in sorted(
state._non_slot_dict.items(), # pylint: disable=protected-access
# Avoid comparing variables
key=lambda item: item[0]):
current_graph_non_slot_variables.append(
checkpointable.CheckpointableReference(
name=name, ref=variable_object))
# Note: ignores super(); Optimizers may not have any dependencies outside of
# state objects.
return current_graph_non_slot_variables
def _lookup_dependency(self, name):
"""From Checkpointable. Find a non-slot variable in the current graph."""
state = self._get_per_graph_state()
if state is None:
return None
else:
return state.get_non_slot(name)
@property
def _deferred_dependencies(self):
"""Lets Checkpointable know where non-slot variables are created.
If necessary, creates a new state object for the current default graph.
Checkpointable will then add entries to that state's deferred dependency
dictionary. The state object will check that dictionary when creating
non-slot variables, restoring their value if an entry is found.
Returns:
A dictionary which holds deferred dependencies for the current default
graph.
"""
state = self._get_or_create_state()
return state._deferred_dependencies # pylint: disable=protected-access
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Checkpointable: Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored.
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
state = self._get_or_create_state(var_list=[variable])
state._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=slot_variable_position,
slot_name=slot_name,
variable=variable,
optional_op_name=self._name)
def get_config(self):
"""Returns the config of the optimimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
return {"name": self._name}
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
return cls(**config)
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
return self._hyper[hyperparameter_name][1]
# --------------
# Unsupported parent methods
# --------------
def _slot_dict(self, slot_name):
raise NotImplementedError(
"_slot_dict() method unsupported in OptimizerV2")
def _get_or_make_slot(self, var, val, slot_name, op_name):
raise NotImplementedError(
"_get_or_make_slot() method unsupported in OptimizerV2")
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
raise NotImplementedError(
"_get_or_make_slot_with_initializer() method unsupported in "
"OptimizerV2")
def _create_non_slot_variable(self, initial_value, name, colocate_with):
raise NotImplementedError(
"_create_non_slot_variable() method unsupported in OptimizerV2")
def _get_non_slot_variable(self, name, graph=None):
raise NotImplementedError(
"_get_non_slot_variable() method unsupported in OptimizerV2")
def _non_slot_variables(self):
raise NotImplementedError(
"_non_slot_variables() method unsupported in OptimizerV2")
| apache-2.0 |
prasadtalasila/INET-Vagrant-Demos | Nonce_Demo/impacket-0.9.12/impacket/testcases/dot11/test_FrameManagementAssociationResponse.py | 6 | 7476 | #!/usr/bin/env python
# sorry, this is very ugly, but I'm in python 2.5
import sys
sys.path.insert(0,"../..")
from dot11 import Dot11, Dot11Types, Dot11ManagementFrame, Dot11ManagementAssociationResponse
from ImpactDecoder import RadioTapDecoder
from binascii import hexlify
import unittest
class TestDot11ManagementAssociationResponseFrames(unittest.TestCase):
def setUp(self):
# 802.11 Management Frame
#
self.rawframe="\x00\x00\x1c\x00\xef\x18\x00\x00\xc2L\xfa\x00<\x00\x00\x00\x10\x02\x85\t\xa0\x00\xb4\x9e_\x00\x00\x16\x10\x00:\x01p\x1a\x04T\xe3\x86\x00\x18\xf8lvB\x00\x18\xf8lvB\xf0\x02\x11\x04\x00\x00\x04\xc0\x01\x08\x82\x84\x8b\x96$0Hl2\x04\x0c\x12\x18`\xdd\t\x00\x10\x18\x02\x02\xf0\x00\x00\x00f%\xdf7"
self.radiotap_decoder = RadioTapDecoder()
radiotap=self.radiotap_decoder.decode(self.rawframe)
self.assertEqual(str(radiotap.__class__), "dot11.RadioTap")
self.dot11=radiotap.child()
self.assertEqual(str(self.dot11.__class__), "dot11.Dot11")
type = self.dot11.get_type()
self.assertEqual(type,Dot11Types.DOT11_TYPE_MANAGEMENT)
subtype = self.dot11.get_subtype()
self.assertEqual(subtype,Dot11Types.DOT11_SUBTYPE_MANAGEMENT_ASSOCIATION_RESPONSE)
typesubtype = self.dot11.get_type_n_subtype()
self.assertEqual(typesubtype,Dot11Types.DOT11_TYPE_MANAGEMENT_SUBTYPE_ASSOCIATION_RESPONSE)
self.management_base=self.dot11.child()
self.assertEqual(str(self.management_base.__class__), "dot11.Dot11ManagementFrame")
self.management_association_response=self.management_base.child()
self.assertEqual(str(self.management_association_response.__class__), "dot11.Dot11ManagementAssociationResponse")
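# The decode chain built above is RadioTap -> Dot11 -> Dot11ManagementFrame ->
# Dot11ManagementAssociationResponse; each child() call peels off one layer.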
def test_01(self):
'Test Header and Tail Size field'
self.assertEqual(self.management_base.get_header_size(), 22)
self.assertEqual(self.management_base.get_tail_size(), 0)
self.assertEqual(self.management_association_response.get_header_size(), 33)
self.assertEqual(self.management_association_response.get_tail_size(), 0)
def test_02(self):
'Test Duration field'
self.assertEqual(self.management_base.get_duration(), 0x013a)
self.management_base.set_duration(0x1234)
self.assertEqual(self.management_base.get_duration(), 0x1234)
def test_03(self):
'Test Destination Address field'
addr=self.management_base.get_destination_address()
self.assertEqual(addr.tolist(), [0x70,0x1a,0x04,0x54,0xe3,0x86])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_destination_address(addr)
self.assertEqual(self.management_base.get_destination_address().tolist(), [0x12,0x1a,0x04,0x54,0xe3,0x34])
def test_04(self):
'Test Source Address field'
addr=self.management_base.get_source_address()
self.assertEqual(addr.tolist(), [0x00,0x18,0xF8,0x6C,0x76,0x42])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_source_address(addr)
self.assertEqual(self.management_base.get_source_address().tolist(), [0x12,0x18,0xF8,0x6C,0x76,0x34])
def test_05(self):
'Test BSSID Address field'
addr=self.management_base.get_bssid()
self.assertEqual(addr.tolist(), [0x00,0x18,0xF8,0x6C,0x76,0x42])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_bssid(addr)
self.assertEqual(self.management_base.get_bssid().tolist(), [0x12,0x18,0xF8,0x6C,0x76,0x34])
def test_06(self):
'Test Sequence control field'
self.assertEqual(self.management_base.get_sequence_control(), 0x02f0)
self.management_base.set_sequence_control(0x1234)
self.assertEqual(self.management_base.get_sequence_control(), 0x1234)
def test_07(self):
'Test Fragment number field'
self.assertEqual(self.management_base.get_fragment_number(), 0x00)
self.management_base.set_fragment_number(0xF1) # 4-bit field
self.assertEqual(self.management_base.get_fragment_number(), 0x01)
def test_08(self):
'Test Sequence number field'
self.assertEqual(self.management_base.get_sequence_number(), 47)
self.management_base.set_sequence_number(0xF234) # 12-bit field
self.assertEqual(self.management_base.get_sequence_number(), 0x0234)
def test_09(self):
'Test Management Frame Data field'
frame_body="\x11\x04\x00\x00\x04\xc0\x01\x08\x82\x84\x8b\x96$0Hl2\x04\x0c\x12\x18`\xdd\t\x00\x10\x18\x02\x02\xf0\x00\x00\x00"
self.assertEqual(self.management_base.get_frame_body(), frame_body)
def test_10(self):
'Test Management Association Response Capabilities field'
self.assertEqual(self.management_association_response.get_capabilities(), 0x0411)
self.management_association_response.set_capabilities(0x4321)
self.assertEqual(self.management_association_response.get_capabilities(), 0x4321)
def test_11(self):
'Test Management Association Response Status Code field'
self.assertEqual(self.management_association_response.get_status_code(), 0x0000)
self.management_association_response.set_status_code(0x4321)
self.assertEqual(self.management_association_response.get_status_code(), 0x4321)
def test_12(self):
'Test Management Association Response Association ID field'
self.assertEqual(self.management_association_response.get_association_id(), 0xc004)
self.management_association_response.set_association_id(0x4321)
self.assertEqual(self.management_association_response.get_association_id(), 0x4321)
def test_13(self):
'Test Management Association Response Supported_rates getter/setter methods'
self.assertEqual(self.management_association_response.get_supported_rates(), (0x82, 0x84, 0x8b, 0x96, 0x24, 0x30, 0x48, 0x6c))
self.assertEqual(self.management_association_response.get_supported_rates(human_readable=True), (1.0, 2.0, 5.5, 11.0, 18.0, 24.0, 36.0, 54.0))
self.management_association_response.set_supported_rates((0x12, 0x98, 0x24, 0xb0, 0x48, 0x60))
self.assertEqual(self.management_association_response.get_supported_rates(), (0x12, 0x98, 0x24, 0xb0, 0x48, 0x60))
self.assertEqual(self.management_association_response.get_supported_rates(human_readable=True), (9.0, 12.0, 18.0, 24.0, 36.0, 48.0))
self.assertEqual(self.management_association_response.get_header_size(), 33-2)
def test_14(self):
'Test Management Vendor Specific getter/setter methods'
self.assertEqual(self.management_association_response.get_vendor_specific(), [("\x00\x10\x18","\x02\x02\xf0\x00\x00\x00")])
self.management_association_response.add_vendor_specific("\x00\x00\x40", "\x04\x04\x04\x04\x04\x04")
self.assertEqual(self.management_association_response.get_vendor_specific(),
[("\x00\x10\x18", "\x02\x02\xf0\x00\x00\x00"),
("\x00\x00\x40", "\x04\x04\x04\x04\x04\x04"),
])
self.assertEqual(self.management_association_response.get_header_size(), 33+11)
suite = unittest.TestLoader().loadTestsFromTestCase(TestDot11ManagementAssociationResponseFrames)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 |
bcl/anaconda | tests/glade/check_markup.py | 5 | 5424 | #!/usr/bin/python3
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: David Shea <[email protected]>
#
"""
Python script to check that properties in glade using Pango markup contain
valid markup.
"""
# Ignore any interruptible calls
# pylint: disable=interruptible-system-call
import sys
import argparse
# Import translation methods if needed
if ('-t' in sys.argv) or ('--translate' in sys.argv):
try:
from pocketlint.translatepo import translate_all
except ImportError:
print("Unable to load po translation module")
sys.exit(99)
from pocketlint.pangocheck import markup_nodes, markup_match, markup_necessary
try:
from lxml import etree
except ImportError:
print("You need to install the python-lxml package to use check_markup.py")
sys.exit(99)
class PangoElementException(Exception):
def __init__(self, element):
Exception.__init__(self)
self.element = element
def __str__(self):
return "Invalid element %s" % self.element
def _validate_pango_markup(root):
"""Validate parsed pango markup.
:param etree.ElementTree root: The pango markup parsed as an XML ElementTree
:raises PangoElementException: If the pango markup contains unknown elements
"""
if root.tag not in markup_nodes:
raise PangoElementException(root.tag)
for child in root:
_validate_pango_markup(child)
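# Illustrative use (assuming "b" appears in pocketlint's markup_nodes):
#   _validate_pango_markup(etree.fromstring("<markup><b>ok</b></markup>"))        # passes
#   _validate_pango_markup(etree.fromstring("<markup><blink>x</blink></markup>"))  # raises PangoElementException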
def check_glade_file(glade_file_path, po_map=None):
glade_success = True
with open(glade_file_path) as glade_file:
# Parse the XML
glade_tree = etree.parse(glade_file)
# Search for label properties on objects that have use_markup set to True
for label in glade_tree.xpath(".//property[@name='label' and ../property[@name='use_markup']/text() = 'True']"):
if po_map:
try:
label_texts = po_map.get(label.text, label.get("context"))
except KeyError:
continue
lang_str = " for language %s" % po_map.metadata['Language']
else:
label_texts = (label.text,)
lang_str = ""
# Wrap the label text in <markup> tags and parse the tree
for label_text in label_texts:
try:
# pylint: disable=unescaped-markup
pango_tree = etree.fromstring("<markup>%s</markup>" % label_text)
_validate_pango_markup(pango_tree)
# Check if the markup is necessary
if not markup_necessary(pango_tree):
print("Markup could be expressed as attributes at %s%s:%d" % \
(glade_file_path, lang_str, label.sourceline))
glade_success = False
except etree.XMLSyntaxError:
print("Unable to parse pango markup at %s%s:%d" % \
(glade_file_path, lang_str, label.sourceline))
glade_success = False
except PangoElementException as px:
print("Invalid pango element %s at %s%s:%d" % \
(px.element, glade_file_path, lang_str, label.sourceline))
glade_success = False
else:
if po_map:
# Check that translated markup has the same elements and attributes
if not markup_match(label.text, label_text):
print("Translated markup does not contain the same elements and attributes at %s%s:%d" % \
(glade_file_path, lang_str, label.sourceline))
glade_success = False
return glade_success
if __name__ == "__main__":
parser = argparse.ArgumentParser("Check Pango markup validity")
parser.add_argument("-t", "--translate", action='store_true',
help="Check translated strings")
parser.add_argument("-p", "--podir", action='store', type=str,
metavar='PODIR', help='Directory containing po files', default='./po')
parser.add_argument("glade_files", nargs="+", metavar="GLADE-FILE",
help='The glade file to check')
args = parser.parse_args(args=sys.argv[1:])
success = True
for file_path in args.glade_files:
if not check_glade_file(file_path):
success = False
# Now loop over all of the translations
if args.translate:
podicts = translate_all(args.podir)
for po_dict in podicts.values():
for file_path in args.glade_files:
if not check_glade_file(file_path, po_dict):
success = False
sys.exit(0 if success else 1)
| gpl-2.0 |
J861449197/edx-platform | common/djangoapps/embargo/tests/test_views.py | 136 | 3286 | """Tests for embargo app views. """
import unittest
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from mako.exceptions import TopLevelLookupException
import ddt
from util.testing import UrlResetMixin
from embargo import messages
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class CourseAccessMessageViewTest(UrlResetMixin, TestCase):
"""Tests for the courseware access message view.
These end-points serve static content.
While we *could* check the text on each page,
this will require changes to the test every time
the text on the page changes.
Instead, we load each page we expect to be available
(based on the configuration in `embargo.messages`)
and verify that we get the correct status code.
This will catch errors in the message configuration
(for example, moving a template and forgetting to
update the configuration appropriately).
"""
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(CourseAccessMessageViewTest, self).setUp('embargo')
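# The EMBARGO feature flag is patched on before UrlResetMixin reloads the URL
# configuration for 'embargo', so the blocked-message URLs resolve in these tests.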
@ddt.data(*messages.ENROLL_MESSAGES.keys())
def test_enrollment_messages(self, msg_key):
self._load_page('enrollment', msg_key)
@ddt.data(*messages.COURSEWARE_MESSAGES.keys())
def test_courseware_messages(self, msg_key):
self._load_page('courseware', msg_key)
@ddt.data('enrollment', 'courseware')
def test_invalid_message_key(self, access_point):
self._load_page(access_point, 'invalid', expected_status=404)
@patch.dict(settings.FEATURES, {'USE_CUSTOM_THEME': True})
@ddt.data('enrollment', 'courseware')
def test_custom_theme_override(self, access_point):
# Custom override specified for the "embargo" message
# for backwards compatibility with previous versions
# of the embargo app.
# This template isn't available by default, but we can at least
# verify that the view will look for it when the USE_CUSTOM_THEME
# feature flag is specified.
with self.assertRaisesRegexp(TopLevelLookupException, 'static_templates/theme-embargo.html'):
self._load_page(access_point, 'embargo')
@patch.dict(settings.FEATURES, {'USE_CUSTOM_THEME': True})
@ddt.data('enrollment', 'courseware')
def test_custom_theme_override_not_specified(self, access_point):
# No custom override specified for the "default" message
self._load_page(access_point, 'default')
def _load_page(self, access_point, message_key, expected_status=200):
"""Load the message page and check the status code. """
url = reverse('embargo_blocked_message', kwargs={
'access_point': access_point,
'message_key': message_key
})
response = self.client.get(url)
self.assertEqual(
response.status_code, expected_status,
msg=(
u"Unexpected status code when loading '{url}': "
u"expected {expected} but got {actual}"
).format(
url=url,
expected=expected_status,
actual=response.status_code
)
)
| agpl-3.0 |
Titan-C/sympy | sympy/concrete/products.py | 4 | 15193 | from __future__ import print_function, division
from sympy.tensor.indexed import Idx
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.functions.elementary.exponential import exp, log
from sympy.polys import quo, roots
from sympy.simplify import powsimp
from sympy.core.compatibility import range
class Product(ExprWithIntLimits):
r"""Represents unevaluated products.
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, factorial, oo
>>> Product(k, (k, 1, m))
Product(k, (k, 1, m))
>>> Product(k, (k, 1, m)).doit()
factorial(m)
>>> Product(k**2,(k, 1, m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k, 1, m)).doit()
factorial(m)**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*factorial(n)**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*factorial(n)**2)
>>> Pe = Pe.rewrite(gamma)
>>> Pe
pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2)
>>> Pe = simplify(Pe)
>>> Pe
sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2
>>> limit(Pe, n, oo)
sin(pi**2/2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> simplify(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] http://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args):
from sympy.concrete.summations import Sum
return exp(Sum(log(self.function), *self.limits))
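# The rewrite relies on the identity prod(f(i)) == exp(Sum(log(f(i)))), which
# turns a product into a sum of logarithms over the same limits.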
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
# a Product is zero only if its term is zero.
return self.term.is_zero
def doit(self, **hints):
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b + 1, a - 1
f = 1 / f
if isinstance(i, Idx):
i = i.label
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta, RisingFactorial
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
if dif.is_Integer:
return Mul(*[term.subs(k, a + i) for i in range(dif + 1)])
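# For polynomial terms the closed form below is assembled from the roots r of
# the polynomial, using prod_{k=a}^{n} (k - r) = RisingFactorial(a - r, n - a + 1).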
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
for r, m in all_roots.items():
M += m
A *= RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p, (k, a, n))
q = self._eval_product(q, (k, a, n))
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return A * B
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import product_simplify
return product_simplify(self)
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def is_convergent(self):
r"""
See docs of Sum.is_convergent() for explanation of convergence
in SymPy.
The infinite product:
.. math::
\prod_{1 \leq i < \infty} f(i)
is defined by the sequence of partial products:
.. math::
\prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n)
as n increases without bound. The product converges to a non-zero
value if and only if the sum:
.. math::
\sum_{1 \leq i < \infty} \log{f(n)}
converges.
References
==========
.. [1] https://en.wikipedia.org/wiki/Infinite_product
Examples
========
>>> from sympy import Interval, S, Product, Symbol, cos, pi, exp, oo
>>> n = Symbol('n', integer=True)
>>> Product(n/(n + 1), (n, 1, oo)).is_convergent()
False
>>> Product(1/n**2, (n, 1, oo)).is_convergent()
False
>>> Product(cos(pi/n), (n, 1, oo)).is_convergent()
True
>>> Product(exp(-n**2), (n, 1, oo)).is_convergent()
False
"""
from sympy.concrete.summations import Sum
sequence_term = self.function
log_sum = log(sequence_term)
lim = self.limits
try:
is_conv = Sum(log_sum, *lim).is_convergent()
except NotImplementedError:
if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true:
return S.true
raise NotImplementedError("The algorithm to find the product convergence of %s "
"is not yet implemented" % (sequence_term))
return is_conv
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Usage
=====
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import Product, simplify, RisingFactorial, gamma, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr)
gamma(b + 1)/gamma(a)
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P)
gamma(b + 1)/gamma(a)
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
index, reorder_limit, reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
                                 b
                               _____
    product(f(n), (i, a, b)) = |   | f(n)
                               |   |
                             i = a
If it cannot compute the product, it returns an unevaluated Product object.
Repeated products can be computed by introducing additional symbols tuples::
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
| bsd-3-clause |
sentinelleader/limbo | limbo/plugins/emojicodedict.py | 14 | 46328 | #
# This file is based on emoji (https://github.com/kyokomi/emoji).
#
# The MIT License (MIT)
#
# Copyright (c) 2014 kyokomi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
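# Maps ":shortcode:" emoji names to the corresponding Unicode character sequences.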
emojiCodeDict = {
":capricorn:": u"\U00002651",
":end:": u"\U0001f51a",
":no_mobile_phones:": u"\U0001f4f5",
":couple:": u"\U0001f46b",
":snowman:": u"\U000026c4",
":sunrise_over_mountains:": u"\U0001f304",
":suspension_railway:": u"\U0001f69f",
":arrows_counterclockwise:": u"\U0001f504",
":bug:": u"\U0001f41b",
":confused:": u"\U0001f615",
":dress:": u"\U0001f457",
":honeybee:": u"\U0001f41d",
":waning_crescent_moon:": u"\U0001f318",
":balloon:": u"\U0001f388",
":bus:": u"\U0001f68c",
":package:": u"\U0001f4e6",
":pencil2:": u"\U0000270f",
":rage:": u"\U0001f621",
":space_invader:": u"\U0001f47e",
":white_medium_small_square:": u"\U000025fd",
":fast_forward:": u"\U000023e9",
":rice_cracker:": u"\U0001f358",
":incoming_envelope:": u"\U0001f4e8",
":sa:": u"\U0001f202",
":womens:": u"\U0001f6ba",
":arrow_right:": u"\U000027a1",
":construction_worker:": u"\U0001f477",
":notes:": u"\U0001f3b6",
":goat:": u"\U0001f410",
":grey_question:": u"\U00002754",
":lantern:": u"\U0001f3ee",
":rice_scene:": u"\U0001f391",
":running:": u"\U0001f3c3",
":ferris_wheel:": u"\U0001f3a1",
":musical_score:": u"\U0001f3bc",
":sparkle:": u"\U00002747",
":wink:": u"\U0001f609",
":art:": u"\U0001f3a8",
":clock330:": u"\U0001f55e",
":minidisc:": u"\U0001f4bd",
":no_entry_sign:": u"\U0001f6ab",
":wind_chime:": u"\U0001f390",
":cyclone:": u"\U0001f300",
":herb:": u"\U0001f33f",
":leopard:": u"\U0001f406",
":banana:": u"\U0001f34c",
":handbag:": u"\U0001f45c",
":honey_pot:": u"\U0001f36f",
":ok:": u"\U0001f197",
":hearts:": u"\U00002665",
":passport_control:": u"\U0001f6c2",
":moyai:": u"\U0001f5ff",
":smile:": u"\U0001f604",
":tiger2:": u"\U0001f405",
":twisted_rightwards_arrows:": u"\U0001f500",
":children_crossing:": u"\U0001f6b8",
":cow:": u"\U0001f42e",
":point_up:": u"\U0000261d",
":house:": u"\U0001f3e0",
":man_with_turban:": u"\U0001f473",
":mountain_railway:": u"\U0001f69e",
":vibration_mode:": u"\U0001f4f3",
":blowfish:": u"\U0001f421",
":it:": u"\U0001f1ee\U0001f1f9",
":oden:": u"\U0001f362",
":clock3:": u"\U0001f552",
":lollipop:": u"\U0001f36d",
":train:": u"\U0001f68b",
":scissors:": u"\U00002702",
":triangular_ruler:": u"\U0001f4d0",
":wedding:": u"\U0001f492",
":flashlight:": u"\U0001f526",
":secret:": u"\U00003299",
":sushi:": u"\U0001f363",
":blue_car:": u"\U0001f699",
":cd:": u"\U0001f4bf",
":milky_way:": u"\U0001f30c",
":mortar_board:": u"\U0001f393",
":crown:": u"\U0001f451",
":speech_balloon:": u"\U0001f4ac",
":bento:": u"\U0001f371",
":grey_exclamation:": u"\U00002755",
":hotel:": u"\U0001f3e8",
":keycap_ten:": u"\U0001f51f",
":newspaper:": u"\U0001f4f0",
":outbox_tray:": u"\U0001f4e4",
":racehorse:": u"\U0001f40e",
":laughing:": u"\U0001f606",
":black_large_square:": u"\U00002b1b",
":books:": u"\U0001f4da",
":eight_spoked_asterisk:": u"\U00002733",
":heavy_check_mark:": u"\U00002714",
":m:": u"\U000024c2",
":wave:": u"\U0001f44b",
":bicyclist:": u"\U0001f6b4",
":cocktail:": u"\U0001f378",
":european_castle:": u"\U0001f3f0",
":point_down:": u"\U0001f447",
":tokyo_tower:": u"\U0001f5fc",
":battery:": u"\U0001f50b",
":dancer:": u"\U0001f483",
":repeat:": u"\U0001f501",
":ru:": u"\U0001f1f7\U0001f1fa",
":new_moon:": u"\U0001f311",
":church:": u"\U000026ea",
":date:": u"\U0001f4c5",
":earth_americas:": u"\U0001f30e",
":footprints:": u"\U0001f463",
":libra:": u"\U0000264e",
":mountain_cableway:": u"\U0001f6a0",
":small_red_triangle_down:": u"\U0001f53b",
":top:": u"\U0001f51d",
":sunglasses:": u"\U0001f60e",
":abcd:": u"\U0001f521",
":cl:": u"\U0001f191",
":ski:": u"\U0001f3bf",
":book:": u"\U0001f4d6",
":hourglass_flowing_sand:": u"\U000023f3",
":stuck_out_tongue_closed_eyes:": u"\U0001f61d",
":cold_sweat:": u"\U0001f630",
":headphones:": u"\U0001f3a7",
":confetti_ball:": u"\U0001f38a",
":gemini:": u"\U0000264a",
":new:": u"\U0001f195",
":pray:": u"\U0001f64f",
":watch:": u"\U0000231a",
":coffee:": u"\U00002615",
":ghost:": u"\U0001f47b",
":on:": u"\U0001f51b",
":pouch:": u"\U0001f45d",
":taxi:": u"\U0001f695",
":hocho:": u"\U0001f52a",
":yum:": u"\U0001f60b",
":heavy_plus_sign:": u"\U00002795",
":tada:": u"\U0001f389",
":arrow_heading_down:": u"\U00002935",
":clock530:": u"\U0001f560",
":poultry_leg:": u"\U0001f357",
":elephant:": u"\U0001f418",
":gb:": u"\U0001f1ec\U0001f1e7",
":mahjong:": u"\U0001f004",
":rice:": u"\U0001f35a",
":musical_note:": u"\U0001f3b5",
":beginner:": u"\U0001f530",
":small_red_triangle:": u"\U0001f53a",
":tomato:": u"\U0001f345",
":clock1130:": u"\U0001f566",
":japanese_castle:": u"\U0001f3ef",
":sun_with_face:": u"\U0001f31e",
":four:": u"\U00000034\U000020e3",
":microphone:": u"\U0001f3a4",
":tennis:": u"\U0001f3be",
":arrow_up_down:": u"\U00002195",
":cn:": u"\U0001f1e8\U0001f1f3",
":horse_racing:": u"\U0001f3c7",
":no_bicycles:": u"\U0001f6b3",
":snail:": u"\U0001f40c",
":free:": u"\U0001f193",
":beetle:": u"\U0001f41e",
":black_small_square:": u"\U000025aa",
":file_folder:": u"\U0001f4c1",
":hushed:": u"\U0001f62f",
":skull:": u"\U0001f480",
":ab:": u"\U0001f18e",
":rocket:": u"\U0001f680",
":sweet_potato:": u"\U0001f360",
":guitar:": u"\U0001f3b8",
":poodle:": u"\U0001f429",
":tulip:": u"\U0001f337",
":large_orange_diamond:": u"\U0001f536",
":-1:": u"\U0001f44e",
":chart_with_upwards_trend:": u"\U0001f4c8",
":de:": u"\U0001f1e9\U0001f1ea",
":grapes:": u"\U0001f347",
":ideograph_advantage:": u"\U0001f250",
":japanese_ogre:": u"\U0001f479",
":telephone:": u"\U0000260e",
":clock230:": u"\U0001f55d",
":hourglass:": u"\U0000231b",
":leftwards_arrow_with_hook:": u"\U000021a9",
":sparkler:": u"\U0001f387",
":black_joker:": u"\U0001f0cf",
":clock730:": u"\U0001f562",
":first_quarter_moon_with_face:": u"\U0001f31b",
":man:": u"\U0001f468",
":clock4:": u"\U0001f553",
":fishing_pole_and_fish:": u"\U0001f3a3",
":tophat:": u"\U0001f3a9",
":white_medium_square:": u"\U000025fb",
":mega:": u"\U0001f4e3",
":spaghetti:": u"\U0001f35d",
":dart:": u"\U0001f3af",
":girl:": u"\U0001f467",
":womans_hat:": u"\U0001f452",
":bullettrain_front:": u"\U0001f685",
":department_store:": u"\U0001f3ec",
":heartbeat:": u"\U0001f493",
":palm_tree:": u"\U0001f334",
":swimmer:": u"\U0001f3ca",
":yellow_heart:": u"\U0001f49b",
":arrow_upper_right:": u"\U00002197",
":clock2:": u"\U0001f551",
":high_heel:": u"\U0001f460",
":arrow_double_up:": u"\U000023eb",
":cry:": u"\U0001f622",
":dvd:": u"\U0001f4c0",
":e-mail:": u"\U0001f4e7",
":baby_bottle:": u"\U0001f37c",
":cool:": u"\U0001f192",
":floppy_disk:": u"\U0001f4be",
":iphone:": u"\U0001f4f1",
":minibus:": u"\U0001f690",
":rooster:": u"\U0001f413",
":three:": u"\U00000033\U000020e3",
":white_small_square:": u"\U000025ab",
":cancer:": u"\U0000264b",
":question:": u"\U00002753",
":sake:": u"\U0001f376",
":birthday:": u"\U0001f382",
":dog2:": u"\U0001f415",
":loudspeaker:": u"\U0001f4e2",
":arrow_up_small:": u"\U0001f53c",
":camel:": u"\U0001f42b",
":koala:": u"\U0001f428",
":mag_right:": u"\U0001f50e",
":soccer:": u"\U000026bd",
":bike:": u"\U0001f6b2",
":ear_of_rice:": u"\U0001f33e",
":shit:": u"\U0001f4a9",
":u7981:": u"\U0001f232",
":bath:": u"\U0001f6c0",
":baby:": u"\U0001f476",
":lock_with_ink_pen:": u"\U0001f50f",
":necktie:": u"\U0001f454",
":bikini:": u"\U0001f459",
":blush:": u"\U0001f60a",
":heartpulse:": u"\U0001f497",
":pig_nose:": u"\U0001f43d",
":straight_ruler:": u"\U0001f4cf",
":u6e80:": u"\U0001f235",
":gift:": u"\U0001f381",
":traffic_light:": u"\U0001f6a5",
":hibiscus:": u"\U0001f33a",
":couple_with_heart:": u"\U0001f491",
":pushpin:": u"\U0001f4cc",
":u6709:": u"\U0001f236",
":walking:": u"\U0001f6b6",
":grinning:": u"\U0001f600",
":hash:": u"\U00000023\U000020e3",
":radio_button:": u"\U0001f518",
":raised_hand:": u"\U0000270b",
":shaved_ice:": u"\U0001f367",
":barber:": u"\U0001f488",
":cat:": u"\U0001f431",
":heavy_exclamation_mark:": u"\U00002757",
":ice_cream:": u"\U0001f368",
":mask:": u"\U0001f637",
":pig2:": u"\U0001f416",
":triangular_flag_on_post:": u"\U0001f6a9",
":arrow_upper_left:": u"\U00002196",
":bee:": u"\U0001f41d",
":beer:": u"\U0001f37a",
":black_nib:": u"\U00002712",
":exclamation:": u"\U00002757",
":dog:": u"\U0001f436",
":fire:": u"\U0001f525",
":ant:": u"\U0001f41c",
":broken_heart:": u"\U0001f494",
":chart:": u"\U0001f4b9",
":clock1:": u"\U0001f550",
":bomb:": u"\U0001f4a3",
":virgo:": u"\U0000264d",
":a:": u"\U0001f170",
":fork_and_knife:": u"\U0001f374",
":copyright:": u"\U000000a9",
":curly_loop:": u"\U000027b0",
":full_moon:": u"\U0001f315",
":shoe:": u"\U0001f45e",
":european_post_office:": u"\U0001f3e4",
":ng:": u"\U0001f196",
":office:": u"\U0001f3e2",
":raising_hand:": u"\U0001f64b",
":revolving_hearts:": u"\U0001f49e",
":aquarius:": u"\U00002652",
":electric_plug:": u"\U0001f50c",
":meat_on_bone:": u"\U0001f356",
":mens:": u"\U0001f6b9",
":briefcase:": u"\U0001f4bc",
":ship:": u"\U0001f6a2",
":anchor:": u"\U00002693",
":ballot_box_with_check:": u"\U00002611",
":bear:": u"\U0001f43b",
":beers:": u"\U0001f37b",
":dromedary_camel:": u"\U0001f42a",
":nut_and_bolt:": u"\U0001f529",
":construction:": u"\U0001f6a7",
":golf:": u"\U000026f3",
":toilet:": u"\U0001f6bd",
":blue_book:": u"\U0001f4d8",
":boom:": u"\U0001f4a5",
":deciduous_tree:": u"\U0001f333",
":kissing_closed_eyes:": u"\U0001f61a",
":smiley_cat:": u"\U0001f63a",
":fuelpump:": u"\U000026fd",
":kiss:": u"\U0001f48b",
":clock10:": u"\U0001f559",
":sheep:": u"\U0001f411",
":white_flower:": u"\U0001f4ae",
":boar:": u"\U0001f417",
":currency_exchange:": u"\U0001f4b1",
":facepunch:": u"\U0001f44a",
":flower_playing_cards:": u"\U0001f3b4",
":person_frowning:": u"\U0001f64d",
":poop:": u"\U0001f4a9",
":satisfied:": u"\U0001f606",
":8ball:": u"\U0001f3b1",
":disappointed_relieved:": u"\U0001f625",
":panda_face:": u"\U0001f43c",
":ticket:": u"\U0001f3ab",
":us:": u"\U0001f1fa\U0001f1f8",
":waxing_crescent_moon:": u"\U0001f312",
":dragon:": u"\U0001f409",
":gun:": u"\U0001f52b",
":mount_fuji:": u"\U0001f5fb",
":new_moon_with_face:": u"\U0001f31a",
":star2:": u"\U0001f31f",
":grimacing:": u"\U0001f62c",
":confounded:": u"\U0001f616",
":congratulations:": u"\U00003297",
":custard:": u"\U0001f36e",
":frowning:": u"\U0001f626",
":maple_leaf:": u"\U0001f341",
":police_car:": u"\U0001f693",
":cloud:": u"\U00002601",
":jeans:": u"\U0001f456",
":fish:": u"\U0001f41f",
":wavy_dash:": u"\U00003030",
":clock5:": u"\U0001f554",
":santa:": u"\U0001f385",
":japan:": u"\U0001f5fe",
":oncoming_taxi:": u"\U0001f696",
":whale:": u"\U0001f433",
":arrow_forward:": u"\U000025b6",
":kissing_heart:": u"\U0001f618",
":bullettrain_side:": u"\U0001f684",
":fearful:": u"\U0001f628",
":moneybag:": u"\U0001f4b0",
":runner:": u"\U0001f3c3",
":mailbox:": u"\U0001f4eb",
":sandal:": u"\U0001f461",
":zzz:": u"\U0001f4a4",
":apple:": u"\U0001f34e",
":arrow_heading_up:": u"\U00002934",
":family:": u"\U0001f46a",
":heavy_minus_sign:": u"\U00002796",
":saxophone:": u"\U0001f3b7",
":u5272:": u"\U0001f239",
":black_square_button:": u"\U0001f532",
":bouquet:": u"\U0001f490",
":love_letter:": u"\U0001f48c",
":metro:": u"\U0001f687",
":small_blue_diamond:": u"\U0001f539",
":thought_balloon:": u"\U0001f4ad",
":arrow_up:": u"\U00002b06",
":no_pedestrians:": u"\U0001f6b7",
":smirk:": u"\U0001f60f",
":blue_heart:": u"\U0001f499",
":large_blue_diamond:": u"\U0001f537",
":vs:": u"\U0001f19a",
":v:": u"\U0000270c",
":wheelchair:": u"\U0000267f",
":couplekiss:": u"\U0001f48f",
":tent:": u"\U000026fa",
":purple_heart:": u"\U0001f49c",
":relaxed:": u"\U0000263a",
":accept:": u"\U0001f251",
":green_heart:": u"\U0001f49a",
":pouting_cat:": u"\U0001f63e",
":tram:": u"\U0001f68a",
":bangbang:": u"\U0000203c",
":collision:": u"\U0001f4a5",
":convenience_store:": u"\U0001f3ea",
":person_with_blond_hair:": u"\U0001f471",
":uk:": u"\U0001f1ec\U0001f1e7",
":peach:": u"\U0001f351",
":tired_face:": u"\U0001f62b",
":bread:": u"\U0001f35e",
":mailbox_closed:": u"\U0001f4ea",
":open_mouth:": u"\U0001f62e",
":pig:": u"\U0001f437",
":put_litter_in_its_place:": u"\U0001f6ae",
":u7a7a:": u"\U0001f233",
":bulb:": u"\U0001f4a1",
":clock9:": u"\U0001f558",
":envelope_with_arrow:": u"\U0001f4e9",
":pisces:": u"\U00002653",
":baggage_claim:": u"\U0001f6c4",
":egg:": u"\U0001f373",
":sweat_smile:": u"\U0001f605",
":boat:": u"\U000026f5",
":fr:": u"\U0001f1eb\U0001f1f7",
":heavy_division_sign:": u"\U00002797",
":muscle:": u"\U0001f4aa",
":paw_prints:": u"\U0001f43e",
":arrow_left:": u"\U00002b05",
":black_circle:": u"\U000026ab",
":kissing_smiling_eyes:": u"\U0001f619",
":star:": u"\U00002b50",
":steam_locomotive:": u"\U0001f682",
":1234:": u"\U0001f522",
":clock130:": u"\U0001f55c",
":kr:": u"\U0001f1f0\U0001f1f7",
":monorail:": u"\U0001f69d",
":school:": u"\U0001f3eb",
":seven:": u"\U00000037\U000020e3",
":baby_chick:": u"\U0001f424",
":bridge_at_night:": u"\U0001f309",
":hotsprings:": u"\U00002668",
":rose:": u"\U0001f339",
":love_hotel:": u"\U0001f3e9",
":princess:": u"\U0001f478",
":ramen:": u"\U0001f35c",
":scroll:": u"\U0001f4dc",
":tropical_fish:": u"\U0001f420",
":heart_eyes_cat:": u"\U0001f63b",
":information_desk_person:": u"\U0001f481",
":mouse:": u"\U0001f42d",
":no_smoking:": u"\U0001f6ad",
":post_office:": u"\U0001f3e3",
":stars:": u"\U0001f320",
":arrow_double_down:": u"\U000023ec",
":unlock:": u"\U0001f513",
":arrow_backward:": u"\U000025c0",
":hand:": u"\U0000270b",
":hospital:": u"\U0001f3e5",
":ocean:": u"\U0001f30a",
":mountain_bicyclist:": u"\U0001f6b5",
":octopus:": u"\U0001f419",
":sos:": u"\U0001f198",
":dizzy_face:": u"\U0001f635",
":tongue:": u"\U0001f445",
":train2:": u"\U0001f686",
":checkered_flag:": u"\U0001f3c1",
":orange_book:": u"\U0001f4d9",
":sound:": u"\U0001f509",
":aerial_tramway:": u"\U0001f6a1",
":bell:": u"\U0001f514",
":dragon_face:": u"\U0001f432",
":flipper:": u"\U0001f42c",
":ok_woman:": u"\U0001f646",
":performing_arts:": u"\U0001f3ad",
":postal_horn:": u"\U0001f4ef",
":clock1030:": u"\U0001f565",
":email:": u"\U00002709",
":green_book:": u"\U0001f4d7",
":point_up_2:": u"\U0001f446",
":high_brightness:": u"\U0001f506",
":running_shirt_with_sash:": u"\U0001f3bd",
":bookmark:": u"\U0001f516",
":sob:": u"\U0001f62d",
":arrow_lower_right:": u"\U00002198",
":point_left:": u"\U0001f448",
":purse:": u"\U0001f45b",
":sparkles:": u"\U00002728",
":black_medium_small_square:": u"\U000025fe",
":pound:": u"\U0001f4b7",
":rabbit:": u"\U0001f430",
":woman:": u"\U0001f469",
":negative_squared_cross_mark:": u"\U0000274e",
":open_book:": u"\U0001f4d6",
":smiling_imp:": u"\U0001f608",
":spades:": u"\U00002660",
":baseball:": u"\U000026be",
":fountain:": u"\U000026f2",
":joy:": u"\U0001f602",
":lipstick:": u"\U0001f484",
":partly_sunny:": u"\U000026c5",
":ram:": u"\U0001f40f",
":red_circle:": u"\U0001f534",
":cop:": u"\U0001f46e",
":green_apple:": u"\U0001f34f",
":registered:": u"\U000000ae",
":+1:": u"\U0001f44d",
":crying_cat_face:": u"\U0001f63f",
":innocent:": u"\U0001f607",
":mobile_phone_off:": u"\U0001f4f4",
":underage:": u"\U0001f51e",
":dolphin:": u"\U0001f42c",
":busts_in_silhouette:": u"\U0001f465",
":umbrella:": u"\U00002614",
":angel:": u"\U0001f47c",
":small_orange_diamond:": u"\U0001f538",
":sunflower:": u"\U0001f33b",
":link:": u"\U0001f517",
":notebook:": u"\U0001f4d3",
":oncoming_bus:": u"\U0001f68d",
":bookmark_tabs:": u"\U0001f4d1",
":calendar:": u"\U0001f4c6",
":izakaya_lantern:": u"\U0001f3ee",
":mans_shoe:": u"\U0001f45e",
":name_badge:": u"\U0001f4db",
":closed_lock_with_key:": u"\U0001f510",
":fist:": u"\U0000270a",
":id:": u"\U0001f194",
":ambulance:": u"\U0001f691",
":musical_keyboard:": u"\U0001f3b9",
":ribbon:": u"\U0001f380",
":seedling:": u"\U0001f331",
":tv:": u"\U0001f4fa",
":football:": u"\U0001f3c8",
":nail_care:": u"\U0001f485",
":seat:": u"\U0001f4ba",
":alarm_clock:": u"\U000023f0",
":money_with_wings:": u"\U0001f4b8",
":relieved:": u"\U0001f60c",
":womans_clothes:": u"\U0001f45a",
":lips:": u"\U0001f444",
":clubs:": u"\U00002663",
":house_with_garden:": u"\U0001f3e1",
":sunrise:": u"\U0001f305",
":monkey:": u"\U0001f412",
":six:": u"\U00000036\U000020e3",
":smiley:": u"\U0001f603",
":feet:": u"\U0001f43e",
":waning_gibbous_moon:": u"\U0001f316",
":yen:": u"\U0001f4b4",
":baby_symbol:": u"\U0001f6bc",
":signal_strength:": u"\U0001f4f6",
":boy:": u"\U0001f466",
":busstop:": u"\U0001f68f",
":computer:": u"\U0001f4bb",
":night_with_stars:": u"\U0001f303",
":older_woman:": u"\U0001f475",
":parking:": u"\U0001f17f",
":trumpet:": u"\U0001f3ba",
":100:": u"\U0001f4af",
":sweat_drops:": u"\U0001f4a6",
":wc:": u"\U0001f6be",
":b:": u"\U0001f171",
":cupid:": u"\U0001f498",
":five:": u"\U00000035\U000020e3",
":part_alternation_mark:": u"\U0000303d",
":snowboarder:": u"\U0001f3c2",
":warning:": u"\U000026a0",
":white_large_square:": u"\U00002b1c",
":zap:": u"\U000026a1",
":arrow_down_small:": u"\U0001f53d",
":clock430:": u"\U0001f55f",
":expressionless:": u"\U0001f611",
":phone:": u"\U0000260e",
":roller_coaster:": u"\U0001f3a2",
":lemon:": u"\U0001f34b",
":one:": u"\U00000031\U000020e3",
":christmas_tree:": u"\U0001f384",
":hankey:": u"\U0001f4a9",
":hatched_chick:": u"\U0001f425",
":u7533:": u"\U0001f238",
":large_blue_circle:": u"\U0001f535",
":up:": u"\U0001f199",
":wine_glass:": u"\U0001f377",
":x:": u"\U0000274c",
":nose:": u"\U0001f443",
":rewind:": u"\U000023ea",
":two_hearts:": u"\U0001f495",
":envelope:": u"\U00002709",
":oncoming_automobile:": u"\U0001f698",
":ophiuchus:": u"\U000026ce",
":ring:": u"\U0001f48d",
":tropical_drink:": u"\U0001f379",
":turtle:": u"\U0001f422",
":crescent_moon:": u"\U0001f319",
":koko:": u"\U0001f201",
":microscope:": u"\U0001f52c",
":rugby_football:": u"\U0001f3c9",
":smoking:": u"\U0001f6ac",
":anger:": u"\U0001f4a2",
":aries:": u"\U00002648",
":city_sunset:": u"\U0001f306",
":clock1230:": u"\U0001f567",
":mailbox_with_no_mail:": u"\U0001f4ed",
":movie_camera:": u"\U0001f3a5",
":pager:": u"\U0001f4df",
":zero:": u"\U00000030\U000020e3",
":bank:": u"\U0001f3e6",
":eight_pointed_black_star:": u"\U00002734",
":knife:": u"\U0001f52a",
":u7121:": u"\U0001f21a",
":customs:": u"\U0001f6c3",
":melon:": u"\U0001f348",
":rowboat:": u"\U0001f6a3",
":corn:": u"\U0001f33d",
":eggplant:": u"\U0001f346",
":heart_decoration:": u"\U0001f49f",
":rotating_light:": u"\U0001f6a8",
":round_pushpin:": u"\U0001f4cd",
":cat2:": u"\U0001f408",
":chocolate_bar:": u"\U0001f36b",
":no_bell:": u"\U0001f515",
":radio:": u"\U0001f4fb",
":droplet:": u"\U0001f4a7",
":hamburger:": u"\U0001f354",
":fire_engine:": u"\U0001f692",
":heart:": u"\U00002764",
":potable_water:": u"\U0001f6b0",
":telephone_receiver:": u"\U0001f4de",
":dash:": u"\U0001f4a8",
":globe_with_meridians:": u"\U0001f310",
":guardsman:": u"\U0001f482",
":heavy_multiplication_x:": u"\U00002716",
":chart_with_downwards_trend:": u"\U0001f4c9",
":imp:": u"\U0001f47f",
":earth_asia:": u"\U0001f30f",
":mouse2:": u"\U0001f401",
":notebook_with_decorative_cover:": u"\U0001f4d4",
":telescope:": u"\U0001f52d",
":trolleybus:": u"\U0001f68e",
":card_index:": u"\U0001f4c7",
":euro:": u"\U0001f4b6",
":dollar:": u"\U0001f4b5",
":fax:": u"\U0001f4e0",
":mailbox_with_mail:": u"\U0001f4ec",
":raised_hands:": u"\U0001f64c",
":disappointed:": u"\U0001f61e",
":foggy:": u"\U0001f301",
":person_with_pouting_face:": u"\U0001f64e",
":statue_of_liberty:": u"\U0001f5fd",
":dolls:": u"\U0001f38e",
":light_rail:": u"\U0001f688",
":pencil:": u"\U0001f4dd",
":speak_no_evil:": u"\U0001f64a",
":calling:": u"\U0001f4f2",
":clock830:": u"\U0001f563",
":cow2:": u"\U0001f404",
":hear_no_evil:": u"\U0001f649",
":scream_cat:": u"\U0001f640",
":smile_cat:": u"\U0001f638",
":tractor:": u"\U0001f69c",
":clock11:": u"\U0001f55a",
":doughnut:": u"\U0001f369",
":hammer:": u"\U0001f528",
":loop:": u"\U000027bf",
":moon:": u"\U0001f314",
":soon:": u"\U0001f51c",
":cinema:": u"\U0001f3a6",
":factory:": u"\U0001f3ed",
":flushed:": u"\U0001f633",
":mute:": u"\U0001f507",
":neutral_face:": u"\U0001f610",
":scorpius:": u"\U0000264f",
":wolf:": u"\U0001f43a",
":clapper:": u"\U0001f3ac",
":joy_cat:": u"\U0001f639",
":pensive:": u"\U0001f614",
":sleeping:": u"\U0001f634",
":credit_card:": u"\U0001f4b3",
":leo:": u"\U0000264c",
":man_with_gua_pi_mao:": u"\U0001f472",
":open_hands:": u"\U0001f450",
":tea:": u"\U0001f375",
":arrow_down:": u"\U00002b07",
":nine:": u"\U00000039\U000020e3",
":punch:": u"\U0001f44a",
":slot_machine:": u"\U0001f3b0",
":clap:": u"\U0001f44f",
":information_source:": u"\U00002139",
":tiger:": u"\U0001f42f",
":city_sunrise:": u"\U0001f307",
":dango:": u"\U0001f361",
":thumbsdown:": u"\U0001f44e",
":u6307:": u"\U0001f22f",
":curry:": u"\U0001f35b",
":cherries:": u"\U0001f352",
":clock6:": u"\U0001f555",
":clock7:": u"\U0001f556",
":older_man:": u"\U0001f474",
":oncoming_police_car:": u"\U0001f694",
":syringe:": u"\U0001f489",
":heavy_dollar_sign:": u"\U0001f4b2",
":open_file_folder:": u"\U0001f4c2",
":arrow_right_hook:": u"\U000021aa",
":articulated_lorry:": u"\U0001f69b",
":dancers:": u"\U0001f46f",
":kissing_cat:": u"\U0001f63d",
":rainbow:": u"\U0001f308",
":u5408:": u"\U0001f234",
":boot:": u"\U0001f462",
":carousel_horse:": u"\U0001f3a0",
":fried_shrimp:": u"\U0001f364",
":lock:": u"\U0001f512",
":non-potable_water:": u"\U0001f6b1",
":o:": u"\U00002b55",
":persevere:": u"\U0001f623",
":diamond_shape_with_a_dot_inside:": u"\U0001f4a0",
":fallen_leaf:": u"\U0001f342",
":massage:": u"\U0001f486",
":volcano:": u"\U0001f30b",
":gem:": u"\U0001f48e",
":shower:": u"\U0001f6bf",
":speaker:": u"\U0001f508",
":last_quarter_moon_with_face:": u"\U0001f31c",
":mag:": u"\U0001f50d",
":anguished:": u"\U0001f627",
":monkey_face:": u"\U0001f435",
":sunny:": u"\U00002600",
":tangerine:": u"\U0001f34a",
":point_right:": u"\U0001f449",
":railway_car:": u"\U0001f683",
":triumph:": u"\U0001f624",
":two:": u"\U00000032\U000020e3",
":gift_heart:": u"\U0001f49d",
":ledger:": u"\U0001f4d2",
":sagittarius:": u"\U00002650",
":snowflake:": u"\U00002744",
":abc:": u"\U0001f524",
":horse:": u"\U0001f434",
":ok_hand:": u"\U0001f44c",
":video_camera:": u"\U0001f4f9",
":sparkling_heart:": u"\U0001f496",
":taurus:": u"\U00002649",
":frog:": u"\U0001f438",
":hamster:": u"\U0001f439",
":helicopter:": u"\U0001f681",
":fries:": u"\U0001f35f",
":mushroom:": u"\U0001f344",
":penguin:": u"\U0001f427",
":truck:": u"\U0001f69a",
":bar_chart:": u"\U0001f4ca",
":evergreen_tree:": u"\U0001f332",
":bow:": u"\U0001f647",
":clock12:": u"\U0001f55b",
":four_leaf_clover:": u"\U0001f340",
":inbox_tray:": u"\U0001f4e5",
":smirk_cat:": u"\U0001f63c",
":two_men_holding_hands:": u"\U0001f46c",
":water_buffalo:": u"\U0001f403",
":alien:": u"\U0001f47d",
":video_game:": u"\U0001f3ae",
":candy:": u"\U0001f36c",
":page_facing_up:": u"\U0001f4c4",
":watermelon:": u"\U0001f349",
":white_check_mark:": u"\U00002705",
":blossom:": u"\U0001f33c",
":crocodile:": u"\U0001f40a",
":no_mouth:": u"\U0001f636",
":o2:": u"\U0001f17e",
":shirt:": u"\U0001f455",
":clock8:": u"\U0001f557",
":eyes:": u"\U0001f440",
":rabbit2:": u"\U0001f407",
":tanabata_tree:": u"\U0001f38b",
":wrench:": u"\U0001f527",
":es:": u"\U0001f1ea\U0001f1f8",
":trophy:": u"\U0001f3c6",
":two_women_holding_hands:": u"\U0001f46d",
":clock630:": u"\U0001f561",
":pineapple:": u"\U0001f34d",
":stuck_out_tongue:": u"\U0001f61b",
":angry:": u"\U0001f620",
":athletic_shoe:": u"\U0001f45f",
":cookie:": u"\U0001f36a",
":flags:": u"\U0001f38f",
":game_die:": u"\U0001f3b2",
":bird:": u"\U0001f426",
":jack_o_lantern:": u"\U0001f383",
":ox:": u"\U0001f402",
":paperclip:": u"\U0001f4ce",
":sleepy:": u"\U0001f62a",
":astonished:": u"\U0001f632",
":back:": u"\U0001f519",
":closed_book:": u"\U0001f4d5",
":hatching_chick:": u"\U0001f423",
":arrows_clockwise:": u"\U0001f503",
":car:": u"\U0001f697",
":ear:": u"\U0001f442",
":haircut:": u"\U0001f487",
":icecream:": u"\U0001f366",
":bust_in_silhouette:": u"\U0001f464",
":diamonds:": u"\U00002666",
":no_good:": u"\U0001f645",
":pizza:": u"\U0001f355",
":chicken:": u"\U0001f414",
":eyeglasses:": u"\U0001f453",
":see_no_evil:": u"\U0001f648",
":earth_africa:": u"\U0001f30d",
":fireworks:": u"\U0001f386",
":page_with_curl:": u"\U0001f4c3",
":rice_ball:": u"\U0001f359",
":white_square_button:": u"\U0001f533",
":cake:": u"\U0001f370",
":red_car:": u"\U0001f697",
":tm:": u"\U00002122",
":unamused:": u"\U0001f612",
":fish_cake:": u"\U0001f365",
":key:": u"\U0001f511",
":speedboat:": u"\U0001f6a4",
":closed_umbrella:": u"\U0001f302",
":pear:": u"\U0001f350",
":satellite:": u"\U0001f4e1",
":scream:": u"\U0001f631",
":first_quarter_moon:": u"\U0001f313",
":jp:": u"\U0001f1ef\U0001f1f5",
":repeat_one:": u"\U0001f502",
":shell:": u"\U0001f41a",
":interrobang:": u"\U00002049",
":trident:": u"\U0001f531",
":u55b6:": u"\U0001f23a",
":atm:": u"\U0001f3e7",
":door:": u"\U0001f6aa",
":kissing:": u"\U0001f617",
":six_pointed_star:": u"\U0001f52f",
":thumbsup:": u"\U0001f44d",
":u6708:": u"\U0001f237",
":do_not_litter:": u"\U0001f6af",
":whale2:": u"\U0001f40b",
":school_satchel:": u"\U0001f392",
":cactus:": u"\U0001f335",
":clipboard:": u"\U0001f4cb",
":dizzy:": u"\U0001f4ab",
":waxing_gibbous_moon:": u"\U0001f314",
":camera:": u"\U0001f4f7",
":capital_abcd:": u"\U0001f520",
":leaves:": u"\U0001f343",
":left_luggage:": u"\U0001f6c5",
":bamboo:": u"\U0001f38d",
":bowling:": u"\U0001f3b3",
":eight:": u"\U00000038\U000020e3",
":kimono:": u"\U0001f458",
":left_right_arrow:": u"\U00002194",
":stuck_out_tongue_winking_eye:": u"\U0001f61c",
":surfer:": u"\U0001f3c4",
":sweat:": u"\U0001f613",
":violin:": u"\U0001f3bb",
":postbox:": u"\U0001f4ee",
":bride_with_veil:": u"\U0001f470",
":recycle:": u"\U0000267b",
":station:": u"\U0001f689",
":vhs:": u"\U0001f4fc",
":crossed_flags:": u"\U0001f38c",
":memo:": u"\U0001f4dd",
":no_entry:": u"\U000026d4",
":white_circle:": u"\U000026aa",
":arrow_lower_left:": u"\U00002199",
":chestnut:": u"\U0001f330",
":crystal_ball:": u"\U0001f52e",
":last_quarter_moon:": u"\U0001f317",
":loud_sound:": u"\U0001f50a",
":strawberry:": u"\U0001f353",
":worried:": u"\U0001f61f",
":circus_tent:": u"\U0001f3aa",
":weary:": u"\U0001f629",
":bathtub:": u"\U0001f6c1",
":snake:": u"\U0001f40d",
":grin:": u"\U0001f601",
":symbols:": u"\U0001f523",
":airplane:": u"\U00002708",
":heart_eyes:": u"\U0001f60d",
":sailboat:": u"\U000026f5",
":stew:": u"\U0001f372",
":tshirt:": u"\U0001f455",
":rat:": u"\U0001f400",
":black_medium_square:": u"\U000025fc",
":clock930:": u"\U0001f564",
":full_moon_with_face:": u"\U0001f31d",
":japanese_goblin:": u"\U0001f47a",
":restroom:": u"\U0001f6bb",
":vertical_traffic_light:": u"\U0001f6a6",
":basketball:": u"\U0001f3c0",
":cherry_blossom:": u"\U0001f338",
":low_brightness:": u"\U0001f505",
":pill:": u"\U0001f48a",
}
| mit |
juneJuly/backfuzz | plugins/imap/imap.py | 3 | 1323 | from functions import *
"""IMAP Fuzzer"""
PROPERTY={}
PROPERTY['PROTOCOL']="IMAP"
PROPERTY['NAME']=": IMAP Fuzzer"
PROPERTY['DESC']="Fuzz an IMAP server"
PROPERTY['AUTHOR']='localh0t'
user_stage = ['. login']
pass_stage = ['. login [email protected]']
stage_1 = ['. list ""','. lsub ""', '. status INBOX','. examine','. select','. create','. delete', '. rename INBOX','. fetch 1','. store 1 flags', '. copy 1:2','. subscribe','. unsubscribe','. getquotaroot','. getacl']
stage_2 = ['. list', '. status','. rename','. fetch','. store 1','. copy','. lsub']
stage_3 = ['. store']
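# Each stage lists IMAP commands to fuzz: user_stage and pass_stage exercise the
# login exchange, while stages 1-3 are sent after authenticating with a generated
# test account (see fuzzer() below).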
class FuzzerClass:
def fuzzer(self):
(username,password) = createUser()
# Stage 0
fuzzTCP()
# User Stage
sock = createSocketTCP(0,0)
fuzzCommands(sock,user_stage,"test","DoubleCommand")
# Pass Stage
sock = createSocketTCP(0,0)
fuzzCommands(sock,pass_stage,0,"SingleCommand")
# Stage 1
login = ". login " + str(username)
sock = createSocketTCP(0,0)
sendCredential(sock,login,password)
fuzzCommands(sock,stage_1,0,"SingleCommand")
# Stage 2
sock = createSocketTCP(0,0)
sendCredential(sock,login,password)
fuzzCommands(sock,stage_2,1,"DoubleCommand")
# Stage 3
sock = createSocketTCP(0,0)
sendCredential(sock,login,password)
fuzzCommands(sock,stage_3,"+flags NonJunk","DoubleCommand")
exitProgram(2) | gpl-3.0 |
tempbottle/kbengine | kbe/src/lib/python/Lib/idlelib/Percolator.py | 82 | 3244 | from idlelib.WidgetRedirector import WidgetRedirector
from idlelib.Delegator import Delegator
class Percolator:
def __init__(self, text):
# XXX would be nice to inherit from Delegator
self.text = text
self.redir = WidgetRedirector(text)
self.top = self.bottom = Delegator(text)
self.bottom.insert = self.redir.register("insert", self.insert)
self.bottom.delete = self.redir.register("delete", self.delete)
self.filters = []
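# Percolator keeps a chain of Delegator filters between self.top and self.bottom;
# insert()/delete() calls redirected from the Text widget enter at self.top and
# fall through each filter until they reach the original widget operations.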
def close(self):
while self.top is not self.bottom:
self.removefilter(self.top)
self.top = None
self.bottom.setdelegate(None); self.bottom = None
self.redir.close(); self.redir = None
self.text = None
def insert(self, index, chars, tags=None):
# Could go away if inheriting from Delegator
self.top.insert(index, chars, tags)
def delete(self, index1, index2=None):
# Could go away if inheriting from Delegator
self.top.delete(index1, index2)
def insertfilter(self, filter):
# Perhaps rename to pushfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is None
filter.setdelegate(self.top)
self.top = filter
def removefilter(self, filter):
# XXX Perhaps should only support popfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is not None
f = self.top
if f is filter:
self.top = filter.delegate
filter.setdelegate(None)
else:
while f.delegate is not filter:
assert f is not self.bottom
f.resetcache()
f = f.delegate
f.setdelegate(filter.delegate)
filter.setdelegate(None)
def _percolator(parent):
import tkinter as tk
import re
class Tracer(Delegator):
def __init__(self, name):
self.name = name
Delegator.__init__(self, None)
def insert(self, *args):
print(self.name, ": insert", args)
self.delegate.insert(*args)
def delete(self, *args):
print(self.name, ": delete", args)
self.delegate.delete(*args)
root = tk.Tk()
root.title("Test Percolator")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = tk.Text(root)
p = Percolator(text)
t1 = Tracer("t1")
t2 = Tracer("t2")
def toggle1():
if var1.get() == 0:
var1.set(1)
p.insertfilter(t1)
elif var1.get() == 1:
var1.set(0)
p.removefilter(t1)
def toggle2():
if var2.get() == 0:
var2.set(1)
p.insertfilter(t2)
elif var2.get() == 1:
var2.set(0)
p.removefilter(t2)
text.pack()
var1 = tk.IntVar()
cb1 = tk.Checkbutton(root, text="Tracer1", command=toggle1, variable=var1)
cb1.pack()
var2 = tk.IntVar()
cb2 = tk.Checkbutton(root, text="Tracer2", command=toggle2, variable=var2)
cb2.pack()
root.mainloop()
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_percolator)
| lgpl-3.0 |
jbobotek/elcano | Vision/OpticalMouse/ADNS3080ImageGrabber.py | 4 | 6354 |
import serial
import string
import math
import time
from Tkinter import *
from threading import Timer
comPort = '/dev/ttyACM0' #default com port
comPortBaud = 38400
class App:
grid_size = 15
num_pixels = 30
image_started = FALSE
image_current_row = 0;
ser = serial.Serial(comPort, comPortBaud)
pixel_dictionary = {}
def __init__(self, master):
# set main window's title
master.title("ADNS3080ImageGrabber")
frame = Frame(master)
frame.grid(row=0,column=0)
self.comPortStr = StringVar()
self.comPort = Entry(frame,textvariable=self.comPortStr)
self.comPort.grid(row=0,column=0)
self.comPort.delete(0, END)
self.comPort.insert(0,comPort)
self.button = Button(frame, text="Open", fg="red", command=self.open_serial)
self.button.grid(row=0,column=1)
self.entryStr = StringVar()
self.entry = Entry(frame,textvariable=self.entryStr)
self.entry.grid(row=0,column=2)
self.entry.delete(0, END)
self.entry.insert(0,"I")
self.send_button = Button(frame, text="Send", command=self.send_to_serial)
self.send_button.grid(row=0,column=3)
self.canvas = Canvas(master, width=self.grid_size*self.num_pixels, height=self.grid_size*self.num_pixels)
self.canvas.grid(row=1)
## start attempts to read from serial port
self.read_loop()
def __del__(self):
self.stop_read_loop()
def open_serial(self):
# close the serial port
if( self.ser.isOpen() ):
try:
self.ser.close()
except:
pass # do nothing
# open the serial port
try:
self.ser = serial.Serial(port=self.comPortStr.get(),baudrate=comPortBaud, timeout=1)
print("serial port '" + self.comPortStr.get() + "' opened!")
except:
print("failed to open serial port '" + self.comPortStr.get() + "'")
def send_to_serial(self):
if self.ser.isOpen():
self.ser.write(self.entryStr.get())
print "sent '" + self.entryStr.get() + "' to " + self.ser.portstr
else:
print "Serial port not open!"
def read_loop(self):
try:
self.t.cancel()
except:
aVar = 1 # do nothing
#print("reading")
if( self.ser.isOpen() ) :
self.read_from_serial();
self.t = Timer(0.0,self.read_loop)
self.t.start()
def stop_read_loop(self):
try:
self.t.cancel()
except:
print("failed to cancel timer")
# do nothing
def read_from_serial(self):
if( self.ser.isOpen() ):
while( self.ser.inWaiting() > 0 ):
self.line_processed = FALSE
line = self.ser.readline()
# process the line read
print("line starts")
if( line.find("-------------------------") == 0 ):
self.line_processed = TRUE
self.image_started = FALSE
self.image_current_row = 0
else:
self.image_started= TRUE
if( self.image_started == TRUE ):
if( self.image_current_row >= self.num_pixels ):
self.image_started = FALSE
else:
words = line.split()
if len(words) >= 30:
self.line_processed = TRUE
x = 0
for v in words:
try:
colour = int(v)
except:
colour = 0;
#self.display_pixel(x,self.image_current_row,colour)
self.display_pixel(self.num_pixels-1-self.image_current_row,self.num_pixels-1-x,colour)
x += 1
self.image_current_row += 1
else:
print("line " + str(self.image_current_row) + "incomplete (" + str(len(words)) + " of " + str(self.num_pixels) + "), ignoring")
#print("bad line: " + line);
if( line.find("image data") >= 0 ):
self.line_processed = TRUE
self.image_started = TRUE
self.image_current_row = 0
# clear canvas
#self.canvas.delete(ALL) # remove all items
#display the line if we couldn't understand it
# if( self.line_processed == FALSE ):
# print( line )
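# Serial frame layout inferred from the parsing above (an assumption, not a spec):
#   "image data"                  -> starts a new frame and resets the row counter
#   num_pixels lines of integers  -> one row of pixel intensities per line
#   "------------------------..." -> separator line that ends the frame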
def display_default_image(self):
# display the grid
for x in range(0, self.num_pixels-1):
for y in range(0, self.num_pixels-1):
colour = x * y / 3.53
self.display_pixel(x,y,colour)
def display_pixel(self, x, y, colour):
if( x >= 0 and x < self.num_pixels and y >= 0 and y < self.num_pixels ) :
#find the old pixel if it exists and delete it
if self.pixel_dictionary.has_key(x+y*self.num_pixels) :
self.old_pixel = self.pixel_dictionary[x+y*self.num_pixels]
self.canvas.delete(self.old_pixel)
del(self.old_pixel)
fillColour = "#%02x%02x%02x" % (colour, colour, colour)
#draw a new pixel and add to pixel_array
self.new_pixel = self.canvas.create_rectangle(x*self.grid_size, y*self.grid_size, (x+1)*self.grid_size, (y+1)*self.grid_size, fill=fillColour)
self.pixel_dictionary[x+y*self.num_pixels] = self.new_pixel
## main loop ##
root = Tk()
#root.withdraw()
#serPort = SerialHandler(comPort,comPortBaud)
# create main display
app = App(root)
app.display_default_image()
print("entering main loop!")
root.mainloop()
app.stop_read_loop()
print("exiting")
| mit |
remyroy/uwsgi | contrib/runuwsgi.py | 17 | 2577 | import django
from django.core.management.base import BaseCommand
from django.conf import settings
import os
import sys
class Command(BaseCommand):
help = "Runs this project as a uWSGI application. Requires the uwsgi binary in system path."
http_port = '8000'
socket_addr = None
def handle(self, *args, **options):
for arg in args:
k, v = arg.split('=')
if k == 'http':
if self.http_port:
self.http_port = v
elif k == 'socket':
self.http_port = None
self.socket_addr = v
# load http and python plugin: first the specific version, otherwise try with the generic one
if self.http_port:
os.environ['UWSGI_PLUGINS'] = 'http,python%d%d:python' % (sys.version_info[0], sys.version_info[1])
else:
os.environ['UWSGI_PLUGINS'] = 'python%d%d:python' % (sys.version_info[0], sys.version_info[1])
# load the Django WSGI handler
os.environ['UWSGI_MODULE'] = 'django.core.handlers.wsgi:WSGIHandler()'
# DJANGO settings
if options['settings']:
os.environ['DJANGO_SETTINGS_MODULE'] = options['settings']
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# bind the http server to the default port
if self.http_port:
os.environ['UWSGI_HTTP'] = ':%s' % self.http_port
elif self.socket_addr:
os.environ['UWSGI_SOCKET'] = self.socket_addr
# map admin static files
os.environ['UWSGI_STATIC_MAP'] = '%s=%s' % (settings.ADMIN_MEDIA_PREFIX, os.path.join(django.__path__[0], 'contrib', 'admin', 'media'))
# remove sockets/pidfile at exit
os.environ['UWSGI_VACUUM'] = '1'
# retrieve/set the PythonHome
os.environ['UWSGI_PYHOME'] = sys.prefix
# increase buffer size a bit
os.environ['UWSGI_BUFFER_SIZE'] = '8192'
# add threads for concurrency
os.environ['UWSGI_THREADS'] = '8'
# enable the master process
os.environ['UWSGI_MASTER'] = '1'
# use uWSGI python module aliasing to fix the PYTHONPATH
os.environ['UWSGI_PYMODULE_ALIAS'] = '%s=./' % os.path.basename(os.getcwd())
# exec the uwsgi binary
os.execvp('uwsgi', ('uwsgi',))
def usage(self, subcommand):
return r"""
run this project on the uWSGI server
http=PORT run the embedded http server on port PORT
socket=ADDR bind the uwsgi server on address ADDR (this will disable the http server)
"""
| gpl-2.0 |
SimonSapin/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_compat.py | 30 | 2615 | from __future__ import absolute_import, division, print_function
import sys
import pytest
from _pytest.compat import is_generator, get_real_func, safe_getattr
from _pytest.outcomes import OutcomeException
def test_is_generator():
def zap():
yield
def foo():
pass
assert is_generator(zap)
assert not is_generator(foo)
def test_real_func_loop_limit():
class Evil(object):
def __init__(self):
self.left = 1000
def __repr__(self):
return "<Evil left={left}>".format(left=self.left)
def __getattr__(self, attr):
if not self.left:
raise RuntimeError("its over")
self.left -= 1
return self
evil = Evil()
with pytest.raises(ValueError):
res = get_real_func(evil)
print(res)
@pytest.mark.skipif(
sys.version_info < (3, 4), reason="asyncio available in Python 3.4+"
)
def test_is_generator_asyncio(testdir):
testdir.makepyfile(
"""
from _pytest.compat import is_generator
import asyncio
@asyncio.coroutine
def baz():
yield from [1,2,3]
def test_is_generator_asyncio():
assert not is_generator(baz)
"""
)
# avoid importing asyncio into pytest's own process,
# which in turn imports logging (#8)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.skipif(
sys.version_info < (3, 5), reason="async syntax available in Python 3.5+"
)
def test_is_generator_async_syntax(testdir):
testdir.makepyfile(
"""
from _pytest.compat import is_generator
def test_is_generator_py35():
async def foo():
await foo()
async def bar():
pass
assert not is_generator(foo)
assert not is_generator(bar)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
class ErrorsHelper(object):
@property
def raise_exception(self):
raise Exception("exception should be catched")
@property
def raise_fail(self):
pytest.fail("fail should be catched")
def test_helper_failures():
helper = ErrorsHelper()
with pytest.raises(Exception):
helper.raise_exception
with pytest.raises(OutcomeException):
helper.raise_fail
def test_safe_getattr():
helper = ErrorsHelper()
assert safe_getattr(helper, "raise_exception", "default") == "default"
assert safe_getattr(helper, "raise_fail", "default") == "default"
| mpl-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/django/contrib/gis/sitemaps/views.py | 45 | 4353 | from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.contrib.gis.db.models.fields import GeometryField
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import get_model
from django.utils.encoding import smart_str
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
def index(request, sitemaps):
"""
This view generates a sitemap index that uses the proper view
for resolving geographic section sitemap URLs.
"""
current_site = get_current_site(request)
sites = []
protocol = request.is_secure() and 'https' or 'http'
for section, site in sitemaps.items():
if callable(site):
pages = site().paginator.num_pages
else:
pages = site.paginator.num_pages
sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section})
sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
if pages > 1:
for page in range(2, pages+1):
sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None):
"""
This view generates a sitemap with additional geographic
elements defined by Google.
"""
maps, urls = [], []
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps.append(sitemaps[section])
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
current_site = get_current_site(request)
for site in maps:
try:
if callable(site):
urls.extend(site().get_urls(page=page, site=current_site))
else:
urls.extend(site.get_urls(page=page, site=current_site))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
xml = smart_str(loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls}))
return HttpResponse(xml, mimetype='application/xml')
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The model's default manager must be GeoManager, and the field name
must be that of a geographic field.
"""
placemarks = []
klass = get_model(label, model)
if not klass:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
info = klass._meta.get_field_by_name(field_name)
if not isinstance(info[0], GeometryField):
raise Exception
except:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.ops.postgis:
# PostGIS will take care of transformation.
placemarks = klass._default_manager.using(using).kml(field_name=field_name)
else:
# There's no KML method on Oracle or MySQL, so we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.ops.oracle:
qs = klass._default_manager.using(using).transform(4326, field_name=field_name)
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
setattr(mod, 'kml', getattr(mod, field_name).kml)
placemarks.append(mod)
# Get the appropriate render function and render to the correct output (KML or KMZ).
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places' : placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
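# Illustrative urlconf wiring for the views above (patterns and the sitemaps dict
# are hypothetical, using the string-based view syntax of this Django era):
#   (r'^sitemap\.xml$', 'django.contrib.gis.sitemaps.views.index', {'sitemaps': sitemaps}),
#   (r'^sitemap-(?P<section>.+)\.xml$', 'django.contrib.gis.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
#   (r'^(?P<label>\w+)/(?P<model>\w+)/(?P<field_name>\w+)\.kml$', 'django.contrib.gis.sitemaps.views.kml'),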
| apache-2.0 |
hrashk/sympy | sympy/assumptions/refine.py | 7 | 6862 | from __future__ import print_function, division
from sympy.core import S, Add, Expr
from sympy.assumptions import Q, ask
from sympy.core.logic import fuzzy_not
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Gives the form of expr that would be obtained if symbols
in it were replaced by explicit numerical expressions satisfying
the assumptions.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
"""
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Symbol, Q, refine, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.core import Pow, Rational
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if type(expr.base) is Pow:
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set([])
odd_terms = set([])
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
def refine_exp(expr, assumptions):
"""
Handler for exponential function.
>>> from sympy import Symbol, Q, exp, I, pi
>>> from sympy.assumptions.refine import refine_exp
>>> from sympy.abc import x
>>> refine_exp(exp(pi*I*2*x), Q.real(x))
>>> refine_exp(exp(pi*I*2*x), Q.integer(x))
1
"""
arg = expr.args[0]
if arg.is_Mul:
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff), assumptions):
if ask(Q.even(coeff), assumptions):
return S.One
elif ask(Q.odd(coeff), assumptions):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half), assumptions):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half), assumptions):
return S.ImaginaryUnit
def refine_Relational(expr, assumptions):
"""
Handler for Relational
>>> from sympy.assumptions.refine import refine_Relational
>>> from sympy.assumptions.ask import Q
>>> from sympy.abc import x
>>> refine_Relational(x<0, ~Q.is_true(x<0))
False
"""
return ask(Q.is_true(expr), assumptions)
handlers_dict = {
'Abs': refine_abs,
'Pow': refine_Pow,
'exp': refine_exp,
'Equality' : refine_Relational,
'Unequality' : refine_Relational,
'GreaterThan' : refine_Relational,
'LessThan' : refine_Relational,
'StrictGreaterThan' : refine_Relational,
'StrictLessThan' : refine_Relational
}
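# refine() dispatches on expr.__class__.__name__, so additional handlers can be
# registered by extending this dict, e.g. (hypothetical handler):
#   handlers_dict['MyFunc'] = refine_MyFunc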
| bsd-3-clause |
bwrsandman/GitPython | git/test/test_commit.py | 12 | 12863 | # -*- coding: utf-8 -*-
# test_commit.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import print_function
from git.test.lib import (
TestBase,
assert_equal,
assert_not_equal,
with_rw_repo,
fixture_path,
StringProcessAdapter
)
from git import (
Commit,
Actor,
)
from gitdb import IStream
from gitdb.test.lib import with_rw_directory
from git.compat import (
string_types,
text_type
)
from git import Repo
from git.repo.fun import touch
from io import BytesIO
import time
import sys
import re
import os
def assert_commit_serialization(rwrepo, commit_id, print_performance_info=False):
"""traverse all commits in the history of commit identified by commit_id and check
if the serialization works.
:param print_performance_info: if True, we will show how fast we are"""
ns = 0 # num serializations
nds = 0 # num deserializations
st = time.time()
for cm in rwrepo.commit(commit_id).traverse():
nds += 1
# assert that we deserialize commits correctly, hence we get the same
# sha on serialization
stream = BytesIO()
cm._serialize(stream)
ns += 1
streamlen = stream.tell()
stream.seek(0)
istream = rwrepo.odb.store(IStream(Commit.type, streamlen, stream))
assert istream.hexsha == cm.hexsha.encode('ascii')
nc = Commit(rwrepo, Commit.NULL_BIN_SHA, cm.tree,
cm.author, cm.authored_date, cm.author_tz_offset,
cm.committer, cm.committed_date, cm.committer_tz_offset,
cm.message, cm.parents, cm.encoding)
assert nc.parents == cm.parents
stream = BytesIO()
nc._serialize(stream)
ns += 1
streamlen = stream.tell()
stream.seek(0)
# reuse istream
istream.size = streamlen
istream.stream = stream
istream.binsha = None
nc.binsha = rwrepo.odb.store(istream).binsha
# if it worked, we have exactly the same contents !
assert nc.hexsha == cm.hexsha
# END check commits
elapsed = time.time() - st
if print_performance_info:
print("Serialized %i and deserialized %i commits in %f s ( (%f, %f) commits / s"
% (ns, nds, elapsed, ns / elapsed, nds / elapsed), file=sys.stderr)
# END handle performance info
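# Example call (tag taken from the tests below): assert_commit_serialization(rwrepo, '0.1.6')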
class TestCommit(TestBase):
def test_bake(self):
commit = self.rorepo.commit('2454ae89983a4496a445ce347d7a41c0bb0ea7ae')
# commits have no dict
self.failUnlessRaises(AttributeError, setattr, commit, 'someattr', 1)
commit.author # bake
assert_equal("Sebastian Thiel", commit.author.name)
assert_equal("[email protected]", commit.author.email)
assert commit.author == commit.committer
assert isinstance(commit.authored_date, int) and isinstance(commit.committed_date, int)
assert isinstance(commit.author_tz_offset, int) and isinstance(commit.committer_tz_offset, int)
assert commit.message == "Added missing information to docstrings of commit and stats module\n"
def test_stats(self):
commit = self.rorepo.commit('33ebe7acec14b25c5f84f35a664803fcab2f7781')
stats = commit.stats
def check_entries(d):
assert isinstance(d, dict)
for key in ("insertions", "deletions", "lines"):
assert key in d
# END assertion helper
assert stats.files
assert stats.total
check_entries(stats.total)
assert "files" in stats.total
for filepath, d in stats.files.items():
check_entries(d)
# END for each stated file
# assure data is parsed properly
michael = Actor._from_string("Michael Trier <[email protected]>")
assert commit.author == michael
assert commit.committer == michael
assert commit.authored_date == 1210193388
assert commit.committed_date == 1210193388
assert commit.author_tz_offset == 14400, commit.author_tz_offset
assert commit.committer_tz_offset == 14400, commit.committer_tz_offset
assert commit.message == "initial project\n"
def test_unicode_actor(self):
# assure we can parse unicode actors correctly
name = u"Üäöß ÄußÉ"
assert len(name) == 9
special = Actor._from_string(u"%s <[email protected]>" % name)
assert special.name == name
assert isinstance(special.name, text_type)
def test_traversal(self):
start = self.rorepo.commit("a4d06724202afccd2b5c54f81bcf2bf26dea7fff")
first = self.rorepo.commit("33ebe7acec14b25c5f84f35a664803fcab2f7781")
p0 = start.parents[0]
p1 = start.parents[1]
p00 = p0.parents[0]
p10 = p1.parents[0]
# basic branch first, depth first
dfirst = start.traverse(branch_first=False)
bfirst = start.traverse(branch_first=True)
assert next(dfirst) == p0
assert next(dfirst) == p00
assert next(bfirst) == p0
assert next(bfirst) == p1
assert next(bfirst) == p00
assert next(bfirst) == p10
# at some point, both iterations should stop
assert list(bfirst)[-1] == first
stoptraverse = self.rorepo.commit("254d04aa3180eb8b8daf7b7ff25f010cd69b4e7d").traverse(as_edge=True)
l = list(stoptraverse)
assert len(l[0]) == 2
# ignore self
assert next(start.traverse(ignore_self=False)) == start
# depth
assert len(list(start.traverse(ignore_self=False, depth=0))) == 1
# prune
assert next(start.traverse(branch_first=1, prune=lambda i, d: i == p0)) == p1
# predicate
assert next(start.traverse(branch_first=1, predicate=lambda i, d: i == p1)) == p1
# traversal should stop when the beginning is reached
self.failUnlessRaises(StopIteration, next, first.traverse())
# parents of the first commit should be empty ( as the only parent has a null
# sha )
assert len(first.parents) == 0
def test_iteration(self):
# we can iterate commits
all_commits = Commit.list_items(self.rorepo, self.rorepo.head)
assert all_commits
assert all_commits == list(self.rorepo.iter_commits())
# this includes merge commits
mcommit = self.rorepo.commit('d884adc80c80300b4cc05321494713904ef1df2d')
assert mcommit in all_commits
# we can limit the result to paths
ltd_commits = list(self.rorepo.iter_commits(paths='CHANGES'))
assert ltd_commits and len(ltd_commits) < len(all_commits)
# show commits of multiple paths, resulting in a union of commits
less_ltd_commits = list(Commit.iter_items(self.rorepo, 'master', paths=('CHANGES', 'AUTHORS')))
assert len(ltd_commits) < len(less_ltd_commits)
def test_iter_items(self):
# pretty not allowed
self.failUnlessRaises(ValueError, Commit.iter_items, self.rorepo, 'master', pretty="raw")
def test_rev_list_bisect_all(self):
"""
'git rev-list --bisect-all' returns additional information
in the commit header. This test ensures that we properly parse it.
"""
revs = self.rorepo.git.rev_list('933d23bf95a5bd1624fbcdf328d904e1fa173474',
first_parent=True,
bisect_all=True)
commits = Commit._iter_from_process_or_stream(self.rorepo, StringProcessAdapter(revs.encode('ascii')))
expected_ids = (
'7156cece3c49544abb6bf7a0c218eb36646fad6d',
'1f66cfbbce58b4b552b041707a12d437cc5f400a',
'33ebe7acec14b25c5f84f35a664803fcab2f7781',
'933d23bf95a5bd1624fbcdf328d904e1fa173474'
)
for sha1, commit in zip(expected_ids, commits):
assert_equal(sha1, commit.hexsha)
@with_rw_directory
def test_ambiguous_arg_iteration(self, rw_dir):
rw_repo = Repo.init(os.path.join(rw_dir, 'test_ambiguous_arg'))
path = os.path.join(rw_repo.working_tree_dir, 'master')
touch(path)
rw_repo.index.add([path])
rw_repo.index.commit('initial commit')
list(rw_repo.iter_commits(rw_repo.head.ref)) # should fail unless bug is fixed
def test_count(self):
assert self.rorepo.tag('refs/tags/0.1.5').commit.count() == 143
def test_list(self):
# This doesn't work anymore, as we will either attempt getattr with bytes, or compare 20 byte string
# with actual 20 byte bytes. This usage makes no sense anyway
assert isinstance(Commit.list_items(self.rorepo, '0.1.5', max_count=5)[
'5117c9c8a4d3af19a9958677e45cda9269de1541'], Commit)
def test_str(self):
commit = Commit(self.rorepo, Commit.NULL_BIN_SHA)
assert_equal(Commit.NULL_HEX_SHA, str(commit))
def test_repr(self):
commit = Commit(self.rorepo, Commit.NULL_BIN_SHA)
assert_equal('<git.Commit "%s">' % Commit.NULL_HEX_SHA, repr(commit))
def test_equality(self):
commit1 = Commit(self.rorepo, Commit.NULL_BIN_SHA)
commit2 = Commit(self.rorepo, Commit.NULL_BIN_SHA)
commit3 = Commit(self.rorepo, "\1" * 20)
assert_equal(commit1, commit2)
assert_not_equal(commit2, commit3)
def test_iter_parents(self):
# should return all but ourselves, even if skip is defined
c = self.rorepo.commit('0.1.5')
for skip in (0, 1):
piter = c.iter_parents(skip=skip)
first_parent = next(piter)
assert first_parent != c
assert first_parent == c.parents[0]
# END for each
def test_name_rev(self):
name_rev = self.rorepo.head.commit.name_rev
assert isinstance(name_rev, string_types)
@with_rw_repo('HEAD', bare=True)
def test_serialization(self, rwrepo):
# create all commits of our repo
assert_commit_serialization(rwrepo, '0.1.6')
def test_serialization_unicode_support(self):
assert Commit.default_encoding.lower() == 'utf-8'
# create a commit with unicode in the message, and the author's name
# Verify its serialization and deserialization
cmt = self.rorepo.commit('0.1.6')
assert isinstance(cmt.message, text_type) # it automatically decodes it as such
assert isinstance(cmt.author.name, text_type) # same here
cmt.message = u"üäêèß"
assert len(cmt.message) == 5
cmt.author.name = u"äüß"
assert len(cmt.author.name) == 3
cstream = BytesIO()
cmt._serialize(cstream)
cstream.seek(0)
assert len(cstream.getvalue())
ncmt = Commit(self.rorepo, cmt.binsha)
ncmt._deserialize(cstream)
assert cmt.author.name == ncmt.author.name
assert cmt.message == ncmt.message
# actually, it can't be printed in a shell as repr wants to have ascii only
# it appears
cmt.author.__repr__()
def test_gpgsig(self):
cmt = self.rorepo.commit()
cmt._deserialize(open(fixture_path('commit_with_gpgsig'), 'rb'))
fixture_sig = """-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)
iQIcBAABAgAGBQJRk8zMAAoJEG5mS6x6i9IjsTEP/0v2Wx/i7dqyKban6XMIhVdj
uI0DycfXqnCCZmejidzeao+P+cuK/ZAA/b9fU4MtwkDm2USvnIOrB00W0isxsrED
sdv6uJNa2ybGjxBolLrfQcWutxGXLZ1FGRhEvkPTLMHHvVriKoNFXcS7ewxP9MBf
NH97K2wauqA+J4BDLDHQJgADCOmLrGTAU+G1eAXHIschDqa6PZMH5nInetYZONDh
3SkOOv8VKFIF7gu8X7HC+7+Y8k8U0TW0cjlQ2icinwCc+KFoG6GwXS7u/VqIo1Yp
Tack6sxIdK7NXJhV5gAeAOMJBGhO0fHl8UUr96vGEKwtxyZhWf8cuIPOWLk06jA0
g9DpLqmy/pvyRfiPci+24YdYRBua/vta+yo/Lp85N7Hu/cpIh+q5WSLvUlv09Dmo
TTTG8Hf6s3lEej7W8z2xcNZoB6GwXd8buSDU8cu0I6mEO9sNtAuUOHp2dBvTA6cX
PuQW8jg3zofnx7CyNcd3KF3nh2z8mBcDLgh0Q84srZJCPRuxRcp9ylggvAG7iaNd
XMNvSK8IZtWLkx7k3A3QYt1cN4y1zdSHLR2S+BVCEJea1mvUE+jK5wiB9S4XNtKm
BX/otlTa8pNE3fWYBxURvfHnMY4i3HQT7Bc1QjImAhMnyo2vJk4ORBJIZ1FTNIhJ
JzJMZDRLQLFvnzqZuCjE
=przd
-----END PGP SIGNATURE-----"""
assert cmt.gpgsig == fixture_sig
cmt.gpgsig = "<test\ndummy\nsig>"
assert cmt.gpgsig != fixture_sig
cstream = BytesIO()
cmt._serialize(cstream)
assert re.search(r"^gpgsig <test\n dummy\n sig>$", cstream.getvalue().decode('ascii'), re.MULTILINE)
cstream.seek(0)
cmt.gpgsig = None
cmt._deserialize(cstream)
assert cmt.gpgsig == "<test\ndummy\nsig>"
cmt.gpgsig = None
cstream = BytesIO()
cmt._serialize(cstream)
assert not re.search(r"^gpgsig ", cstream.getvalue().decode('ascii'), re.MULTILINE)
| bsd-3-clause |
shacker/django | tests/template_tests/filter_tests/test_striptags.py | 197 | 1632 | from django.template.defaultfilters import striptags
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.safestring import mark_safe
from ..utils import setup
class StriptagsTests(SimpleTestCase):
@setup({'striptags01': '{{ a|striptags }} {{ b|striptags }}'})
def test_striptags01(self):
output = self.engine.render_to_string(
'striptags01',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
@setup({'striptags02': '{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}'})
def test_striptags02(self):
output = self.engine.render_to_string(
'striptags02',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
class FunctionTests(SimpleTestCase):
def test_strip(self):
self.assertEqual(
striptags('some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags'),
'some html with alert("You smell") disallowed tags',
)
def test_non_string_input(self):
self.assertEqual(striptags(123), '123')
def test_strip_lazy_string(self):
self.assertEqual(
striptags(lazystr('some <b>html</b> with <script>alert("Hello")</script> disallowed <img /> tags')),
'some html with alert("Hello") disallowed tags',
)
| bsd-3-clause |
xq262144/hue | desktop/core/ext-py/pysaml2-2.4.0/example/idp2_repoze/modules/login.mako.py | 31 | 2690 | # -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1367126126.936375
_template_filename='htdocs/login.mako'
_template_uri='login.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
_exports = []
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'root.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
redirect_uri = context.get('redirect_uri', UNDEFINED)
key = context.get('key', UNDEFINED)
action = context.get('action', UNDEFINED)
authn_reference = context.get('authn_reference', UNDEFINED)
login = context.get('login', UNDEFINED)
password = context.get('password', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n<h1>Please log in</h1>\n<p class="description">\n To register it\'s quite simple: enter a login and a password\n</p>\n\n<form action="')
# SOURCE LINE 8
__M_writer(unicode(action))
__M_writer(u'" method="post">\n <input type="hidden" name="key" value="')
# SOURCE LINE 9
__M_writer(unicode(key))
__M_writer(u'"/>\n <input type="hidden" name="authn_reference" value="')
# SOURCE LINE 10
__M_writer(unicode(authn_reference))
__M_writer(u'"/>\n <input type="hidden" name="redirect_uri" value="')
# SOURCE LINE 11
__M_writer(unicode(redirect_uri))
__M_writer(u'"/>\n\n <div class="label">\n <label for="login">Username</label>\n </div>\n <div>\n <input type="text" name="login" value="')
# SOURCE LINE 17
__M_writer(unicode(login))
__M_writer(u'"/><br/>\n </div>\n\n <div class="label">\n <label for="password">Password</label>\n </div>\n <div>\n <input type="password" name="password"\n value="')
# SOURCE LINE 25
__M_writer(unicode(password))
__M_writer(u'"/>\n </div>\n\n <input class="submit" type="submit" name="form.submitted" value="Log In"/>\n</form>\n')
return ''
finally:
context.caller_stack._pop_frame()
| apache-2.0 |
chirilo/mozillians | vendor-local/lib/python/tablib/packages/odf/elementtypes.py | 83 | 10218 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import *
# Inline element don't cause a box
# They are analogous to the HTML elements SPAN, B, I etc.
inline_elements = (
(TEXTNS,u'a'),
(TEXTNS,u'author-initials'),
(TEXTNS,u'author-name'),
(TEXTNS,u'bibliography-mark'),
(TEXTNS,u'bookmark-ref'),
(TEXTNS,u'chapter'),
(TEXTNS,u'character-count'),
(TEXTNS,u'conditional-text'),
(TEXTNS,u'creation-date'),
(TEXTNS,u'creation-time'),
(TEXTNS,u'creator'),
(TEXTNS,u'database-display'),
(TEXTNS,u'database-name'),
(TEXTNS,u'database-next'),
(TEXTNS,u'database-row-number'),
(TEXTNS,u'database-row-select'),
(TEXTNS,u'date'),
(TEXTNS,u'dde-connection'),
(TEXTNS,u'description'),
(TEXTNS,u'editing-cycles'),
(TEXTNS,u'editing-duration'),
(TEXTNS,u'execute-macro'),
(TEXTNS,u'expression'),
(TEXTNS,u'file-name'),
(TEXTNS,u'hidden-paragraph'),
(TEXTNS,u'hidden-text'),
(TEXTNS,u'image-count'),
(TEXTNS,u'initial-creator'),
(TEXTNS,u'keywords'),
(TEXTNS,u'measure'),
(TEXTNS,u'modification-date'),
(TEXTNS,u'modification-time'),
(TEXTNS,u'note-ref'),
(TEXTNS,u'object-count'),
(TEXTNS,u'page-continuation'),
(TEXTNS,u'page-count'),
(TEXTNS,u'page-number'),
(TEXTNS,u'page-variable-get'),
(TEXTNS,u'page-variable-set'),
(TEXTNS,u'paragraph-count'),
(TEXTNS,u'placeholder'),
(TEXTNS,u'print-date'),
(TEXTNS,u'printed-by'),
(TEXTNS,u'print-time'),
(TEXTNS,u'reference-ref'),
(TEXTNS,u'ruby'),
(TEXTNS,u'ruby-base'),
(TEXTNS,u'ruby-text'),
(TEXTNS,u'script'),
(TEXTNS,u'sender-city'),
(TEXTNS,u'sender-company'),
(TEXTNS,u'sender-country'),
(TEXTNS,u'sender-email'),
(TEXTNS,u'sender-fax'),
(TEXTNS,u'sender-firstname'),
(TEXTNS,u'sender-initials'),
(TEXTNS,u'sender-lastname'),
(TEXTNS,u'sender-phone-private'),
(TEXTNS,u'sender-phone-work'),
(TEXTNS,u'sender-position'),
(TEXTNS,u'sender-postal-code'),
(TEXTNS,u'sender-state-or-province'),
(TEXTNS,u'sender-street'),
(TEXTNS,u'sender-title'),
(TEXTNS,u'sequence'),
(TEXTNS,u'sequence-ref'),
(TEXTNS,u'sheet-name'),
(TEXTNS,u'span'),
(TEXTNS,u'subject'),
(TEXTNS,u'table-count'),
(TEXTNS,u'table-formula'),
(TEXTNS,u'template-name'),
(TEXTNS,u'text-input'),
(TEXTNS,u'time'),
(TEXTNS,u'title'),
(TEXTNS,u'user-defined'),
(TEXTNS,u'user-field-get'),
(TEXTNS,u'user-field-input'),
(TEXTNS,u'variable-get'),
(TEXTNS,u'variable-input'),
(TEXTNS,u'variable-set'),
(TEXTNS,u'word-count'),
)
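# Quick membership example: (TEXTNS, u'span') is an inline element, while
# (TEXTNS, u'p') appears in block_elements below.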
# It is almost impossible to determine what elements are block elements.
# There are so many that don't fit the form
block_elements = (
(TEXTNS,u'h'),
(TEXTNS,u'p'),
(TEXTNS,u'list'),
(TEXTNS,u'list-item'),
(TEXTNS,u'section'),
)
declarative_elements = (
(OFFICENS,u'font-face-decls'),
(PRESENTATIONNS,u'date-time-decl'),
(PRESENTATIONNS,u'footer-decl'),
(PRESENTATIONNS,u'header-decl'),
(TABLENS,u'table-template'),
(TEXTNS,u'alphabetical-index-entry-template'),
(TEXTNS,u'alphabetical-index-source'),
(TEXTNS,u'bibliography-entry-template'),
(TEXTNS,u'bibliography-source'),
(TEXTNS,u'dde-connection-decls'),
(TEXTNS,u'illustration-index-entry-template'),
(TEXTNS,u'illustration-index-source'),
(TEXTNS,u'index-source-styles'),
(TEXTNS,u'index-title-template'),
(TEXTNS,u'note-continuation-notice-backward'),
(TEXTNS,u'note-continuation-notice-forward'),
(TEXTNS,u'notes-configuration'),
(TEXTNS,u'object-index-entry-template'),
(TEXTNS,u'object-index-source'),
(TEXTNS,u'sequence-decls'),
(TEXTNS,u'table-index-entry-template'),
(TEXTNS,u'table-index-source'),
(TEXTNS,u'table-of-content-entry-template'),
(TEXTNS,u'table-of-content-source'),
(TEXTNS,u'user-field-decls'),
(TEXTNS,u'user-index-entry-template'),
(TEXTNS,u'user-index-source'),
(TEXTNS,u'variable-decls'),
)
empty_elements = (
(ANIMNS,u'animate'),
(ANIMNS,u'animateColor'),
(ANIMNS,u'animateMotion'),
(ANIMNS,u'animateTransform'),
(ANIMNS,u'audio'),
(ANIMNS,u'param'),
(ANIMNS,u'set'),
(ANIMNS,u'transitionFilter'),
(CHARTNS,u'categories'),
(CHARTNS,u'data-point'),
(CHARTNS,u'domain'),
(CHARTNS,u'error-indicator'),
(CHARTNS,u'floor'),
(CHARTNS,u'grid'),
(CHARTNS,u'legend'),
(CHARTNS,u'mean-value'),
(CHARTNS,u'regression-curve'),
(CHARTNS,u'stock-gain-marker'),
(CHARTNS,u'stock-loss-marker'),
(CHARTNS,u'stock-range-line'),
(CHARTNS,u'symbol-image'),
(CHARTNS,u'wall'),
(DR3DNS,u'cube'),
(DR3DNS,u'extrude'),
(DR3DNS,u'light'),
(DR3DNS,u'rotate'),
(DR3DNS,u'sphere'),
(DRAWNS,u'contour-path'),
(DRAWNS,u'contour-polygon'),
(DRAWNS,u'equation'),
(DRAWNS,u'fill-image'),
(DRAWNS,u'floating-frame'),
(DRAWNS,u'glue-point'),
(DRAWNS,u'gradient'),
(DRAWNS,u'handle'),
(DRAWNS,u'hatch'),
(DRAWNS,u'layer'),
(DRAWNS,u'marker'),
(DRAWNS,u'opacity'),
(DRAWNS,u'page-thumbnail'),
(DRAWNS,u'param'),
(DRAWNS,u'stroke-dash'),
(FORMNS,u'connection-resource'),
(FORMNS,u'list-value'),
(FORMNS,u'property'),
(MANIFESTNS,u'algorithm'),
(MANIFESTNS,u'key-derivation'),
(METANS,u'auto-reload'),
(METANS,u'document-statistic'),
(METANS,u'hyperlink-behaviour'),
(METANS,u'template'),
(NUMBERNS,u'am-pm'),
(NUMBERNS,u'boolean'),
(NUMBERNS,u'day'),
(NUMBERNS,u'day-of-week'),
(NUMBERNS,u'era'),
(NUMBERNS,u'fraction'),
(NUMBERNS,u'hours'),
(NUMBERNS,u'minutes'),
(NUMBERNS,u'month'),
(NUMBERNS,u'quarter'),
(NUMBERNS,u'scientific-number'),
(NUMBERNS,u'seconds'),
(NUMBERNS,u'text-content'),
(NUMBERNS,u'week-of-year'),
(NUMBERNS,u'year'),
(OFFICENS,u'dde-source'),
(PRESENTATIONNS,u'date-time'),
(PRESENTATIONNS,u'footer'),
(PRESENTATIONNS,u'header'),
(PRESENTATIONNS,u'placeholder'),
(PRESENTATIONNS,u'play'),
(PRESENTATIONNS,u'show'),
(PRESENTATIONNS,u'sound'),
(SCRIPTNS,u'event-listener'),
(STYLENS,u'column'),
(STYLENS,u'column-sep'),
(STYLENS,u'drop-cap'),
(STYLENS,u'footnote-sep'),
(STYLENS,u'list-level-properties'),
(STYLENS,u'map'),
(STYLENS,u'ruby-properties'),
(STYLENS,u'table-column-properties'),
(STYLENS,u'tab-stop'),
(STYLENS,u'text-properties'),
(SVGNS,u'definition-src'),
(SVGNS,u'font-face-format'),
(SVGNS,u'font-face-name'),
(SVGNS,u'stop'),
(TABLENS,u'body'),
(TABLENS,u'cell-address'),
(TABLENS,u'cell-range-source'),
(TABLENS,u'change-deletion'),
(TABLENS,u'consolidation'),
(TABLENS,u'database-source-query'),
(TABLENS,u'database-source-sql'),
(TABLENS,u'database-source-table'),
(TABLENS,u'data-pilot-display-info'),
(TABLENS,u'data-pilot-field-reference'),
(TABLENS,u'data-pilot-group-member'),
(TABLENS,u'data-pilot-layout-info'),
(TABLENS,u'data-pilot-member'),
(TABLENS,u'data-pilot-sort-info'),
(TABLENS,u'data-pilot-subtotal'),
(TABLENS,u'dependency'),
(TABLENS,u'error-macro'),
(TABLENS,u'even-columns'),
(TABLENS,u'even-rows'),
(TABLENS,u'filter-condition'),
(TABLENS,u'first-column'),
(TABLENS,u'first-row'),
(TABLENS,u'highlighted-range'),
(TABLENS,u'insertion-cut-off'),
(TABLENS,u'iteration'),
(TABLENS,u'label-range'),
(TABLENS,u'last-column'),
(TABLENS,u'last-row'),
(TABLENS,u'movement-cut-off'),
(TABLENS,u'named-expression'),
(TABLENS,u'named-range'),
(TABLENS,u'null-date'),
(TABLENS,u'odd-columns'),
(TABLENS,u'odd-rows'),
(TABLENS,u'operation'),
(TABLENS,u'scenario'),
(TABLENS,u'sort-by'),
(TABLENS,u'sort-groups'),
(TABLENS,u'source-range-address'),
(TABLENS,u'source-service'),
(TABLENS,u'subtotal-field'),
(TABLENS,u'table-column'),
(TABLENS,u'table-source'),
(TABLENS,u'target-range-address'),
(TEXTNS,u'alphabetical-index-auto-mark-file'),
(TEXTNS,u'alphabetical-index-mark'),
(TEXTNS,u'alphabetical-index-mark-end'),
(TEXTNS,u'alphabetical-index-mark-start'),
(TEXTNS,u'bookmark'),
(TEXTNS,u'bookmark-end'),
(TEXTNS,u'bookmark-start'),
(TEXTNS,u'change'),
(TEXTNS,u'change-end'),
(TEXTNS,u'change-start'),
(TEXTNS,u'dde-connection-decl'),
(TEXTNS,u'index-entry-bibliography'),
(TEXTNS,u'index-entry-chapter'),
(TEXTNS,u'index-entry-link-end'),
(TEXTNS,u'index-entry-link-start'),
(TEXTNS,u'index-entry-page-number'),
(TEXTNS,u'index-entry-tab-stop'),
(TEXTNS,u'index-entry-text'),
(TEXTNS,u'index-source-style'),
(TEXTNS,u'line-break'),
(TEXTNS,u'page'),
(TEXTNS,u'reference-mark'),
(TEXTNS,u'reference-mark-end'),
(TEXTNS,u'reference-mark-start'),
(TEXTNS,u's'),
(TEXTNS,u'section-source'),
(TEXTNS,u'sequence-decl'),
(TEXTNS,u'soft-page-break'),
(TEXTNS,u'sort-key'),
(TEXTNS,u'tab'),
(TEXTNS,u'toc-mark'),
(TEXTNS,u'toc-mark-end'),
(TEXTNS,u'toc-mark-start'),
(TEXTNS,u'user-field-decl'),
(TEXTNS,u'user-index-mark'),
(TEXTNS,u'user-index-mark-end'),
(TEXTNS,u'user-index-mark-start'),
(TEXTNS,u'variable-decl')
)
| bsd-3-clause |
huichen-cs/learnsorting | quick_sort_unittest.py | 1 | 1092 | import unittest
from quick_sort_concept import quick_sort
class QuickSortTest(unittest.TestCase):
def test_quick_sort_random_1(self):
data = [4, 1, 10, 4, 4, 3, 9, 4, 1, 9]
expected = [1, 1, 3, 4, 4, 4, 4, 9, 9, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
def test_quick_sort_random_2(self):
data = [10, 3, 10, 9, 7, 9, 6, 2, 7, 7]
expected = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
def test_quick_sort_sorted_asc(self):
data = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
expected = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
def test_quick_sort_sorted_des(self):
data = [10, 10, 9, 9, 7, 7, 7, 6, 3, 2]
expected = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
if __name__ == "__main__":
unittest.main() | gpl-3.0 |
OpenTrons/opentrons_sdk | api/src/opentrons/system/wifi.py | 3 | 6702 | import hashlib
import logging
import os
import shutil
from typing import Generator, Optional, Dict, Any
from dataclasses import dataclass
from opentrons.config import CONFIG
from opentrons.system import nmcli
log = logging.getLogger(__name__)
class ConfigureArgsError(Exception):
pass
EAP_CONFIG_SHAPE = {
'options': [
{'name': method.qualified_name(),
'displayName': method.display_name(),
'options': [{k: v for k, v in arg.items()
if k in ['name',
'displayName',
'required',
'type']}
for arg in method.args()]}
for method in nmcli.EAP_TYPES]
}
@dataclass(frozen=True)
class Key:
directory: str
file: str
@dataclass(frozen=True)
class AddKeyResult:
created: bool
key: Key
def add_key(key_file_name: str, key_contents: bytes) -> AddKeyResult:
"""
Add a key file (for later use in EAP config) to the system.
"""
keys_dir = CONFIG['wifi_keys_dir']
hasher = hashlib.sha256()
hasher.update(key_contents)
key_hash = hasher.hexdigest()
if key_hash in os.listdir(keys_dir):
files = os.listdir(os.path.join(keys_dir, key_hash))
if files:
return AddKeyResult(created=False,
key=Key(directory=key_hash,
file=files[0]))
else:
log.warning(
"Key directory with nothing in it: {}"
.format(key_hash))
os.rmdir(os.path.join(keys_dir, key_hash))
key_hash_path = os.path.join(keys_dir, key_hash)
os.mkdir(key_hash_path)
with open(os.path.join(key_hash_path,
os.path.basename(key_file_name)), 'wb') as f:
f.write(key_contents)
return AddKeyResult(created=True,
key=Key(directory=key_hash,
file=key_file_name))
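# Illustrative use of add_key (values made up): uploading identical bytes twice
# reuses the sha256-named directory and reports created=False the second time.
#   first = add_key('client.crt', cert_bytes)    # created=True
#   again = add_key('client.crt', cert_bytes)    # created=False, same key.directory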
def list_keys() -> Generator[Key, None, None]:
"""
List wifi keys known to the system.
:return: A generator yielding Key objects
"""
keys_dir = CONFIG['wifi_keys_dir']
# TODO(mc, 2018-10-24): add last modified info to keys for sort purposes
for path in os.listdir(keys_dir):
full_path = os.path.join(keys_dir, path)
if os.path.isdir(full_path):
in_path = os.listdir(full_path)
if len(in_path) > 1:
log.warning("Garbage in key dir for key {}".format(path))
yield Key(directory=path,
file=in_path[0])
else:
log.warning("Garbage in wifi keys dir: {}".format(full_path))
def remove_key(requested_hash: str) -> Optional[str]:
"""
Try to delete key file
:param requested_hash: The hash to delete
:return: The name of the deleted file or None if not found
"""
keys_dir = CONFIG['wifi_keys_dir']
available_keys = os.listdir(keys_dir)
if requested_hash not in available_keys:
return None
key_path = os.path.join(keys_dir, requested_hash)
name = os.listdir(key_path)[0]
shutil.rmtree(key_path)
return name
def get_key_file(key: str) -> str:
"""
Get the full path of a key file
:param key: The key to look for
:return: the path
"""
keys_dir = CONFIG['wifi_keys_dir']
available_keys = os.listdir(keys_dir)
if key not in available_keys:
raise ConfigureArgsError(f'Key ID {key} is not valid on the system')
files_in_dir = os.listdir(os.path.join(keys_dir, key))
if len(files_in_dir) > 1:
raise OSError(
f'Key ID {key} has multiple files, try deleting and re-uploading'
)
return os.path.join(keys_dir, key, files_in_dir[0])
def _eap_check_no_extra_args(
config: Dict[str, Any], options: Any):
# options is an Any because the type annotation for EAP_CONFIG_SHAPE itself
# can’t quite express the type properly because of the inference from the
# dict annotation.
"""Check for args that are not required for this method (to aid debugging)
``config`` should be the user config.
``options`` should be the options sub-member for the eap method.
Before this method is called, the validity of the 'eapType' key should be
established.
"""
arg_names = [k for k in config.keys() if k != 'eapType']
valid_names = [o['name'] for o in options]
for an in arg_names:
if an not in valid_names:
raise ConfigureArgsError(
'Option {} is not valid for EAP method {}'
.format(an, config['eapType']))
def _eap_check_option_ok(opt: Dict[str, str], config: Dict[str, Any]):
"""
Check that a given EAP option is in the user config (if required)
and, if specified, is the right type.
``opt`` should be an options dict from EAP_CONFIG_SHAPE.
``config`` should be the user config dict.
Before this method is called, the validity of the eapType key should be
established.
"""
if opt['name'] not in config:
if opt['required']:
raise ConfigureArgsError(
'Required argument {} for eap method {} not present'
.format(opt['displayName'], config['eapType']))
else:
return
name = opt['name']
o_type = opt['type']
arg = config[name]
if name in config:
if o_type in ('string', 'password') and not isinstance(arg, str):
raise ConfigureArgsError('Option {} should be a str'
.format(name))
elif o_type == 'file' and not isinstance(arg, str):
raise ConfigureArgsError('Option {} must be a str'
.format(name))
def eap_check_config(eap_config: Dict[str, Any]) -> Dict[str, Any]:
"""Check the eap specific args, and replace values where needed."""
eap_type = eap_config.get('eapType')
for method in EAP_CONFIG_SHAPE['options']:
if method['name'] == eap_type:
options = method['options']
break
else:
raise ConfigureArgsError('EAP method {} is not valid'.format(eap_type))
_eap_check_no_extra_args(eap_config, options)
for opt in options: # type: ignore
# Ignoring most types to do with EAP_CONFIG_SHAPE because of issues
# wth type inference for dict comprehensions
_eap_check_option_ok(opt, eap_config)
if opt['type'] == 'file' and opt['name'] in eap_config:
# Special work for file: rewrite from key id to path
eap_config[opt['name']] = get_key_file(eap_config[opt['name']])
return eap_config
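# Sketch of an input accepted by eap_check_config(); the method name and option
# shown are hypothetical and must exist in nmcli.EAP_TYPES on a real system:
#   eap_check_config({'eapType': 'peap/mschapv2', 'identity': 'robot-1'})
# Unknown keys raise ConfigureArgsError via _eap_check_no_extra_args().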
| apache-2.0 |
fitermay/intellij-community | python/lib/Lib/site-packages/django/contrib/syndication/views.py | 87 | 8404 | import datetime
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, Template, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_unicode, iri_to_uri, smart_unicode
from django.utils.html import escape
def add_domain(domain, url, secure=False):
if not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
# 'url' must already be ASCII and URL-quoted, so no need for encoding
# conversions here.
if secure:
protocol = 'https'
else:
protocol = 'http'
url = iri_to_uri(u'%s://%s%s' % (protocol, domain, url))
return url
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_unicode(item))
def item_description(self, item):
return force_unicode(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE.decode(),
feed_url = add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_unicode(enc_url),
length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and not pubdate.tzinfo:
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
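# Minimal illustrative subclass (model and fields are hypothetical). get_feed()
# resolves each attribute through __get_dynamic_attr, so both plain attributes
# and callables work:
#   class LatestEntriesFeed(Feed):
#       title = "Latest entries"
#       link = "/latest/"
#       description = "The most recent entries."
#       def items(self):
#           return Entry.objects.order_by('-pub_date')[:5]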
def feed(request, url, feed_dict=None):
"""Provided for backwards compatibility."""
import warnings
warnings.warn('The syndication feed() view is deprecated. Please use the '
'new class based view API.',
category=DeprecationWarning)
if not feed_dict:
raise Http404("No feeds are registered.")
try:
slug, param = url.split('/', 1)
except ValueError:
slug, param = url, ''
try:
f = feed_dict[slug]
except KeyError:
raise Http404("Slug %r isn't registered." % slug)
try:
feedgen = f(slug, request).get_feed(param)
except FeedDoesNotExist:
raise Http404("Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response
| apache-2.0 |
alienity/three.js | utils/exporters/blender/addons/io_three/exporter/api/mesh.py | 124 | 23228 | """
Blender API for querying mesh data. Animation data is also
handled here since Three.js associates the animation (skeletal,
morph targets) with the geometry nodes.
"""
import operator
from bpy import data, types, context
from . import material, texture, animation
from . import object as object_
from .. import constants, utilities, logger, exceptions
def _mesh(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Mesh):
mesh = name
else:
mesh = data.meshes[name]
return func(mesh, *args, **kwargs)
return inner
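# Because of the decorator above, every @_mesh function accepts either a
# bpy.types.Mesh datablock or its name, e.g. (hypothetical mesh name):
#   faces('Cube', options) is equivalent to faces(bpy.data.meshes['Cube'], options)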
@_mesh
def skeletal_animation(mesh, options):
"""
:param mesh:
:param options:
:rtype: []
"""
logger.debug("mesh.animation(%s, %s)", mesh, options)
armature = _armature(mesh)
if not armature:
logger.warning("No armature found (%s)", mesh)
return []
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
dispatch = {
constants.POSE: animation.pose_animation,
constants.REST: animation.rest_animation
}
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
animations = func(armature, options)
# armature.data.pose_position = pose_position
return animations
@_mesh
def bones(mesh, options):
"""
:param mesh:
:param options:
:rtype: [], {}
"""
logger.debug("mesh.bones(%s)", mesh)
armature = _armature(mesh)
if not armature:
return [], {}
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
if anim_type == constants.OFF:
logger.info("Animation type not set, defaulting "
"to using REST position for the armature.")
func = _rest_bones
# armature.data.pose_position = "REST"
else:
dispatch = {
constants.REST: _rest_bones,
constants.POSE: _pose_bones
}
logger.info("Using %s for the armature", anim_type)
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
bones_, bone_map = func(armature)
# armature.data.pose_position = pose_position
return (bones_, bone_map)
@_mesh
def buffer_normal(mesh):
"""
:param mesh:
:rtype: []
"""
normals_ = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
normals_.extend(vector)
return normals_
@_mesh
def buffer_position(mesh):
"""
:param mesh:
:rtype: []
"""
position = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
vertex = mesh.vertices[vertex_index]
vector = (vertex.co.x, vertex.co.y, vertex.co.z)
position.extend(vector)
return position
@_mesh
def buffer_uv(mesh):
"""
:param mesh:
:rtype: []
"""
uvs_ = []
    if len(mesh.uv_layers) == 0:
return uvs_
elif len(mesh.uv_layers) > 1:
# if memory serves me correctly buffer geometry
# only uses one UV layer
logger.warning("%s has more than 1 UV layer", mesh.name)
for uv_data in mesh.uv_layers[0].data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uvs_.extend(uv_tuple)
return uvs_
@_mesh
def faces(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.faces(%s, %s)", mesh, options)
vertex_uv = len(mesh.uv_textures) > 0
has_colors = len(mesh.vertex_colors) > 0
logger.info("Has UVs = %s", vertex_uv)
logger.info("Has vertex colours = %s", has_colors)
opt_colours = options[constants.COLORS] and has_colors
opt_uvs = options[constants.UVS] and vertex_uv
opt_materials = options.get(constants.FACE_MATERIALS)
opt_normals = options[constants.NORMALS]
logger.debug("Vertex colours enabled = %s", opt_colours)
logger.debug("UVS enabled = %s", opt_uvs)
logger.debug("Materials enabled = %s", opt_materials)
logger.debug("Normals enabled = %s", opt_normals)
uv_layers = _uvs(mesh) if opt_uvs else None
vertex_normals = _normals(mesh) if opt_normals else None
vertex_colours = vertex_colors(mesh) if opt_colours else None
faces_data = []
colour_indices = {}
if vertex_colours:
logger.debug("Indexing colours")
for index, colour in enumerate(vertex_colours):
colour_indices[str(colour)] = index
normal_indices = {}
if vertex_normals:
logger.debug("Indexing normals")
for index, normal in enumerate(vertex_normals):
normal_indices[str(normal)] = index
logger.info("Parsing %d faces", len(mesh.tessfaces))
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count not in (3, 4):
logger.error("%d vertices for face %d detected",
vert_count,
face.index)
raise exceptions.NGonError("ngons are not supported")
mat_index = face.material_index is not None and opt_materials
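        # The first value written per face is a bitmask (utilities.bit_mask)
        # describing which optional blocks follow: quad flag, material index,
        # UVs, normals and vertex colours.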
mask = {
            constants.QUAD: vert_count == 4,
constants.MATERIALS: mat_index,
constants.UVS: False,
constants.NORMALS: False,
constants.COLORS: False
}
face_data = []
face_data.extend([v for v in face.vertices])
if mask[constants.MATERIALS]:
face_data.append(face.material_index)
# @TODO: this needs the same optimization as what
# was done for colours and normals
if uv_layers:
for index, uv_layer in enumerate(uv_layers):
layer = mesh.tessface_uv_textures[index]
for uv_data in layer.data[face.index].uv:
uv_tuple = (uv_data[0], uv_data[1])
face_data.append(uv_layer.index(uv_tuple))
mask[constants.UVS] = True
if vertex_normals:
for vertex in face.vertices:
normal = mesh.vertices[vertex].normal
normal = (normal.x, normal.y, normal.z)
face_data.append(normal_indices[str(normal)])
mask[constants.NORMALS] = True
if vertex_colours:
colours = mesh.tessface_vertex_colors.active.data[face.index]
for each in (colours.color1, colours.color2, colours.color3):
each = utilities.rgb2int(each)
face_data.append(colour_indices[str(each)])
mask[constants.COLORS] = True
if mask[constants.QUAD]:
colour = utilities.rgb2int(colours.color4)
face_data.append(colour_indices[str(colour)])
face_data.insert(0, utilities.bit_mask(mask))
faces_data.extend(face_data)
return faces_data
@_mesh
def morph_targets(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.morph_targets(%s, %s)", mesh, options)
obj = object_.objects_using_mesh(mesh)[0]
original_frame = context.scene.frame_current
frame_step = options.get(constants.FRAME_STEP, 1)
scene_frames = range(context.scene.frame_start,
context.scene.frame_end+1,
frame_step)
morphs = []
for frame in scene_frames:
logger.info("Processing data at frame %d", frame)
context.scene.frame_set(frame, 0.0)
morphs.append([])
vertices_ = object_.extract_mesh(obj, options).vertices[:]
for vertex in vertices_:
morphs[-1].extend([vertex.co.x, vertex.co.y, vertex.co.z])
context.scene.frame_set(original_frame, 0.0)
morphs_detected = False
for index, each in enumerate(morphs):
        if index == 0:
continue
morphs_detected = morphs[index-1] != each
if morphs_detected:
logger.info("Valid morph target data detected")
break
else:
logger.info("No valid morph data detected")
return []
manifest = []
for index, morph in enumerate(morphs):
manifest.append({
constants.NAME: 'animation_%06d' % index,
constants.VERTICES: morph
})
return manifest
@_mesh
def materials(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.materials(%s, %s)", mesh, options)
indices = set([face.material_index for face in mesh.tessfaces])
material_sets = [(mesh.materials[index], index) for index in indices]
materials_ = []
maps = options.get(constants.MAPS)
mix = options.get(constants.MIX_COLORS)
use_colors = options.get(constants.COLORS)
logger.info("Colour mix is set to %s", mix)
logger.info("Vertex colours set to %s", use_colors)
for mat, index in material_sets:
try:
dbg_color = constants.DBG_COLORS[index]
except IndexError:
dbg_color = constants.DBG_COLORS[0]
logger.info("Compiling attributes for %s", mat.name)
attributes = {
constants.COLOR_AMBIENT: material.ambient_color(mat),
constants.COLOR_EMISSIVE: material.emissive_color(mat),
constants.SHADING: material.shading(mat),
constants.OPACITY: material.opacity(mat),
constants.TRANSPARENT: material.transparent(mat),
constants.VISIBLE: material.visible(mat),
constants.WIREFRAME: material.wireframe(mat),
constants.BLENDING: material.blending(mat),
constants.DEPTH_TEST: material.depth_test(mat),
constants.DEPTH_WRITE: material.depth_write(mat),
constants.DBG_NAME: mat.name,
constants.DBG_COLOR: dbg_color,
constants.DBG_INDEX: index
}
if use_colors:
colors = material.use_vertex_colors(mat)
attributes[constants.VERTEX_COLORS] = colors
if (use_colors and mix) or (not use_colors):
colors = material.diffuse_color(mat)
attributes[constants.COLOR_DIFFUSE] = colors
if attributes[constants.SHADING] == constants.PHONG:
logger.info("Adding specular attributes")
attributes.update({
constants.SPECULAR_COEF: material.specular_coef(mat),
constants.COLOR_SPECULAR: material.specular_color(mat)
})
if mesh.show_double_sided:
logger.info("Double sided is on")
attributes[constants.DOUBLE_SIDED] = True
materials_.append(attributes)
if not maps:
continue
diffuse = _diffuse_map(mat)
if diffuse:
logger.info("Diffuse map found")
attributes.update(diffuse)
light = _light_map(mat)
if light:
logger.info("Light map found")
attributes.update(light)
specular = _specular_map(mat)
if specular:
logger.info("Specular map found")
attributes.update(specular)
if attributes[constants.SHADING] == constants.PHONG:
normal = _normal_map(mat)
if normal:
logger.info("Normal map found")
attributes.update(normal)
bump = _bump_map(mat)
if bump:
logger.info("Bump map found")
attributes.update(bump)
return materials_
@_mesh
def normals(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.normals(%s)", mesh)
normal_vectors = []
for vector in _normals(mesh):
normal_vectors.extend(vector)
return normal_vectors
@_mesh
def skin_weights(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_weights(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 1)
@_mesh
def skin_indices(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_indices(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 0)
@_mesh
def texture_registration(mesh):
"""
:param mesh:
"""
logger.debug("mesh.texture_registration(%s)", mesh)
materials_ = mesh.materials or []
registration = {}
funcs = (
(constants.MAP_DIFFUSE, material.diffuse_map),
(constants.SPECULAR_MAP, material.specular_map),
(constants.LIGHT_MAP, material.light_map),
(constants.BUMP_MAP, material.bump_map),
(constants.NORMAL_MAP, material.normal_map)
)
def _registration(file_path, file_name):
"""
:param file_path:
:param file_name:
"""
return {
'file_path': file_path,
'file_name': file_name,
'maps': []
}
logger.info("found %d materials", len(materials_))
for mat in materials_:
for (key, func) in funcs:
tex = func(mat)
if tex is None:
continue
logger.info("%s has texture %s", key, tex.name)
file_path = texture.file_path(tex)
file_name = texture.file_name(tex)
reg = registration.setdefault(
utilities.hash(file_path),
_registration(file_path, file_name))
reg["maps"].append(key)
return registration
@_mesh
def uvs(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.uvs(%s)", mesh)
uvs_ = []
for layer in _uvs(mesh):
uvs_.append([])
logger.info("Parsing UV layer %d", len(uvs_))
for pair in layer:
uvs_[-1].extend(pair)
return uvs_
@_mesh
def vertex_colors(mesh):
"""
:param mesh:
"""
logger.debug("mesh.vertex_colors(%s)", mesh)
vertex_colours = []
try:
vertex_colour = mesh.tessface_vertex_colors.active.data
except AttributeError:
logger.info("No vertex colours found")
return
for face in mesh.tessfaces:
colours = (vertex_colour[face.index].color1,
vertex_colour[face.index].color2,
vertex_colour[face.index].color3,
vertex_colour[face.index].color4)
for colour in colours:
colour = utilities.rgb2int((colour.r, colour.g, colour.b))
if colour not in vertex_colours:
vertex_colours.append(colour)
return vertex_colours
@_mesh
def vertices(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.vertices(%s)", mesh)
vertices_ = []
for vertex in mesh.vertices:
vertices_.extend((vertex.co.x, vertex.co.y, vertex.co.z))
return vertices_
def _normal_map(mat):
"""
:param mat:
"""
tex = material.normal_map(mat)
if tex is None:
return
logger.info("Found normal texture map %s", tex.name)
normal = {
constants.MAP_NORMAL:
texture.file_name(tex),
constants.MAP_NORMAL_FACTOR:
material.normal_scale(mat),
constants.MAP_NORMAL_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_NORMAL_WRAP: texture.wrap(tex),
constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
}
return normal
def _bump_map(mat):
"""
:param mat:
"""
tex = material.bump_map(mat)
if tex is None:
return
logger.info("Found bump texture map %s", tex.name)
bump = {
constants.MAP_BUMP:
texture.file_name(tex),
constants.MAP_BUMP_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_BUMP_WRAP: texture.wrap(tex),
constants.MAP_BUMP_REPEAT: texture.repeat(tex),
constants.MAP_BUMP_SCALE:
material.bump_scale(mat),
}
return bump
def _specular_map(mat):
"""
:param mat:
"""
tex = material.specular_map(mat)
if tex is None:
return
logger.info("Found specular texture map %s", tex.name)
specular = {
constants.MAP_SPECULAR:
texture.file_name(tex),
constants.MAP_SPECULAR_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
}
return specular
def _light_map(mat):
"""
:param mat:
"""
tex = material.light_map(mat)
if tex is None:
return
logger.info("Found light texture map %s", tex.name)
light = {
constants.MAP_LIGHT:
texture.file_name(tex),
constants.MAP_LIGHT_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_LIGHT_WRAP: texture.wrap(tex),
constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
}
return light
def _diffuse_map(mat):
"""
:param mat:
"""
tex = material.diffuse_map(mat)
if tex is None:
return
logger.info("Found diffuse texture map %s", tex.name)
diffuse = {
constants.MAP_DIFFUSE:
texture.file_name(tex),
constants.MAP_DIFFUSE_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
}
return diffuse
def _normals(mesh):
"""
:param mesh:
:rtype: []
"""
vectors = []
vectors_ = {}
for face in mesh.tessfaces:
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
str_vec = str(vector)
try:
vectors_[str_vec]
except KeyError:
vectors.append(vector)
vectors_[str_vec] = True
return vectors
def _uvs(mesh):
"""
:param mesh:
"""
uv_layers = []
for layer in mesh.uv_layers:
uv_layers.append([])
for uv_data in layer.data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
if uv_tuple not in uv_layers[-1]:
uv_layers[-1].append(uv_tuple)
return uv_layers
def _armature(mesh):
"""
:param mesh:
"""
obj = object_.objects_using_mesh(mesh)[0]
armature = obj.find_armature()
if armature:
logger.info("Found armature %s for %s", armature.name, obj.name)
else:
logger.info("Found no armature for %s", obj.name)
return armature
def _skinning_data(mesh, bone_map, influences, array_index):
"""
:param mesh:
:param bone_map:
:param influences:
:param array_index:
"""
armature = _armature(mesh)
manifest = []
if not armature:
return manifest
obj = object_.objects_using_mesh(mesh)[0]
logger.debug("Skinned object found %s", obj.name)
for vertex in mesh.vertices:
bone_array = []
for group in vertex.groups:
bone_array.append((group.group, group.weight))
bone_array.sort(key=operator.itemgetter(1), reverse=True)
for index in range(influences):
if index >= len(bone_array):
manifest.append(0)
continue
name = obj.vertex_groups[bone_array[index][0]].name
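            # for/else: the else clause runs only when no pose bone matches the
            # vertex group name, in which case a default 0 is appended instead.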
for bone_index, bone in enumerate(armature.pose.bones):
if bone.name != name:
continue
                if array_index == 0:
entry = bone_map.get(bone_index, -1)
else:
entry = bone_array[index][1]
manifest.append(entry)
break
else:
manifest.append(0)
return manifest
def _pose_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
armature_matrix = armature.matrix_world
for bone_count, pose_bone in enumerate(armature.pose.bones):
armature_bone = pose_bone.bone
bone_index = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_bone = armature_bone.parent
parent_matrix = armature_matrix * parent_bone.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = index = 0
for pose_parent in armature.pose.bones:
armature_parent = pose_parent.bone.name
if armature_parent == parent_bone.name:
bone_index = index
index += 1
bone_map[bone_count] = bone_count
pos, rot, scl = bone_matrix.decompose()
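        # Swizzle from Blender's Z-up axes to Three.js Y-up: (x, y, z) -> (x, z, -y).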
bones_.append({
constants.PARENT: bone_index,
constants.NAME: armature_bone.name,
constants.POS: (pos.x, pos.z, -pos.y),
constants.ROTQ: (rot.x, rot.z, -rot.y, rot.w),
constants.SCL: (scl.x, scl.z, scl.y)
})
return bones_, bone_map
def _rest_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
bone_index_rel = 0
for bone in armature.data.bones:
logger.info("Parsing bone %s", bone.name)
if not bone.use_deform:
logger.debug("Ignoring bone %s at: %d",
bone.name, bone_index_rel)
continue
if bone.parent is None:
bone_pos = bone.head_local
bone_index = -1
else:
bone_pos = bone.head_local - bone.parent.head_local
bone_index = 0
index = 0
for parent in armature.data.bones:
if parent.name == bone.parent.name:
bone_index = bone_map.get(index)
index += 1
bone_world_pos = armature.matrix_world * bone_pos
x_axis = bone_world_pos.x
y_axis = bone_world_pos.z
z_axis = -bone_world_pos.y
logger.debug("Adding bone %s at: %s, %s",
bone.name, bone_index, bone_index_rel)
bone_map[bone_count] = bone_index_rel
bone_index_rel += 1
# @TODO: the rotq probably should not have these
# hard coded values
bones_.append({
constants.PARENT: bone_index,
constants.NAME: bone.name,
constants.POS: (x_axis, y_axis, z_axis),
constants.ROTQ: (0, 0, 0, 1)
})
bone_count += 1
return (bones_, bone_map)
| mit |
andersonvom/python_koans | python3/koans/about_classes.py | 22 | 4779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutClasses(Koan):
class Dog:
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
# NOTE: The .__name__ attribute will convert the class
# into a string value.
fido = self.Dog()
self.assertEqual(__, fido.__class__.__name__)
def test_classes_have_docstrings(self):
self.assertRegexpMatches(self.Dog.__doc__, __)
# ------------------------------------------------------------------
class Dog2:
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual(__, dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual(__, dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual(__, getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual(__, fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3:
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
# access as method
self.assertEqual(__, fido.get_name())
# access as property
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog4:
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog5:
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual(__, fido.name)
def test_args_must_match_init(self):
with self.assertRaises(___):
self.Dog5()
# THINK ABOUT IT:
# Why is this so?
def test_different_objects_have_difference_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(__, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6:
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
#
# Implement this!
#
return __
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(__, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual(__, "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual(__, repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual(__, str(seq))
self.assertEqual(__, repr(seq))
self.assertEqual(__, str("STRING"))
self.assertEqual(__, repr("STRING"))
| mit |
inspirehep/invenio | modules/bibcheck/lib/plugins/doi.py | 6 | 2215 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibcheck plugin add the DOIs (from crossref) """
from invenio.bibrecord import record_add_field
from invenio.crossrefutils import get_doi_for_records
from invenio.bibupload import find_record_from_doi
def check_records(records, doi_field="0247_a", extra_subfields=(("2", "DOI"), ("9", "bibcheck"))):
"""
Find the DOI for the records using crossref and add it to the specified
field.
This plugin won't ask for the DOI if it's already set.
"""
records_to_check = {}
for record in records:
has_doi = False
for position, value in record.iterfield("0247_2"):
if value.lower() == "doi":
has_doi = True
break
if not has_doi:
records_to_check[record.record_id] = record
dois = get_doi_for_records(records_to_check.values())
for record_id, doi in dois.iteritems():
record = records_to_check[record_id]
dup_doi_recid = find_record_from_doi(doi)
if dup_doi_recid:
record.warn("DOI %s to be added to record %s already exists in record/s %s" % (doi, record_id, dup_doi_recid))
continue
subfields = [(doi_field[5], doi.encode("utf-8"))] + map(tuple, extra_subfields)
record_add_field(record, tag=doi_field[:3], ind1=doi_field[3],
ind2=doi_field[4], subfields=subfields)
record.set_amended("Added DOI in field %s" % doi_field)
| gpl-2.0 |
otmaneJai/Zipline | zipline/utils/memoize.py | 7 | 2540 | """
Tools for memoization of function results.
"""
from functools import wraps
from six import iteritems
from weakref import WeakKeyDictionary
class lazyval(object):
"""
Decorator that marks that an attribute should not be computed until
needed, and that the value should be memoized.
Example
-------
>>> from zipline.utils.memoize import lazyval
>>> class C(object):
... def __init__(self):
... self.count = 0
... @lazyval
... def val(self):
... self.count += 1
... return "val"
...
>>> c = C()
>>> c.count
0
>>> c.val, c.count
('val', 1)
>>> c.val, c.count
('val', 1)
"""
def __init__(self, get):
self._get = get
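        # Keyed weakly on the instance so cached values are dropped when the
        # owning object is garbage-collected, instead of keeping it alive.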
self._cache = WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
self._cache[instance] = val = self._get(instance)
return val
def remember_last(f):
"""
Decorator that remembers the last computed value of a function and doesn't
recompute it when called with the same inputs multiple times.
Parameters
----------
f : The function to be memoized. All arguments to f should be hashable.
Example
-------
>>> counter = 0
>>> @remember_last
... def foo(x):
... global counter
... counter += 1
... return x, counter
>>> foo(1)
(1, 1)
>>> foo(1)
(1, 1)
>>> foo(0)
(0, 2)
>>> foo(1)
(1, 3)
Notes
-----
This decorator is equivalent to `lru_cache(1)` in Python 3, but with less
bells and whistles for handling things like threadsafety. If we ever
decide we need such bells and whistles, we should just make functools32 a
dependency.
"""
# This needs to be a mutable data structure so we can change it from inside
# the function. In pure Python 3, we'd use the nonlocal keyword for this.
_previous = [None, None]
KEY, VALUE = 0, 1
_kwd_mark = object()
@wraps(f)
def memoized_f(*args, **kwds):
# Hashing logic taken from functools32.lru_cache.
key = args
if kwds:
            key += (_kwd_mark,) + tuple(sorted(iteritems(kwds)))
key_hash = hash(key)
if key_hash != _previous[KEY]:
_previous[VALUE] = f(*args, **kwds)
_previous[KEY] = key_hash
return _previous[VALUE]
return memoized_f
| apache-2.0 |
amousset/ansible | lib/ansible/template/safe_eval.py | 47 | 4154 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
        SAFE_NODES.update(
set(
(ast.Set,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
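# Behaviour sketch (illustrative; the actual filter/test whitelists depend on
# the local Ansible configuration).  Literal-style expressions evaluate,
# anything rejected by the AST or call whitelist is returned unevaluated:
#
#   safe_eval("[1, 2, 3]")                      # -> [1, 2, 3]
#   safe_eval("{'a': 40 + 2}")                  # -> {'a': 42}
#   safe_eval("__import__('os').system('id')")  # -> returned as the original string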
| gpl-3.0 |
comptech/atrex | Software/gaussfitter.py | 1 | 23761 | """
===========
gaussfitter
===========
.. codeauthor:: Adam Ginsburg <[email protected]> 3/17/08
Latest version available at <http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py>
"""
import numpy
from numpy.ma import median
from numpy import pi
#from scipy import optimize,stats,pi
from scipy.optimize import curve_fit
from mpfit import mpfit
"""
Note about mpfit/leastsq:
I switched everything over to the Markwardt mpfit routine for a few reasons,
but foremost being the ability to set limits on parameters, not just force them
to be fixed. As far as I can tell, leastsq does not have that capability.
The version of mpfit I use can be found here:
http://code.google.com/p/agpy/source/browse/trunk/mpfit
.. todo::
-turn into a class instead of a collection of objects
-implement WCS-based gaussian fitting with correct coordinates
"""
def moments(data,circle,rotate,vheight,estimator=median,**kwargs):
"""Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
the gaussian parameters of a 2D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above.
If using masked arrays, pass estimator=numpy.ma.median
"""
total = numpy.abs(data).sum()
Y, X = numpy.indices(data.shape) # python convention: reverse x,y numpy.indices
y = numpy.argmax((X*numpy.abs(data)).sum(axis=1)/total)
x = numpy.argmax((Y*numpy.abs(data)).sum(axis=0)/total)
col = data[int(y),:]
# FIRST moment, not second!
width_x = numpy.sqrt(numpy.abs((numpy.arange(col.size)-y)*col).sum()/numpy.abs(col).sum())
row = data[:, int(x)]
width_y = numpy.sqrt(numpy.abs((numpy.arange(row.size)-x)*row).sum()/numpy.abs(row).sum())
width = ( width_x + width_y ) / 2.
height = estimator(data.ravel())
amplitude = data.max()-height
mylist = [amplitude,x,y]
if numpy.isnan(width_y) or numpy.isnan(width_x) or numpy.isnan(height) or numpy.isnan(amplitude):
raise ValueError("something is nan")
if vheight==1:
mylist = [height] + mylist
if circle==0:
mylist = mylist + [width_x,width_y]
if rotate==1:
mylist = mylist + [0.] #rotation "moment" is just zero...
# also, circles don't rotate.
else:
mylist = mylist + [width]
return mylist
def twodgaussian(inpars, circle=False, rotate=True, vheight=True, shape=None):
"""Returns a 2d gaussian function of the form:
x' = numpy.cos(rota) * x - numpy.sin(rota) * y
y' = numpy.sin(rota) * x + numpy.cos(rota) * y
(rota should be in degrees)
g = b + a * numpy.exp ( - ( ((x-center_x)/width_x)**2 +
((y-center_y)/width_y)**2 ) / 2 )
inpars = [b,a,center_x,center_y,width_x,width_y,rota]
(b is background height, a is peak amplitude)
where x and y are the input parameters of the returned function,
and all other parameters are specified by this function
However, the above values are passed by list. The list should be:
inpars = (height,amplitude,center_x,center_y,width_x,width_y,rota)
You can choose to ignore / neglect some of the above input parameters
unumpy.sing the following options:
circle=0 - default is an elliptical gaussian (different x, y
widths), but can reduce the input by one parameter if it's a
circular gaussian
rotate=1 - default allows rotation of the gaussian ellipse. Can
remove last parameter by setting rotate=0
vheight=1 - default allows a variable height-above-zero, i.e. an
additive constant for the Gaussian function. Can remove first
parameter by setting this to 0
shape=None - if shape is set (to a 2-parameter list) then returns
an image with the gaussian defined by inpars
"""
inpars_old = inpars
inpars = list(inpars)
if vheight == 1:
height = inpars.pop(0)
height = float(height)
else:
height = float(0)
amplitude, center_y, center_x = inpars.pop(0),inpars.pop(0),inpars.pop(0)
amplitude = float(amplitude)
center_x = float(center_x)
center_y = float(center_y)
if circle == 1:
width = inpars.pop(0)
width_x = float(width)
width_y = float(width)
rotate = 0
else:
width_x, width_y = inpars.pop(0),inpars.pop(0)
width_x = float(width_x)
width_y = float(width_y)
if rotate == 1:
rota = inpars.pop(0)
rota = pi/180. * float(rota)
rcen_x = center_x * numpy.cos(rota) - center_y * numpy.sin(rota)
rcen_y = center_x * numpy.sin(rota) + center_y * numpy.cos(rota)
else:
rcen_x = center_x
rcen_y = center_y
if len(inpars) > 0:
raise ValueError("There are still input parameters:" + str(inpars) + \
" and you've input: " + str(inpars_old) + \
" circle=%d, rotate=%d, vheight=%d" % (circle,rotate,vheight) )
def rotgauss(x,y):
if rotate==1:
xp = x * numpy.cos(rota) - y * numpy.sin(rota)
yp = x * numpy.sin(rota) + y * numpy.cos(rota)
else:
xp = x
yp = y
g = height+amplitude*numpy.exp(
-(((rcen_x-xp)/width_x)**2+
((rcen_y-yp)/width_y)**2)/2.)
return g
if shape is not None:
return rotgauss(*numpy.indices(shape))
else:
return rotgauss
def gaussfit(data,err=None,params=(),autoderiv=True,return_all=False,circle=False,
fixed=numpy.repeat(False,7),limitedmin=[False,False,False,False,True,True,True],
limitedmax=[False,False,False,False,False,False,True],
usemoment=numpy.array([],dtype='bool'),
minpars=numpy.repeat(0,7),maxpars=[0,0,0,0,0,0,360],
rotate=1,vheight=1,quiet=True,returnmp=False,
returnfitimage=False,**kwargs):
"""
Gaussian fitter with the ability to fit a variety of different forms of
2-dimensional gaussian.
Input Parameters:
data - 2-dimensional data array
err=None - error array with same size as data array
params=[] - initial input parameters for Gaussian function.
(height, amplitude, x, y, width_x, width_y, rota)
if not input, these will be determined from the moments of the system,
assuming no rotation
autoderiv=1 - use the autoderiv provided in the lmder.f function (the
alternative is to us an analytic derivative with lmdif.f: this method
is less robust)
return_all=0 - Default is to return only the Gaussian parameters.
1 - fit params, fit error
returnfitimage - returns (best fit params,best fit image)
returnmp - returns the full mpfit struct
circle=0 - default is an elliptical gaussian (different x, y widths),
but can reduce the input by one parameter if it's a circular gaussian
rotate=1 - default allows rotation of the gaussian ellipse. Can remove
        last parameter by setting rotate=0. Expects angle in DEGREES
vheight=1 - default allows a variable height-above-zero, i.e. an
additive constant for the Gaussian function. Can remove first
parameter by setting this to 0
usemoment - can choose which parameters to use a moment estimation for.
Other parameters will be taken from params. Needs to be a boolean
array.
Output:
Default output is a set of Gaussian parameters with the same shape as
the input parameters
Can also output the covariance matrix, 'infodict' that contains a lot
more detail about the fit (see scipy.optimize.leastsq), and a message
from leastsq telling what the exit status of the fitting routine was
Warning: Does NOT necessarily output a rotation angle between 0 and 360 degrees.
"""
usemoment=numpy.array(usemoment,dtype='bool')
params=numpy.array(params,dtype='float')
if usemoment.any() and len(params)==len(usemoment):
moment = numpy.array(moments(data,circle,rotate,vheight,**kwargs),dtype='float')
params[usemoment] = moment[usemoment]
    elif len(params) == 0:
params = (moments(data,circle,rotate,vheight,**kwargs))
if vheight==0:
vheight=1
params = numpy.concatenate([[0],params])
fixed[0] = 1
# mpfit will fail if it is given a start parameter outside the allowed range:
for i in xrange(len(params)):
if params[i] > maxpars[i] and limitedmax[i]: params[i] = maxpars[i]
if params[i] < minpars[i] and limitedmin[i]: params[i] = minpars[i]
if err is None:
errorfunction = lambda p: numpy.ravel((twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)) - data))
else:
errorfunction = lambda p: numpy.ravel((twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)) - data)/err)
def mpfitfun(data,err):
if err is None:
def f(p,fjac=None): return [0,numpy.ravel(data-twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)))]
else:
def f(p,fjac=None): return [0,numpy.ravel((data-twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)))/err)]
return f
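    # mpfit parameter records: each dict carries the start value plus optional
    # lower/upper limits and a flag marking the parameter as fixed.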
parinfo = [
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"XSHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"YSHIFT",'error':0},
{'n':4,'value':params[4],'limits':[minpars[4],maxpars[4]],'limited':[limitedmin[4],limitedmax[4]],'fixed':fixed[4],'parname':"XWIDTH",'error':0} ]
if vheight == 1:
parinfo.insert(0,{'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0})
if circle == 0:
parinfo.append({'n':5,'value':params[5],'limits':[minpars[5],maxpars[5]],'limited':[limitedmin[5],limitedmax[5]],'fixed':fixed[5],'parname':"YWIDTH",'error':0})
if rotate == 1:
parinfo.append({'n':6,'value':params[6],'limits':[minpars[6],maxpars[6]],'limited':[limitedmin[6],limitedmax[6]],'fixed':fixed[6],'parname':"ROTATION",'error':0})
if autoderiv == 0:
# the analytic derivative, while not terribly difficult, is less
# efficient and useful. I only bothered putting it here because I was
# instructed to do so for a class project - please ask if you would
# like this feature implemented
raise ValueError("I'm sorry, I haven't implemented this feature yet.")
else:
# p, cov, infodict, errmsg, success = optimize.leastsq(errorfunction,\
# params, full_output=1)
mp = mpfit(mpfitfun(data,err),parinfo=parinfo,quiet=quiet)
if returnmp:
returns = (mp)
elif return_all == 0:
returns = mp.params
elif return_all == 1:
returns = mp.params,mp.perror
if returnfitimage:
fitimage = twodgaussian(mp.params,circle,rotate,vheight)(*numpy.indices(data.shape))
returns = (returns,fitimage)
return returns
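# Round-trip sketch (illustrative, never called on import; assumes numpy and
# the bundled mpfit import as above): build a synthetic image with
# twodgaussian() and recover its parameters with gaussfit().
def _example_gaussfit_roundtrip():
    """Fit a noiseless synthetic 2D gaussian and return (true, fitted) params."""
    true_params = [0., 1., 25., 30., 4., 6., 0.]  # height, amp, x, y, wx, wy, rot
    image = twodgaussian(true_params, shape=(64, 64))
    fitted = gaussfit(image)
    return true_params, fitted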
def onedmoments(Xax,data,vheight=True,estimator=median,negamp=None,
veryverbose=False, **kwargs):
"""Returns (height, amplitude, x, width_x)
the gaussian parameters of a 1D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above.
If using masked arrays, pass estimator=numpy.ma.median
'estimator' is used to measure the background level (height)
negamp can be used to force the peak negative (True), positive (False),
or it will be "autodetected" (negamp=None)
"""
dx = numpy.mean(Xax[1:] - Xax[:-1]) # assume a regular grid
integral = (data*dx).sum()
height = estimator(data)
# try to figure out whether pos or neg based on the minimum width of the pos/neg peaks
Lpeakintegral = integral - height*len(Xax)*dx - (data[data>height]*dx).sum()
Lamplitude = data.min()-height
Lwidth_x = 0.5*(numpy.abs(Lpeakintegral / Lamplitude))
Hpeakintegral = integral - height*len(Xax)*dx - (data[data<height]*dx).sum()
Hamplitude = data.max()-height
Hwidth_x = 0.5*(numpy.abs(Hpeakintegral / Hamplitude))
Lstddev = Xax[data<data.mean()].std()
Hstddev = Xax[data>data.mean()].std()
#print "Lstddev: %10.3g Hstddev: %10.3g" % (Lstddev,Hstddev)
#print "Lwidth_x: %10.3g Hwidth_x: %10.3g" % (Lwidth_x,Hwidth_x)
if negamp: # can force the guess to be negative
xcen,amplitude,width_x = Xax[numpy.argmin(data)],Lamplitude,Lwidth_x
elif negamp is None:
if Hstddev < Lstddev:
xcen,amplitude,width_x, = Xax[numpy.argmax(data)],Hamplitude,Hwidth_x
else:
xcen,amplitude,width_x, = Xax[numpy.argmin(data)],Lamplitude,Lwidth_x
else: # if negamp==False, make positive
xcen,amplitude,width_x = Xax[numpy.argmax(data)],Hamplitude,Hwidth_x
if veryverbose:
print "negamp: %s amp,width,cen Lower: %g, %g Upper: %g, %g Center: %g" %\
(negamp,Lamplitude,Lwidth_x,Hamplitude,Hwidth_x,xcen)
mylist = [amplitude,xcen,width_x]
if numpy.isnan(width_x) or numpy.isnan(height) or numpy.isnan(amplitude):
raise ValueError("something is nan")
if vheight:
mylist = [height] + mylist
return mylist
def onedgaussian(x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
"""
return H+A*numpy.exp(-(x-dx)**2/(2*w**2))
def onedgaussfit(xax, data, err=None,
params=[0,1,0,1],fixed=[False,False,False,False],
limitedmin=[False,False,False,True],
limitedmax=[False,False,False,False], minpars=[0,0,0,0],
maxpars=[0,0,0,0], quiet=True, shh=True,
veryverbose=False,
vheight=True, negamp=False,
usemoments=False):
"""
Inputs:
xax - x axis
data - y axis
err - error corresponding to data
params - Fit parameters: Height of background, Amplitude, Shift, Width
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
usemoments - replace default parameters with moments
Returns:
Fit parameters
Model
Fit errors
chi2
"""
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))]
else:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))/err]
return f
    if xax is None:
xax = numpy.arange(len(data))
if vheight is False:
height = params[0]
fixed[0] = True
if usemoments:
params = onedmoments(xax,data,vheight=vheight,negamp=negamp, veryverbose=veryverbose)
if vheight is False: params = [height]+params
if veryverbose: print "OneD moments: h: %g a: %g c: %g w: %g" % tuple(params)
parinfo = [ {'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0} ,
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"SHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"WIDTH",'error':0}]
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if (not shh) or veryverbose:
print "Fit status: ",mp.status
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,onedgaussian(xax,*mpp),mpperr,chi2
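# Usage sketch (illustrative values): fit a synthetic 1D profile.
#
#   x = numpy.linspace(-10., 10., 200)
#   y = onedgaussian(x, 0.5, 3.0, 1.0, 2.0)          # H, A, dx, w
#   params, model, errs, chi2 = onedgaussfit(x, y, params=[0, 1, 0, 1])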
def n_gaussian(pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = numpy.zeros(len(x))
for i in range(len(dx)):
v += a[i] * numpy.exp( - ( x - dx[i] )**2 / (2.0*sigma[i]**2) )
return v
return g
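# Usage sketch (illustrative): a two-component model with amplitudes 1 and 0.5,
# centres 0 and 3, widths 1 and 0.5 -- pars repeats [amplitude, offset, width]:
#
#   model = n_gaussian(pars=[1, 0, 1, 0.5, 3, 0.5])
#   y = model(numpy.arange(-5., 8., 0.1))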
def multigaussfit(xax, data, ngauss=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False):
"""
An improvement on onedgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
ngauss - How many gaussians to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 3*ngauss. If ngauss > 1 and length = 3, they will
be replicated ngauss times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * ngauss
If len(params) % 3 == 0, ngauss will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if len(params) != ngauss and (len(params) / 3) > ngauss:
ngauss = len(params) / 3
if isinstance(params,numpy.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
for parlist in (params,fixed,limitedmin,limitedmax,minpars,maxpars):
if len(parlist) != 3*ngauss:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of gaussians, it will just replicate
if len(parlist) == 3:
parlist *= ngauss
elif parlist==params:
parlist[:] = [1,0,1] * ngauss
elif parlist==fixed or parlist==limitedmax:
parlist[:] = [False,False,False] * ngauss
elif parlist==limitedmin:
parlist[:] = [False,False,True] * ngauss
elif parlist==minpars or parlist==maxpars:
parlist[:] = [0,0,0] * ngauss
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))/err]
return f
    if xax is None:
xax = numpy.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii%3), 'error':ii}
for ii in xrange(len(params)) ]
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print "Final fit values: "
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,n_gaussian(pars=mpp)(xax),mpperr,chi2
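# Usage sketch (illustrative, with x and y as in the sketch above): the same
# two-component fit via multigaussfit; params are [amplitude, offset, width]
# repeated ngauss times.
#
#   mpp, model, mpperr, chi2 = multigaussfit(x, y, ngauss=2,
#                                            params=[1, 0, 1, 0.5, 3, 0.5])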
def collapse_gaussfit(cube,xax=None,axis=2,negamp=False,usemoments=True,nsigcut=1.0,mppsigcut=1.0,
return_errors=False, **kwargs):
import time
std_coll = cube.std(axis=axis)
std_coll[std_coll==0] = numpy.nan # must eliminate all-zero spectra
mean_std = median(std_coll[std_coll==std_coll])
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
amp_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
chi2_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
offset_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
width_err = numpy.zeros(cube.shape[1:]) + numpy.nan
amp_err = numpy.zeros(cube.shape[1:]) + numpy.nan
offset_err = numpy.zeros(cube.shape[1:]) + numpy.nan
if xax is None:
xax = numpy.arange(cube.shape[0])
starttime = time.time()
print "Cube shape: ",cube.shape
if negamp: extremum=numpy.min
else: extremum=numpy.max
print "Fitting a total of %i spectra with peak signal above %f" % ((numpy.abs(extremum(cube,axis=0)) > (mean_std*nsigcut)).sum(),mean_std*nsigcut)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (numpy.abs(extremum(cube[:,i,:],axis=0)) > (mean_std*nsigcut)).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if numpy.abs(extremum(cube[:,i,j])) > (mean_std*nsigcut):
mpp,gfit,mpperr,chi2 = onedgaussfit(xax,cube[:,i,j],err=numpy.ones(cube.shape[0])*mean_std,negamp=negamp,usemoments=usemoments,**kwargs)
if numpy.abs(mpp[1]) > (mpperr[1]*mppsigcut):
width_arr[i,j] = mpp[3]
offset_arr[i,j] = mpp[2]
chi2_arr[i,j] = chi2
amp_arr[i,j] = mpp[1]
width_err[i,j] = mpperr[3]
offset_err[i,j] = mpperr[2]
amp_err[i,j] = mpperr[1]
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
else:
print "in %f seconds" % (dt)
print "Total time %f seconds" % (time.time()-starttime)
if return_errors:
return width_arr,offset_arr,amp_arr,width_err,offset_err,amp_err,chi2_arr
else:
return width_arr,offset_arr,amp_arr,chi2_arr
| lgpl-3.0 |
Mazecreator/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
krsjoseph/youtube-dl | youtube_dl/extractor/tinypic.py | 126 | 1893 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class TinyPicIE(InfoExtractor):
IE_NAME = 'tinypic'
IE_DESC = 'tinypic.com videos'
_VALID_URL = r'http://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+'
_TESTS = [
{
'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8',
'md5': '609b74432465364e72727ebc6203f044',
'info_dict': {
'id': '6xw7tc',
'ext': 'flv',
'title': 'shadow phenomenon weird',
},
},
{
'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8',
'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id, 'Downloading page')
mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
if mobj is None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
file_id = mobj.group('fileid')
server_id = mobj.group('serverid')
KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting'
keywords = self._html_search_meta('keywords', webpage, 'title')
title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else ''
video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id)
thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id)
return {
'id': file_id,
'url': video_url,
'thumbnail': thumbnail,
'title': title
}
| unlicense |
qilicun/python | python2/diveintopythonzh-cn-5.4b/soundex/stage1/soundex1d.py | 4 | 2390 | """Soundex algorithm
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/06 21:36:36 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
import string, re
charToSoundex = {"A": "9",
"B": "1",
"C": "2",
"D": "3",
"E": "9",
"F": "1",
"G": "2",
"H": "9",
"I": "9",
"J": "2",
"K": "2",
"L": "4",
"M": "5",
"N": "5",
"O": "9",
"P": "1",
"Q": "2",
"R": "6",
"S": "2",
"T": "3",
"U": "9",
"V": "1",
"W": "9",
"X": "2",
"Y": "9",
"Z": "2"}
def soundex(source):
"convert string to Soundex equivalent"
# Soundex requirements:
# source string must be at least 1 character
# and must consist entirely of letters
if not source:
return "0000"
for c in source:
if not ('A' <= c <= 'Z') and not ('a' <= c <= 'z'):
return "0000"
# Soundex algorithm:
# 1. make first character uppercase
source = source[0].upper() + source[1:]
# 2. translate all other characters to Soundex digits
digits = source[0]
for s in source[1:]:
s = s.upper()
digits += charToSoundex[s]
# 3. remove consecutive duplicates
digits2 = digits[0]
for d in digits[1:]:
if digits2[-1] != d:
digits2 += d
# 4. remove all "9"s
digits3 = re.sub('9', '', digits2)
# 5. pad end with "0"s to 4 characters
while len(digits3) < 4:
digits3 += "0"
# 6. return first 4 characters
return digits3[:4]
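# Worked example: soundex("Pilgrim")
#   1. keep the first letter                  -> "P"
#   2. map the remaining letters i,l,g,r,i,m  -> "942695"
#   3. no consecutive duplicates to collapse  -> "P942695"
#   4. strip the vowel placeholder "9"s       -> "P4265"
#   5/6. pad / truncate to 4 characters       -> "P426"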
if __name__ == '__main__':
from timeit import Timer
names = ('Woo', 'Pilgrim', 'Flingjingwaller')
for name in names:
statement = "soundex('%s')" % name
t = Timer(statement, "from __main__ import soundex")
print name.ljust(15), soundex(name), min(t.repeat())
| gpl-3.0 |
kizyle502/collegiatemasters | collegiatemasters/players/models.py | 1 | 1433 | from django.db import models
from django.core.urlresolvers import reverse
from autoslug import AutoSlugField
from model_utils.models import TimeStampedModel
from django_countries.fields import CountryField
class Player(TimeStampedModel):
GROUP_UNSPECIFIED = "unspecified"
GROUP_FIRST = "first"
GROUP_SECOND = "second"
GROUP_THIRD = "third"
GROUP_FOURTH = "fourth"
GROUP_CHOICES = (
(GROUP_UNSPECIFIED, "Unspecified"),
(GROUP_FIRST, "First"),
(GROUP_SECOND, "Second"),
(GROUP_THIRD, "Third"),
(GROUP_FOURTH, "Fourth"),
)
name = models.CharField("Player Name", max_length=255, unique=True)
    slug = AutoSlugField("Player Slug", unique=True, always_update=False, populate_from='name')
round1 = models.IntegerField("Round 1", null=True)
round2 = models.IntegerField("Round 2", null=True)
round3 = models.IntegerField("Round 3", null=True)
round4 = models.IntegerField("Round 4", null=True)
group = models.CharField("Group (Based on Golfweek world rank)",
choices=GROUP_CHOICES,
default=GROUP_UNSPECIFIED,
max_length=255)
home_country = CountryField("Home Country", null=True, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('players:detail', kwargs={'name': self.name})
| bsd-3-clause |
Kupoman/thor | src/appdirs.py | 1 | 22475 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 1)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
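# Example (hypothetical app; the exact result depends on platform and user):
# user_data_dir("SuperApp", "Acme", version="1.0") would typically return
# "~/.local/share/SuperApp/1.0" on Unix (XDG default),
# "~/Library/Application Support/SuperApp/1.0" on Mac OS X, and
# "C:\Users\<username>\AppData\Local\Acme\SuperApp\1.0" on Windows 7
# (non-roaming), per the directory table in the docstring above.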
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
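# Example (hypothetical app): site_data_dir("SuperApp", "Acme") would
# typically return "/usr/local/share/SuperApp" on Unix (first entry of
# $XDG_DATA_DIRS) and "C:\ProgramData\Acme\SuperApp" on Windows 7; with
# multipath=True the Unix result is the full os.pathsep-joined list.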
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
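# Example (hypothetical app): user_config_dir("SuperApp") would typically
# return "~/.config/SuperApp" on Unix ($XDG_CONFIG_HOME if defined) and the
# same path as user_data_dir() on Windows and Mac OS X, as noted above.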
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical user data directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
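# Example (hypothetical app): user_cache_dir("SuperApp", "Acme") would
# typically return "~/.cache/SuperApp" on Unix, "~/Library/Caches/SuperApp"
# on Mac OS X, and "...\AppData\Local\Acme\SuperApp\Cache" on Windows; the
# trailing "Cache" is the opinionated default and is dropped with
# opinion=False.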
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
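# Example (hypothetical app): user_log_dir("SuperApp", "Acme") would
# typically return "~/.cache/SuperApp/log" on Unix and
# "...\AppData\Local\Acme\SuperApp\Logs" on Windows; opinion=False drops
# the trailing "log"/"Logs" component, as described above.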
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
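# Typical use (mirrors the self-test at the bottom of this module):
#   dirs = AppDirs("MyApp", "MyCompany", version="1.0")
#   dirs.user_data_dir, dirs.user_cache_dir, etc. then return the
#   corresponding per-kind paths for that app.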
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
        # Downgrade to short path name if it has high-bit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
    # Downgrade to short path name if it has high-bit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    # Downgrade to short path name if it has high-bit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| apache-2.0 |
Weuxel/cjdns | node_build/dependencies/libuv/build/gyp/test/mac/gyptest-depend-on-bundle.py | 303 | 1186 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a dependency on a bundle causes the whole bundle to be built.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='depend-on-bundle')
test.build('test.gyp', 'dependent_on_bundle', chdir='depend-on-bundle')
# Binary itself.
test.built_file_must_exist('dependent_on_bundle', chdir='depend-on-bundle')
# Bundle dependency.
test.built_file_must_exist(
'my_bundle.framework/Versions/A/my_bundle',
chdir='depend-on-bundle')
test.built_file_must_exist( # package_framework
'my_bundle.framework/my_bundle',
chdir='depend-on-bundle')
test.built_file_must_exist( # plist
'my_bundle.framework/Versions/A/Resources/Info.plist',
chdir='depend-on-bundle')
test.built_file_must_exist(
'my_bundle.framework/Versions/A/Resources/English.lproj/' # Resources
'InfoPlist.strings',
chdir='depend-on-bundle')
test.pass_test()
| gpl-3.0 |
jeremiahmarks/sl4a | python/src/Lib/multiprocessing/util.py | 59 | 7839 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import itertools
import weakref
import atexit
import threading        # we want threading to install its
# cleanup function before multiprocessing does
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging, atexit
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
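# Illustrative use (not part of the original module): calling
#   log_to_stderr(SUBDEBUG)
# once at startup attaches a stderr handler to the "multiprocessing" logger;
# get_logger() retrieves the same logger without adding a handler.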
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception, e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
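# Example from this module: ForkAwareThreadLock below calls
#   register_after_fork(self, ForkAwareThreadLock.__init__)
# so that each forked child re-creates its lock instead of inheriting a
# possibly-held one from the parent.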
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _finalizer_counter.next())
_finalizer_registry[self._key] = self
def __call__(self, wr=None):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
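# Example from this module: get_temp_dir() above registers
#   Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
# so the temporary directory is removed by _run_finalizers() when the
# process shuts down.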
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function():
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
| apache-2.0 |
uclaros/QGIS | tests/src/python/test_qgsmapcanvas.py | 25 | 23167 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapCanvas
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '24/1/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsMapSettings,
QgsCoordinateReferenceSystem,
QgsRectangle,
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsMultiRenderChecker,
QgsFillSymbol,
QgsSingleSymbolRenderer,
QgsMapThemeCollection,
QgsProject, QgsAnnotationPolygonItem,
QgsPolygon,
QgsLineString,
QgsPoint,
QgsPointXY,
QgsApplication)
from qgis.gui import (QgsMapCanvas)
from qgis.PyQt.QtCore import (Qt,
QDir)
from qgis.PyQt.QtXml import (QDomDocument, QDomElement)
import time
from qgis.testing import start_app, unittest
app = start_app()
class TestQgsMapCanvas(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsMapCanvas Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testGettersSetters(self):
canvas = QgsMapCanvas()
# should be disabled by default
self.assertFalse(canvas.previewJobsEnabled())
canvas.setPreviewJobsEnabled(True)
self.assertTrue(canvas.previewJobsEnabled())
def testDeferredUpdate(self):
""" test that map canvas doesn't auto refresh on deferred layer update """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# add polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# deferred update - so expect that canvas will not been refreshed
layer.triggerRepaint(True)
timeout = time.time() + 0.1
while time.time() < timeout:
# messy, but only way to check that canvas redraw doesn't occur
self.assertFalse(canvas.isDrawing())
# canvas should still be empty
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# refresh canvas
canvas.refresh()
canvas.waitWhileRendering()
# now we expect the canvas check to fail (since they'll be a new polygon rendered over it)
self.assertFalse(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
def testRefreshOnTimer(self):
""" test that map canvas refreshes with auto refreshing layers """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# add polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# set auto refresh on layer
layer.setAutoRefreshInterval(100)
layer.setAutoRefreshEnabled(True)
timeout = time.time() + 1
# expect canvas to auto refresh...
while not canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
while canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
# add a polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# wait for canvas auto refresh
while not canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
while canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
# now canvas should look different...
self.assertFalse(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# switch off auto refresh
layer.setAutoRefreshEnabled(False)
timeout = time.time() + 0.5
while time.time() < timeout:
# messy, but only way to check that canvas redraw doesn't occur
self.assertFalse(canvas.isDrawing())
def testCancelAndDestroy(self):
""" test that nothing goes wrong if we destroy a canvas while a job is canceling """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
# add a ton of features
for i in range(5000):
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
self.assertTrue(canvas.isDrawing())
canvas.stopRendering()
del canvas
def testMapTheme(self):
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
# add a polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# create a style
sym1 = QgsFillSymbol.createSimple({'color': '#ffb200'})
renderer = QgsSingleSymbolRenderer(sym1)
layer.setRenderer(renderer)
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# add some styles
layer.styleManager().addStyleFromLayer('style1')
sym2 = QgsFillSymbol.createSimple({'color': '#00b2ff'})
renderer2 = QgsSingleSymbolRenderer(sym2)
layer.setRenderer(renderer2)
layer.styleManager().addStyleFromLayer('style2')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme2', 'theme2', canvas))
layer.styleManager().setCurrentStyle('style1')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# OK, so all good with setting/rendering map styles
# try setting canvas to a particular theme
# make some themes...
theme1 = QgsMapThemeCollection.MapThemeRecord()
record1 = QgsMapThemeCollection.MapThemeLayerRecord(layer)
record1.currentStyle = 'style1'
record1.usingCurrentStyle = True
theme1.setLayerRecords([record1])
theme2 = QgsMapThemeCollection.MapThemeRecord()
record2 = QgsMapThemeCollection.MapThemeLayerRecord(layer)
record2.currentStyle = 'style2'
record2.usingCurrentStyle = True
theme2.setLayerRecords([record2])
QgsProject.instance().mapThemeCollection().insert('theme1', theme1)
QgsProject.instance().mapThemeCollection().insert('theme2', theme2)
canvas.setTheme('theme2')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme2', 'theme2', canvas))
canvas.setTheme('theme1')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# add another layer
layer2 = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer2", "memory")
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer2.dataProvider().addFeatures([f]))
# create a style
sym1 = QgsFillSymbol.createSimple({'color': '#b2ff00'})
renderer = QgsSingleSymbolRenderer(sym1)
layer2.setRenderer(renderer)
# rerender canvas - should NOT show new layer
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# test again - this time refresh all layers
canvas.refreshAllLayers()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# add layer 2 to theme1
record3 = QgsMapThemeCollection.MapThemeLayerRecord(layer2)
theme1.setLayerRecords([record3])
QgsProject.instance().mapThemeCollection().update('theme1', theme1)
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme3', 'theme3', canvas))
# change the appearance of an active style
layer2.styleManager().addStyleFromLayer('original')
layer2.styleManager().addStyleFromLayer('style4')
record3.currentStyle = 'style4'
record3.usingCurrentStyle = True
theme1.setLayerRecords([record3])
QgsProject.instance().mapThemeCollection().update('theme1', theme1)
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme3', 'theme3', canvas))
layer2.styleManager().setCurrentStyle('style4')
sym3 = QgsFillSymbol.createSimple({'color': '#b200b2'})
layer2.renderer().setSymbol(sym3)
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme4', 'theme4', canvas))
# try setting layers while a theme is in place
canvas.setLayers([layer])
canvas.refresh()
# should be no change... setLayers should be ignored if canvas is following a theme!
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme4', 'theme4', canvas))
# setLayerStyleOverrides while theme is in place
canvas.setLayerStyleOverrides({layer2.id(): 'original'})
# should be no change... setLayerStyleOverrides should be ignored if canvas is following a theme!
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme4', 'theme4', canvas))
# clear theme
canvas.setTheme('')
canvas.refresh()
canvas.waitWhileRendering()
# should be different - we should now render project layers
self.assertFalse(self.canvasImageCheck('theme4', 'theme4', canvas))
# set canvas to theme1
canvas.setTheme('theme1')
canvas.refresh()
canvas.waitWhileRendering()
self.assertEqual(canvas.theme(), 'theme1')
themeLayers = theme1.layerRecords()
# rename the active theme
QgsProject.instance().mapThemeCollection().renameMapTheme('theme1', 'theme5')
# canvas theme should now be set to theme5
canvas.refresh()
canvas.waitWhileRendering()
self.assertEqual(canvas.theme(), 'theme5')
# theme5 should render as theme1
theme5 = QgsProject.instance().mapThemeCollection().mapThemeState('theme5')
theme5Layers = theme5.layerRecords()
self.assertEqual(themeLayers, theme5Layers, 'themes are different')
# self.assertTrue(self.canvasImageCheck('theme5', 'theme5', canvas))
def testMainAnnotationLayerRendered(self):
""" test that main annotation layer is rendered above all other layers """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
sym3 = QgsFillSymbol.createSimple({'color': '#b200b2'})
layer.renderer().setSymbol(sym3)
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# add polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# refresh canvas
canvas.refresh()
canvas.waitWhileRendering()
# no annotation yet...
self.assertFalse(self.canvasImageCheck('main_annotation_layer', 'main_annotation_layer', canvas))
annotation_layer = QgsProject.instance().mainAnnotationLayer()
annotation_layer.setCrs(QgsCoordinateReferenceSystem(4326))
annotation_geom = QgsGeometry.fromRect(QgsRectangle(12, 30, 18, 33))
annotation = QgsAnnotationPolygonItem(annotation_geom.constGet().clone())
sym3 = QgsFillSymbol.createSimple({'color': '#ff0000', 'outline_style': 'no'})
annotation.setSymbol(sym3)
annotation_layer.addItem(annotation)
# refresh canvas
canvas.refresh()
canvas.waitWhileRendering()
# annotation must be rendered over other layers
self.assertTrue(self.canvasImageCheck('main_annotation_layer', 'main_annotation_layer', canvas))
annotation_layer.clear()
def canvasImageCheck(self, name, reference_image, canvas):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'mapcanvas_' + name + ".png"
print(file_name)
canvas.saveAsImage(file_name)
checker = QgsMultiRenderChecker()
checker.setControlPathPrefix("mapcanvas")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.runTest(name, 20)
self.report += checker.report()
print((self.report))
return result
def testSaveCanvasVariablesToProject(self):
"""
Ensure that temporary canvas atlas variables are not written to project
"""
c1 = QgsMapCanvas()
c1.setObjectName('c1')
c1.expressionContextScope().setVariable('atlas_featurenumber', 1111)
c1.expressionContextScope().setVariable('atlas_pagename', 'bb')
c1.expressionContextScope().setVariable('atlas_feature', QgsFeature(1))
c1.expressionContextScope().setVariable('atlas_featureid', 22)
c1.expressionContextScope().setVariable('atlas_geometry', QgsGeometry.fromWkt('Point( 1 2 )'))
c1.expressionContextScope().setVariable('vara', 1111)
c1.expressionContextScope().setVariable('varb', 'bb')
doc = QDomDocument("testdoc")
elem = doc.createElement("qgis")
doc.appendChild(elem)
c1.writeProject(doc)
c2 = QgsMapCanvas()
c2.setObjectName('c1')
c2.readProject(doc)
self.assertCountEqual(c2.expressionContextScope().variableNames(), ['vara', 'varb'])
self.assertEqual(c2.expressionContextScope().variable('vara'), 1111)
self.assertEqual(c2.expressionContextScope().variable('varb'), 'bb')
def testSaveMultipleCanvasesToProject(self):
# test saving/restoring canvas state to project with multiple canvases
c1 = QgsMapCanvas()
c1.setObjectName('c1')
c1.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
c1.setRotation(45)
c1.expressionContextScope().setVariable('vara', 1111)
c1.expressionContextScope().setVariable('varb', 'bb')
c2 = QgsMapCanvas()
c2.setObjectName('c2')
c2.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
c2.setRotation(65)
c2.expressionContextScope().setVariable('vara', 2222)
c2.expressionContextScope().setVariable('varc', 'cc')
doc = QDomDocument("testdoc")
elem = doc.createElement("qgis")
doc.appendChild(elem)
c1.writeProject(doc)
c2.writeProject(doc)
c3 = QgsMapCanvas()
c3.setObjectName('c1')
c4 = QgsMapCanvas()
c4.setObjectName('c2')
c3.readProject(doc)
c4.readProject(doc)
self.assertEqual(c3.mapSettings().destinationCrs().authid(), 'EPSG:3111')
self.assertEqual(c3.rotation(), 45)
self.assertEqual(set(c3.expressionContextScope().variableNames()), {'vara', 'varb'})
self.assertEqual(c3.expressionContextScope().variable('vara'), 1111)
self.assertEqual(c3.expressionContextScope().variable('varb'), 'bb')
self.assertEqual(c4.mapSettings().destinationCrs().authid(), 'EPSG:4326')
self.assertEqual(c4.rotation(), 65)
self.assertEqual(set(c4.expressionContextScope().variableNames()), {'vara', 'varc'})
self.assertEqual(c4.expressionContextScope().variable('vara'), 2222)
self.assertEqual(c4.expressionContextScope().variable('varc'), 'cc')
def testLockedScale(self):
"""Test zoom/pan/center operations when scale lock is on"""
c = QgsMapCanvas()
dpr = c.mapSettings().devicePixelRatio()
self.assertEqual(c.size().width(), 640)
self.assertEqual(c.size().height(), 480)
c.setExtent(QgsRectangle(5, 45, 9, 47))
self.assertEqual(round(c.scale() / 100000), 13 * dpr)
c.zoomScale(2500000)
c.setScaleLocked(True)
self.assertEqual(round(c.magnificationFactor(), 1), 1)
# Test setExtent
c.setExtent(QgsRectangle(6, 45.5, 8, 46), True)
self.assertEqual(round(c.scale()), 2500000)
self.assertEqual(c.center().x(), 7.0)
self.assertEqual(c.center().y(), 45.75)
self.assertEqual(round(c.magnificationFactor()), 4 / dpr)
# Test setCenter
c.setCenter(QgsPointXY(6, 46))
self.assertEqual(c.center().x(), 6)
self.assertEqual(c.center().y(), 46)
self.assertEqual(round(c.scale()), 2500000)
# Test zoom
c.zoomByFactor(0.5, QgsPointXY(6.5, 46.5), False)
self.assertEqual(c.center().x(), 6.5)
self.assertEqual(c.center().y(), 46.5)
self.assertTrue(c.magnificationFactor() > 7 / dpr)
self.assertEqual(round(c.scale()), 2500000)
# Test zoom with center
# default zoom factor is 2, x and y are pixel coordinates, default size is 640x480
c.zoomWithCenter(300, 200, True)
self.assertEqual(round(c.center().x(), 1), 6.5)
self.assertEqual(round(c.center().y(), 1), 46.6)
self.assertEqual(round(c.scale()), 2500000)
self.assertTrue(c.magnificationFactor() > (14 / dpr) and c.magnificationFactor() < (16 / dpr))
# out ...
c.zoomWithCenter(300, 200, False)
self.assertEqual(round(c.center().x(), 1), 6.5)
self.assertEqual(round(c.center().y(), 1), 46.6)
self.assertEqual(round(c.scale()), 2500000)
self.assertTrue(c.magnificationFactor() > 7 / dpr)
# Test setExtent with different ratio
c2 = QgsMapCanvas()
c2.setExtent(QgsRectangle(5, 45, 9, 47))
c2.zoomScale(2500000)
c2.setScaleLocked(True)
c2.setExtent(QgsRectangle(3, 45, 11, 45.5), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 7.0)
self.assertEqual(c2.center().y(), 45.25)
self.assertAlmostEqual(c2.magnificationFactor(), 1 / dpr, 0)
# Restore original
c2.setExtent(QgsRectangle(5, 45, 9, 47), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 7.0)
self.assertEqual(c2.center().y(), 46.0)
self.assertAlmostEqual(c2.magnificationFactor(), 2 / dpr, 0)
c2.setExtent(QgsRectangle(7, 46, 11, 46.5), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 9.0)
self.assertEqual(c2.center().y(), 46.25)
self.assertAlmostEqual(c2.magnificationFactor(), 2 / dpr, 0)
c2.setExtent(QgsRectangle(7, 46, 9, 46.5), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 8.0)
self.assertEqual(c2.center().y(), 46.25)
self.assertAlmostEqual(c2.magnificationFactor(), 4 / dpr, 0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
liqueur/tornado | tornado/test/process_test.py | 123 | 10569 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import logging
import os
import signal
import subprocess
import sys
from tornado.httpclient import HTTPClient, HTTPError
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.process import fork_processes, task_id, Subprocess
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import bind_unused_port, ExpectLog, AsyncTestCase, gen_test
from tornado.test.util import unittest, skipIfNonUnix
from tornado.web import RequestHandler, Application
def skip_if_twisted():
if IOLoop.configured_class().__name__.endswith(('TwistedIOLoop',
'AsyncIOMainLoop')):
raise unittest.SkipTest("Process tests not compatible with "
"TwistedIOLoop or AsyncIOMainLoop")
# Not using AsyncHTTPTestCase because we need control over the IOLoop.
@skipIfNonUnix
class ProcessTest(unittest.TestCase):
def get_app(self):
class ProcessHandler(RequestHandler):
def get(self):
if self.get_argument("exit", None):
# must use os._exit instead of sys.exit so unittest's
# exception handler doesn't catch it
os._exit(int(self.get_argument("exit")))
if self.get_argument("signal", None):
os.kill(os.getpid(),
int(self.get_argument("signal")))
self.write(str(os.getpid()))
return Application([("/", ProcessHandler)])
def tearDown(self):
if task_id() is not None:
# We're in a child process, and probably got to this point
# via an uncaught exception. If we return now, both
# processes will continue with the rest of the test suite.
# Exit now so the parent process will restart the child
# (since we don't have a clean way to signal failure to
# the parent that won't restart)
logging.error("aborting child process from tearDown")
logging.shutdown()
os._exit(1)
# In the surviving process, clear the alarm we set earlier
signal.alarm(0)
super(ProcessTest, self).tearDown()
def test_multi_process(self):
# This test can't work on twisted because we use the global reactor
# and have no way to get it back into a sane state after the fork.
skip_if_twisted()
with ExpectLog(gen_log, "(Starting .* processes|child .* exited|uncaught exception)"):
self.assertFalse(IOLoop.initialized())
sock, port = bind_unused_port()
def get_url(path):
return "http://127.0.0.1:%d%s" % (port, path)
# ensure that none of these processes live too long
signal.alarm(5) # master process
try:
id = fork_processes(3, max_restarts=3)
self.assertTrue(id is not None)
signal.alarm(5) # child processes
except SystemExit as e:
# if we exit cleanly from fork_processes, all the child processes
# finished with status 0
self.assertEqual(e.code, 0)
self.assertTrue(task_id() is None)
sock.close()
return
try:
if id in (0, 1):
self.assertEqual(id, task_id())
server = HTTPServer(self.get_app())
server.add_sockets([sock])
IOLoop.current().start()
elif id == 2:
self.assertEqual(id, task_id())
sock.close()
# Always use SimpleAsyncHTTPClient here; the curl
# version appears to get confused sometimes if the
# connection gets closed before it's had a chance to
# switch from writing mode to reading mode.
client = HTTPClient(SimpleAsyncHTTPClient)
def fetch(url, fail_ok=False):
try:
return client.fetch(get_url(url))
except HTTPError as e:
if not (fail_ok and e.code == 599):
raise
# Make two processes exit abnormally
fetch("/?exit=2", fail_ok=True)
fetch("/?exit=3", fail_ok=True)
# They've been restarted, so a new fetch will work
int(fetch("/").body)
# Now the same with signals
# Disabled because on the mac a process dying with a signal
# can trigger an "Application exited abnormally; send error
# report to Apple?" prompt.
# fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
# fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
# int(fetch("/").body)
# Now kill them normally so they won't be restarted
fetch("/?exit=0", fail_ok=True)
                # One process left; watch its pid change
pid = int(fetch("/").body)
fetch("/?exit=4", fail_ok=True)
pid2 = int(fetch("/").body)
self.assertNotEqual(pid, pid2)
# Kill the last one so we shut down cleanly
fetch("/?exit=0", fail_ok=True)
os._exit(0)
except Exception:
logging.error("exception in child process %d", id, exc_info=True)
raise
@skipIfNonUnix
class SubprocessTest(AsyncTestCase):
def test_subprocess(self):
if IOLoop.configured_class().__name__.endswith('LayeredTwistedIOLoop'):
# This test fails non-deterministically with LayeredTwistedIOLoop.
# (the read_until('\n') returns '\n' instead of 'hello\n')
# This probably indicates a problem with either TornadoReactor
# or TwistedIOLoop, but I haven't been able to track it down
# and for now this is just causing spurious travis-ci failures.
raise unittest.SkipTest("Subprocess tests not compatible with "
"LayeredTwistedIOLoop")
subproc = Subprocess([sys.executable, '-u', '-i'],
stdin=Subprocess.STREAM,
stdout=Subprocess.STREAM, stderr=subprocess.STDOUT,
io_loop=self.io_loop)
self.addCleanup(lambda: os.kill(subproc.pid, signal.SIGTERM))
subproc.stdout.read_until(b'>>> ', self.stop)
self.wait()
subproc.stdin.write(b"print('hello')\n")
subproc.stdout.read_until(b'\n', self.stop)
data = self.wait()
self.assertEqual(data, b"hello\n")
subproc.stdout.read_until(b">>> ", self.stop)
self.wait()
subproc.stdin.write(b"raise SystemExit\n")
subproc.stdout.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"")
def test_close_stdin(self):
# Close the parent's stdin handle and see that the child recognizes it.
subproc = Subprocess([sys.executable, '-u', '-i'],
stdin=Subprocess.STREAM,
stdout=Subprocess.STREAM, stderr=subprocess.STDOUT,
io_loop=self.io_loop)
self.addCleanup(lambda: os.kill(subproc.pid, signal.SIGTERM))
subproc.stdout.read_until(b'>>> ', self.stop)
self.wait()
subproc.stdin.close()
subproc.stdout.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"\n")
def test_stderr(self):
subproc = Subprocess([sys.executable, '-u', '-c',
r"import sys; sys.stderr.write('hello\n')"],
stderr=Subprocess.STREAM,
io_loop=self.io_loop)
self.addCleanup(lambda: os.kill(subproc.pid, signal.SIGTERM))
subproc.stderr.read_until(b'\n', self.stop)
data = self.wait()
self.assertEqual(data, b'hello\n')
def test_sigchild(self):
# Twisted's SIGCHLD handler and Subprocess's conflict with each other.
skip_if_twisted()
Subprocess.initialize(io_loop=self.io_loop)
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'pass'],
io_loop=self.io_loop)
subproc.set_exit_callback(self.stop)
ret = self.wait()
self.assertEqual(ret, 0)
self.assertEqual(subproc.returncode, ret)
@gen_test
def test_sigchild_future(self):
skip_if_twisted()
Subprocess.initialize()
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'pass'])
ret = yield subproc.wait_for_exit()
self.assertEqual(ret, 0)
self.assertEqual(subproc.returncode, ret)
def test_sigchild_signal(self):
skip_if_twisted()
Subprocess.initialize(io_loop=self.io_loop)
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c',
'import time; time.sleep(30)'],
io_loop=self.io_loop)
subproc.set_exit_callback(self.stop)
os.kill(subproc.pid, signal.SIGTERM)
ret = self.wait()
self.assertEqual(subproc.returncode, ret)
self.assertEqual(ret, -signal.SIGTERM)
@gen_test
def test_wait_for_exit_raise(self):
skip_if_twisted()
Subprocess.initialize()
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
with self.assertRaises(subprocess.CalledProcessError) as cm:
yield subproc.wait_for_exit()
self.assertEqual(cm.exception.returncode, 1)
@gen_test
def test_wait_for_exit_raise_disabled(self):
skip_if_twisted()
Subprocess.initialize()
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
ret = yield subproc.wait_for_exit(raise_error=False)
self.assertEqual(ret, 1)
| apache-2.0 |
spbguru/repo1 | tests/integration/py2/nupic/engine/network_testnode_interchangeability.py | 17 | 6158 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This test verifies that the C++ test node and py.TestNode
It creates the same two node network with all four combinations
of TestNode and py.TestNode:
1. TestNode, TestNode
2. TestNode, py.TestNode
3. py.TestNode, TestNode
4. py.TestNode, py.TestNode
Then it performs the same tests as the twonode_network demo (except the
error-message tests for the three-node network):
- Can add regions to network and set dimensions
- Linking induces dimensions correctly
- Network computation happens in correct order
- Direct (zero-copy) access to outputs
- Linking correctly maps outputs to inputs
"""
import logging
import unittest2 as unittest
from nupic.engine import Network, Dimensions
LOGGER = logging.getLogger(__name__)
class NetworkTestNodeInterchangeabilityTest(unittest.TestCase):
def testNodesPyTestNodeAndTestNode(self):
self.runNodesTest('py.TestNode', 'TestNode')
def testNodesTestNodeAndPyTestNode(self):
self.runNodesTest('TestNode', 'py.TestNode')
def testNodesTestNodeAndTestNode(self):
self.runNodesTest('TestNode', 'TestNode')
def testNodesPyTestNodeAndPyTestNode(self):
self.runNodesTest('py.TestNode', 'py.TestNode')
def runNodesTest(self, nodeType1, nodeType2):
# =====================================================
# Build and run the network
# =====================================================
LOGGER.info('test(level1: %s, level2: %s)', nodeType1, nodeType2)
net = Network()
level1 = net.addRegion("level1", nodeType1, "{int32Param: 15}")
dims = Dimensions([6, 4])
level1.setDimensions(dims)
level2 = net.addRegion("level2", nodeType2, "{real64Param: 128.23}")
net.link("level1", "level2", "TestFanIn2", "")
# Could call initialize here, but not necessary as net.run()
# initializes implicitly.
# net.initialize()
net.run(1)
LOGGER.info("Successfully created network and ran for one iteration")
# =====================================================
# Check everything
# =====================================================
dims = level1.getDimensions()
self.assertEqual(len(dims), 2)
self.assertEqual(dims[0], 6)
self.assertEqual(dims[1], 4)
dims = level2.getDimensions()
self.assertEqual(len(dims), 2)
self.assertEqual(dims[0], 3)
self.assertEqual(dims[1], 2)
# Check L1 output. "False" means don't copy, i.e.
# get a pointer to the actual output
# Actual output values are determined by the TestNode
# compute() behavior.
l1output = level1.getOutputData("bottomUpOut")
self.assertEqual(len(l1output), 48) # 24 nodes; 2 values per node
for i in xrange(24):
self.assertEqual(l1output[2*i], 0) # size of input to each node is 0
self.assertEqual(l1output[2*i+1], i) # node number
# check L2 output.
l2output = level2.getOutputData("bottomUpOut")
self.assertEqual(len(l2output), 12) # 6 nodes; 2 values per node
# Output val = node number + sum(inputs)
# Can compute from knowing L1 layout
#
# 00 01 | 02 03 | 04 05
# 06 07 | 08 09 | 10 11
# ---------------------
# 12 13 | 14 15 | 16 17
# 18 19 | 20 21 | 22 23
outputVals = []
outputVals.append(0 + (0 + 1 + 6 + 7))
outputVals.append(1 + (2 + 3 + 8 + 9))
outputVals.append(2 + (4 + 5 + 10 + 11))
outputVals.append(3 + (12 + 13 + 18 + 19))
outputVals.append(4 + (14 + 15 + 20 + 21))
outputVals.append(5 + (16 + 17 + 22 + 23))
for i in xrange(6):
if l2output[2*i] != 8:
LOGGER.info(l2output[2*i])
# from dbgp.client import brk; brk(port=9019)
self.assertEqual(l2output[2*i], 8) # size of input for each node is 8
self.assertEqual(l2output[2*i+1], outputVals[i])
# =====================================================
# Run for one more iteration
# =====================================================
LOGGER.info("Running for a second iteration")
net.run(1)
# =====================================================
# Check everything again
# =====================================================
# Outputs are all the same except that the first output is
# incremented by the iteration number
for i in xrange(24):
self.assertEqual(l1output[2*i], 1)
self.assertEqual(l1output[2*i+1], i)
for i in xrange(6):
self.assertEqual(l2output[2*i], 9)
self.assertEqual(l2output[2*i+1], outputVals[i] + 4)
# =====================================================
# Demonstrate a few other features
# =====================================================
#
# Linking can induce dimensions downward
#
net = Network()
level1 = net.addRegion("level1", nodeType1, "")
level2 = net.addRegion("level2", nodeType2, "")
dims = Dimensions([3, 2])
level2.setDimensions(dims)
net.link("level1", "level2", "TestFanIn2", "")
net.initialize()
# Level1 should now have dimensions [6, 4]
self.assertEqual(level1.getDimensions()[0], 6)
self.assertEqual(level1.getDimensions()[1], 4)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |